/*
 * linux-user/syscall.c (from mirror_qemu.git, git.proxmox.com mirror).
 * Patch context: "linux-user: Use correct target SHMLBA in shmat()".
 */
1 /*
2 * Linux syscalls
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #ifdef __ia64__
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
41 #endif
42 #include <sys/socket.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <sys/poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
77
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
84
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
112
113 #include "qemu.h"
114
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
117
118 //#define DEBUG
119 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
120 * once. This exercises the codepaths for restart.
121 */
122 //#define DEBUG_ERESTARTSYS
123
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
127
128 #undef _syscall0
129 #undef _syscall1
130 #undef _syscall2
131 #undef _syscall3
132 #undef _syscall4
133 #undef _syscall5
134 #undef _syscall6
135
136 #define _syscall0(type,name) \
137 static type name (void) \
138 { \
139 return syscall(__NR_##name); \
140 }
141
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
144 { \
145 return syscall(__NR_##name, arg1); \
146 }
147
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
150 { \
151 return syscall(__NR_##name, arg1, arg2); \
152 }
153
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
156 { \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
158 }
159
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
162 { \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
164 }
165
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
167 type5,arg5) \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
169 { \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
171 }
172
173
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
177 type6 arg6) \
178 { \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
180 }
181
182
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
194
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
196 defined(__s390x__)
197 #define __NR__llseek __NR_lseek
198 #endif
199
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
203 #endif
204
205 #ifdef __NR_gettid
206 _syscall0(int, gettid)
207 #else
/* This is a replacement for the host gettid() and must return a host
 * errno.  With no __NR_gettid available we cannot query the thread id,
 * so report the call as unimplemented. */
static int gettid(void)
{
    return -ENOSYS;
}
213 #endif
214 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
215 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
216 #endif
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
219 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
220 #endif
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
223 loff_t *, res, uint, wh);
224 #endif
225 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
226 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
227 #ifdef __NR_exit_group
228 _syscall1(int,exit_group,int,error_code)
229 #endif
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231 _syscall1(int,set_tid_address,int *,tidptr)
232 #endif
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
235 const struct timespec *,timeout,int *,uaddr2,int,val3)
236 #endif
237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
239 unsigned long *, user_mask_ptr);
240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
242 unsigned long *, user_mask_ptr);
243 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
244 void *, arg);
245 _syscall2(int, capget, struct __user_cap_header_struct *, header,
246 struct __user_cap_data_struct *, data);
247 _syscall2(int, capset, struct __user_cap_header_struct *, header,
248 struct __user_cap_data_struct *, data);
249 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
250 _syscall2(int, ioprio_get, int, which, int, who)
251 #endif
252 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
253 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
254 #endif
255 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
256 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
257 #endif
258
/* Translation table for open()/fcntl() flag bits.  Each row appears to be
 * { target_mask, target_bits, host_mask, host_bits } — presumably consumed
 * by the generic bitmask translation helpers; confirm against their
 * definitions elsewhere in this file.  Host-optional flags are guarded so
 * the table still builds on libcs that lack them. */
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  /* O_SYNC is a superset of O_DSYNC: match the DSYNC bit first, then the
   * full SYNC pattern. */
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }  /* sentinel terminator */
};
291
292 enum {
293 QEMU_IFLA_BR_UNSPEC,
294 QEMU_IFLA_BR_FORWARD_DELAY,
295 QEMU_IFLA_BR_HELLO_TIME,
296 QEMU_IFLA_BR_MAX_AGE,
297 QEMU_IFLA_BR_AGEING_TIME,
298 QEMU_IFLA_BR_STP_STATE,
299 QEMU_IFLA_BR_PRIORITY,
300 QEMU_IFLA_BR_VLAN_FILTERING,
301 QEMU_IFLA_BR_VLAN_PROTOCOL,
302 QEMU_IFLA_BR_GROUP_FWD_MASK,
303 QEMU_IFLA_BR_ROOT_ID,
304 QEMU_IFLA_BR_BRIDGE_ID,
305 QEMU_IFLA_BR_ROOT_PORT,
306 QEMU_IFLA_BR_ROOT_PATH_COST,
307 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
308 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
309 QEMU_IFLA_BR_HELLO_TIMER,
310 QEMU_IFLA_BR_TCN_TIMER,
311 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
312 QEMU_IFLA_BR_GC_TIMER,
313 QEMU_IFLA_BR_GROUP_ADDR,
314 QEMU_IFLA_BR_FDB_FLUSH,
315 QEMU_IFLA_BR_MCAST_ROUTER,
316 QEMU_IFLA_BR_MCAST_SNOOPING,
317 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
318 QEMU_IFLA_BR_MCAST_QUERIER,
319 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
320 QEMU_IFLA_BR_MCAST_HASH_MAX,
321 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
322 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
323 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
324 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
325 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
326 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
327 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
328 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
329 QEMU_IFLA_BR_NF_CALL_IPTABLES,
330 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
331 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
332 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
333 QEMU_IFLA_BR_PAD,
334 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
335 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
336 QEMU___IFLA_BR_MAX,
337 };
338
339 enum {
340 QEMU_IFLA_UNSPEC,
341 QEMU_IFLA_ADDRESS,
342 QEMU_IFLA_BROADCAST,
343 QEMU_IFLA_IFNAME,
344 QEMU_IFLA_MTU,
345 QEMU_IFLA_LINK,
346 QEMU_IFLA_QDISC,
347 QEMU_IFLA_STATS,
348 QEMU_IFLA_COST,
349 QEMU_IFLA_PRIORITY,
350 QEMU_IFLA_MASTER,
351 QEMU_IFLA_WIRELESS,
352 QEMU_IFLA_PROTINFO,
353 QEMU_IFLA_TXQLEN,
354 QEMU_IFLA_MAP,
355 QEMU_IFLA_WEIGHT,
356 QEMU_IFLA_OPERSTATE,
357 QEMU_IFLA_LINKMODE,
358 QEMU_IFLA_LINKINFO,
359 QEMU_IFLA_NET_NS_PID,
360 QEMU_IFLA_IFALIAS,
361 QEMU_IFLA_NUM_VF,
362 QEMU_IFLA_VFINFO_LIST,
363 QEMU_IFLA_STATS64,
364 QEMU_IFLA_VF_PORTS,
365 QEMU_IFLA_PORT_SELF,
366 QEMU_IFLA_AF_SPEC,
367 QEMU_IFLA_GROUP,
368 QEMU_IFLA_NET_NS_FD,
369 QEMU_IFLA_EXT_MASK,
370 QEMU_IFLA_PROMISCUITY,
371 QEMU_IFLA_NUM_TX_QUEUES,
372 QEMU_IFLA_NUM_RX_QUEUES,
373 QEMU_IFLA_CARRIER,
374 QEMU_IFLA_PHYS_PORT_ID,
375 QEMU_IFLA_CARRIER_CHANGES,
376 QEMU_IFLA_PHYS_SWITCH_ID,
377 QEMU_IFLA_LINK_NETNSID,
378 QEMU_IFLA_PHYS_PORT_NAME,
379 QEMU_IFLA_PROTO_DOWN,
380 QEMU_IFLA_GSO_MAX_SEGS,
381 QEMU_IFLA_GSO_MAX_SIZE,
382 QEMU_IFLA_PAD,
383 QEMU_IFLA_XDP,
384 QEMU___IFLA_MAX
385 };
386
387 enum {
388 QEMU_IFLA_BRPORT_UNSPEC,
389 QEMU_IFLA_BRPORT_STATE,
390 QEMU_IFLA_BRPORT_PRIORITY,
391 QEMU_IFLA_BRPORT_COST,
392 QEMU_IFLA_BRPORT_MODE,
393 QEMU_IFLA_BRPORT_GUARD,
394 QEMU_IFLA_BRPORT_PROTECT,
395 QEMU_IFLA_BRPORT_FAST_LEAVE,
396 QEMU_IFLA_BRPORT_LEARNING,
397 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
398 QEMU_IFLA_BRPORT_PROXYARP,
399 QEMU_IFLA_BRPORT_LEARNING_SYNC,
400 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
401 QEMU_IFLA_BRPORT_ROOT_ID,
402 QEMU_IFLA_BRPORT_BRIDGE_ID,
403 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
404 QEMU_IFLA_BRPORT_DESIGNATED_COST,
405 QEMU_IFLA_BRPORT_ID,
406 QEMU_IFLA_BRPORT_NO,
407 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
408 QEMU_IFLA_BRPORT_CONFIG_PENDING,
409 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
410 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
411 QEMU_IFLA_BRPORT_HOLD_TIMER,
412 QEMU_IFLA_BRPORT_FLUSH,
413 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
414 QEMU_IFLA_BRPORT_PAD,
415 QEMU___IFLA_BRPORT_MAX
416 };
417
418 enum {
419 QEMU_IFLA_INFO_UNSPEC,
420 QEMU_IFLA_INFO_KIND,
421 QEMU_IFLA_INFO_DATA,
422 QEMU_IFLA_INFO_XSTATS,
423 QEMU_IFLA_INFO_SLAVE_KIND,
424 QEMU_IFLA_INFO_SLAVE_DATA,
425 QEMU___IFLA_INFO_MAX,
426 };
427
428 enum {
429 QEMU_IFLA_INET_UNSPEC,
430 QEMU_IFLA_INET_CONF,
431 QEMU___IFLA_INET_MAX,
432 };
433
434 enum {
435 QEMU_IFLA_INET6_UNSPEC,
436 QEMU_IFLA_INET6_FLAGS,
437 QEMU_IFLA_INET6_CONF,
438 QEMU_IFLA_INET6_STATS,
439 QEMU_IFLA_INET6_MCAST,
440 QEMU_IFLA_INET6_CACHEINFO,
441 QEMU_IFLA_INET6_ICMP6STATS,
442 QEMU_IFLA_INET6_TOKEN,
443 QEMU_IFLA_INET6_ADDR_GEN_MODE,
444 QEMU___IFLA_INET6_MAX
445 };
446
447 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
448 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
449 typedef struct TargetFdTrans {
450 TargetFdDataFunc host_to_target_data;
451 TargetFdDataFunc target_to_host_data;
452 TargetFdAddrFunc target_to_host_addr;
453 } TargetFdTrans;
454
455 static TargetFdTrans **target_fd_trans;
456
457 static unsigned int target_fd_max;
458
459 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
460 {
461 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
462 return target_fd_trans[fd]->target_to_host_data;
463 }
464 return NULL;
465 }
466
467 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
468 {
469 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
470 return target_fd_trans[fd]->host_to_target_data;
471 }
472 return NULL;
473 }
474
475 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
476 {
477 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
478 return target_fd_trans[fd]->target_to_host_addr;
479 }
480 return NULL;
481 }
482
/*
 * Associate translation callbacks @trans with guest fd @fd.  The lookup
 * table is grown lazily in slices of 64 entries; newly exposed slots are
 * zeroed so they read back as "no translator".
 * NOTE(review): @fd (int) is compared against the unsigned target_fd_max,
 * so a negative fd would be converted to a huge unsigned value here —
 * callers are expected to pass only valid, non-negative descriptors.
 */
static void fd_trans_register(int fd, TargetFdTrans *trans)
{
    unsigned int oldmax;

    if (fd >= target_fd_max) {
        oldmax = target_fd_max;
        target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
        target_fd_trans = g_renew(TargetFdTrans *,
                                  target_fd_trans, target_fd_max);
        /* Zero the slots between the old and new table sizes. */
        memset((void *)(target_fd_trans + oldmax), 0,
               (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
    }
    target_fd_trans[fd] = trans;
}
497
498 static void fd_trans_unregister(int fd)
499 {
500 if (fd >= 0 && fd < target_fd_max) {
501 target_fd_trans[fd] = NULL;
502 }
503 }
504
505 static void fd_trans_dup(int oldfd, int newfd)
506 {
507 fd_trans_unregister(newfd);
508 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
509 fd_trans_register(newfd, target_fd_trans[oldfd]);
510 }
511 }
512
/*
 * getcwd() wrapper with kernel-style result: on success, the number of
 * bytes in the path including the trailing NUL; on failure, -1 with
 * errno already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (!getcwd(buf, size)) {
        return -1;  /* errno set by getcwd() */
    }
    return strlen(buf) + 1;
}
521
522 #ifdef TARGET_NR_utimensat
523 #if defined(__NR_utimensat)
524 #define __NR_sys_utimensat __NR_utimensat
525 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
526 const struct timespec *,tsp,int,flags)
527 #else
/* Fallback for hosts without the utimensat syscall: always fail with
 * ENOSYS so the emulated TARGET_NR_utimensat reports "not supported". */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
534 #endif
535 #endif /* TARGET_NR_utimensat */
536
537 #ifdef CONFIG_INOTIFY
538 #include <sys/inotify.h>
539
540 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper exposing inotify_init() under the sys_ naming used by
 * the syscall dispatcher. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
545 #endif
546 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper exposing inotify_add_watch() under the sys_ naming used
 * by the syscall dispatcher. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
551 #endif
552 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper exposing inotify_rm_watch() under the sys_ naming used
 * by the syscall dispatcher. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
557 #endif
558 #ifdef CONFIG_INOTIFY1
559 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper exposing inotify_init1() under the sys_ naming used by
 * the syscall dispatcher. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
564 #endif
565 #endif
566 #else
567 /* Userspace can usually survive runtime without inotify */
568 #undef TARGET_NR_inotify_init
569 #undef TARGET_NR_inotify_init1
570 #undef TARGET_NR_inotify_add_watch
571 #undef TARGET_NR_inotify_rm_watch
572 #endif /* CONFIG_INOTIFY */
573
574 #if defined(TARGET_NR_prlimit64)
575 #ifndef __NR_prlimit64
576 # define __NR_prlimit64 -1
577 #endif
578 #define __NR_sys_prlimit64 __NR_prlimit64
579 /* The glibc rlimit structure may not be that used by the underlying syscall */
580 struct host_rlimit64 {
581 uint64_t rlim_cur;
582 uint64_t rlim_max;
583 };
584 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
585 const struct host_rlimit64 *, new_limit,
586 struct host_rlimit64 *, old_limit)
587 #endif
588
589
590 #if defined(TARGET_NR_timer_create)
591 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
592 static timer_t g_posix_timers[32] = { 0, } ;
593
594 static inline int next_free_host_timer(void)
595 {
596 int k ;
597 /* FIXME: Does finding the next free slot require a lock? */
598 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
599 if (g_posix_timers[k] == 0) {
600 g_posix_timers[k] = (timer_t) 1;
601 return k;
602 }
603 }
604 return -1;
605 }
606 #endif
607
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env)
{
    return ((CPUARMState *)cpu_env)->eabi == 1;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env)
{
    return 1;
}
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even
 * pairs of registers, which translates to the same as ARM/MIPS, because
 * we start with r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env)
{
    return 1;
}
#else
static inline int regpairs_aligned(void *cpu_env)
{
    return 0;
}
#endif
623
624 #define ERRNO_TABLE_SIZE 1200
625
626 /* target_to_host_errno_table[] is initialized from
627 * host_to_target_errno_table[] in syscall_init(). */
628 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
629 };
630
631 /*
632 * This list is the union of errno values overridden in asm-<arch>/errno.h
633 * minus the errnos that are not actually generic to all archs.
634 */
635 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
636 [EAGAIN] = TARGET_EAGAIN,
637 [EIDRM] = TARGET_EIDRM,
638 [ECHRNG] = TARGET_ECHRNG,
639 [EL2NSYNC] = TARGET_EL2NSYNC,
640 [EL3HLT] = TARGET_EL3HLT,
641 [EL3RST] = TARGET_EL3RST,
642 [ELNRNG] = TARGET_ELNRNG,
643 [EUNATCH] = TARGET_EUNATCH,
644 [ENOCSI] = TARGET_ENOCSI,
645 [EL2HLT] = TARGET_EL2HLT,
646 [EDEADLK] = TARGET_EDEADLK,
647 [ENOLCK] = TARGET_ENOLCK,
648 [EBADE] = TARGET_EBADE,
649 [EBADR] = TARGET_EBADR,
650 [EXFULL] = TARGET_EXFULL,
651 [ENOANO] = TARGET_ENOANO,
652 [EBADRQC] = TARGET_EBADRQC,
653 [EBADSLT] = TARGET_EBADSLT,
654 [EBFONT] = TARGET_EBFONT,
655 [ENOSTR] = TARGET_ENOSTR,
656 [ENODATA] = TARGET_ENODATA,
657 [ETIME] = TARGET_ETIME,
658 [ENOSR] = TARGET_ENOSR,
659 [ENONET] = TARGET_ENONET,
660 [ENOPKG] = TARGET_ENOPKG,
661 [EREMOTE] = TARGET_EREMOTE,
662 [ENOLINK] = TARGET_ENOLINK,
663 [EADV] = TARGET_EADV,
664 [ESRMNT] = TARGET_ESRMNT,
665 [ECOMM] = TARGET_ECOMM,
666 [EPROTO] = TARGET_EPROTO,
667 [EDOTDOT] = TARGET_EDOTDOT,
668 [EMULTIHOP] = TARGET_EMULTIHOP,
669 [EBADMSG] = TARGET_EBADMSG,
670 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
671 [EOVERFLOW] = TARGET_EOVERFLOW,
672 [ENOTUNIQ] = TARGET_ENOTUNIQ,
673 [EBADFD] = TARGET_EBADFD,
674 [EREMCHG] = TARGET_EREMCHG,
675 [ELIBACC] = TARGET_ELIBACC,
676 [ELIBBAD] = TARGET_ELIBBAD,
677 [ELIBSCN] = TARGET_ELIBSCN,
678 [ELIBMAX] = TARGET_ELIBMAX,
679 [ELIBEXEC] = TARGET_ELIBEXEC,
680 [EILSEQ] = TARGET_EILSEQ,
681 [ENOSYS] = TARGET_ENOSYS,
682 [ELOOP] = TARGET_ELOOP,
683 [ERESTART] = TARGET_ERESTART,
684 [ESTRPIPE] = TARGET_ESTRPIPE,
685 [ENOTEMPTY] = TARGET_ENOTEMPTY,
686 [EUSERS] = TARGET_EUSERS,
687 [ENOTSOCK] = TARGET_ENOTSOCK,
688 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
689 [EMSGSIZE] = TARGET_EMSGSIZE,
690 [EPROTOTYPE] = TARGET_EPROTOTYPE,
691 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
692 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
693 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
694 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
695 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
696 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
697 [EADDRINUSE] = TARGET_EADDRINUSE,
698 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
699 [ENETDOWN] = TARGET_ENETDOWN,
700 [ENETUNREACH] = TARGET_ENETUNREACH,
701 [ENETRESET] = TARGET_ENETRESET,
702 [ECONNABORTED] = TARGET_ECONNABORTED,
703 [ECONNRESET] = TARGET_ECONNRESET,
704 [ENOBUFS] = TARGET_ENOBUFS,
705 [EISCONN] = TARGET_EISCONN,
706 [ENOTCONN] = TARGET_ENOTCONN,
707 [EUCLEAN] = TARGET_EUCLEAN,
708 [ENOTNAM] = TARGET_ENOTNAM,
709 [ENAVAIL] = TARGET_ENAVAIL,
710 [EISNAM] = TARGET_EISNAM,
711 [EREMOTEIO] = TARGET_EREMOTEIO,
712 [ESHUTDOWN] = TARGET_ESHUTDOWN,
713 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
714 [ETIMEDOUT] = TARGET_ETIMEDOUT,
715 [ECONNREFUSED] = TARGET_ECONNREFUSED,
716 [EHOSTDOWN] = TARGET_EHOSTDOWN,
717 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
718 [EALREADY] = TARGET_EALREADY,
719 [EINPROGRESS] = TARGET_EINPROGRESS,
720 [ESTALE] = TARGET_ESTALE,
721 [ECANCELED] = TARGET_ECANCELED,
722 [ENOMEDIUM] = TARGET_ENOMEDIUM,
723 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
724 #ifdef ENOKEY
725 [ENOKEY] = TARGET_ENOKEY,
726 #endif
727 #ifdef EKEYEXPIRED
728 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
729 #endif
730 #ifdef EKEYREVOKED
731 [EKEYREVOKED] = TARGET_EKEYREVOKED,
732 #endif
733 #ifdef EKEYREJECTED
734 [EKEYREJECTED] = TARGET_EKEYREJECTED,
735 #endif
736 #ifdef EOWNERDEAD
737 [EOWNERDEAD] = TARGET_EOWNERDEAD,
738 #endif
739 #ifdef ENOTRECOVERABLE
740 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
741 #endif
742 };
743
744 static inline int host_to_target_errno(int err)
745 {
746 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
747 host_to_target_errno_table[err]) {
748 return host_to_target_errno_table[err];
749 }
750 return err;
751 }
752
753 static inline int target_to_host_errno(int err)
754 {
755 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
756 target_to_host_errno_table[err]) {
757 return target_to_host_errno_table[err];
758 }
759 return err;
760 }
761
762 static inline abi_long get_errno(abi_long ret)
763 {
764 if (ret == -1)
765 return -host_to_target_errno(errno);
766 else
767 return ret;
768 }
769
770 static inline int is_error(abi_long ret)
771 {
772 return (abi_ulong)ret >= (abi_ulong)(-4096);
773 }
774
775 const char *target_strerror(int err)
776 {
777 if (err == TARGET_ERESTARTSYS) {
778 return "To be restarted";
779 }
780 if (err == TARGET_QEMU_ESIGRETURN) {
781 return "Successful exit from sigreturn";
782 }
783
784 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
785 return NULL;
786 }
787 return strerror(target_to_host_errno(err));
788 }
789
790 #define safe_syscall0(type, name) \
791 static type safe_##name(void) \
792 { \
793 return safe_syscall(__NR_##name); \
794 }
795
796 #define safe_syscall1(type, name, type1, arg1) \
797 static type safe_##name(type1 arg1) \
798 { \
799 return safe_syscall(__NR_##name, arg1); \
800 }
801
802 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
803 static type safe_##name(type1 arg1, type2 arg2) \
804 { \
805 return safe_syscall(__NR_##name, arg1, arg2); \
806 }
807
808 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
809 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
810 { \
811 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
812 }
813
814 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
815 type4, arg4) \
816 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
817 { \
818 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
819 }
820
821 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
822 type4, arg4, type5, arg5) \
823 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
824 type5 arg5) \
825 { \
826 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
827 }
828
829 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
830 type4, arg4, type5, arg5, type6, arg6) \
831 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
832 type5 arg5, type6 arg6) \
833 { \
834 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
835 }
836
837 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
838 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
839 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
840 int, flags, mode_t, mode)
841 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
842 struct rusage *, rusage)
843 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
844 int, options, struct rusage *, rusage)
845 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
846 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
847 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
848 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
849 struct timespec *, tsp, const sigset_t *, sigmask,
850 size_t, sigsetsize)
851 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
852 int, maxevents, int, timeout, const sigset_t *, sigmask,
853 size_t, sigsetsize)
854 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
855 const struct timespec *,timeout,int *,uaddr2,int,val3)
856 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
857 safe_syscall2(int, kill, pid_t, pid, int, sig)
858 safe_syscall2(int, tkill, int, tid, int, sig)
859 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
860 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
861 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
862 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
863 socklen_t, addrlen)
864 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
865 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
866 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
867 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
868 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
869 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
870 safe_syscall2(int, flock, int, fd, int, operation)
871 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
872 const struct timespec *, uts, size_t, sigsetsize)
873 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
874 int, flags)
875 safe_syscall2(int, nanosleep, const struct timespec *, req,
876 struct timespec *, rem)
877 #ifdef TARGET_NR_clock_nanosleep
878 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
879 const struct timespec *, req, struct timespec *, rem)
880 #endif
881 #ifdef __NR_msgsnd
882 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
883 int, flags)
884 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
885 long, msgtype, int, flags)
886 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
887 unsigned, nsops, const struct timespec *, timeout)
888 #else
889 /* This host kernel architecture uses a single ipc syscall; fake up
890 * wrappers for the sub-operations to hide this implementation detail.
891 * Annoyingly we can't include linux/ipc.h to get the constant definitions
892 * for the call parameter because some structs in there conflict with the
893 * sys/ipc.h ones. So we just define them here, and rely on them being
894 * the same for all host architectures.
895 */
896 #define Q_SEMTIMEDOP 4
897 #define Q_MSGSND 11
898 #define Q_MSGRCV 12
899 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
900
901 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
902 void *, ptr, long, fifth)
/* msgsnd() routed through the multiplexed ipc syscall (call version 0). */
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* msgrcv() routed through the multiplexed ipc syscall.  Call version 1
 * passes the message type directly in the "fifth" argument rather than
 * via an indirection struct. */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
/* semtimedop() routed through the multiplexed ipc syscall; the timeout
 * pointer travels in the "fifth" slot cast to long. */
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
917 #endif
918 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
919 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
920 size_t, len, unsigned, prio, const struct timespec *, timeout)
921 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
922 size_t, len, unsigned *, prio, const struct timespec *, timeout)
923 #endif
924 /* We do ioctl like this rather than via safe_syscall3 to preserve the
925 * "third argument might be integer or pointer or not present" behaviour of
926 * the libc function.
927 */
928 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
929 /* Similarly for fcntl. Note that callers must always:
930 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
931 * use the flock64 struct rather than unsuffixed flock
932 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
933 */
934 #ifdef __NR_fcntl64
935 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
936 #else
937 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
938 #endif
939
940 static inline int host_to_target_sock_type(int host_type)
941 {
942 int target_type;
943
944 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
945 case SOCK_DGRAM:
946 target_type = TARGET_SOCK_DGRAM;
947 break;
948 case SOCK_STREAM:
949 target_type = TARGET_SOCK_STREAM;
950 break;
951 default:
952 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
953 break;
954 }
955
956 #if defined(SOCK_CLOEXEC)
957 if (host_type & SOCK_CLOEXEC) {
958 target_type |= TARGET_SOCK_CLOEXEC;
959 }
960 #endif
961
962 #if defined(SOCK_NONBLOCK)
963 if (host_type & SOCK_NONBLOCK) {
964 target_type |= TARGET_SOCK_NONBLOCK;
965 }
966 #endif
967
968 return target_type;
969 }
970
971 static abi_ulong target_brk;
972 static abi_ulong target_original_brk;
973 static abi_ulong brk_page;
974
975 void target_set_brk(abi_ulong new_brk)
976 {
977 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
978 brk_page = HOST_PAGE_ALIGN(target_brk);
979 }
980
981 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
982 #define DEBUGF_BRK(message, args...)
983
/* do_brk() must return target values and target errnos.
 *
 * Emulates the brk() syscall using the module-level target_brk /
 * target_original_brk / brk_page state.  New heap space is obtained
 * with target_mmap() rather than host brk().
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to move below the initial break; report the current value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        /* target_mmap() failed outright; fall through to the
         * unchanged-break (or ENOMEM on Alpha) result below. */
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
1061
1062 static inline abi_long copy_from_user_fdset(fd_set *fds,
1063 abi_ulong target_fds_addr,
1064 int n)
1065 {
1066 int i, nw, j, k;
1067 abi_ulong b, *target_fds;
1068
1069 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1070 if (!(target_fds = lock_user(VERIFY_READ,
1071 target_fds_addr,
1072 sizeof(abi_ulong) * nw,
1073 1)))
1074 return -TARGET_EFAULT;
1075
1076 FD_ZERO(fds);
1077 k = 0;
1078 for (i = 0; i < nw; i++) {
1079 /* grab the abi_ulong */
1080 __get_user(b, &target_fds[i]);
1081 for (j = 0; j < TARGET_ABI_BITS; j++) {
1082 /* check the bit inside the abi_ulong */
1083 if ((b >> j) & 1)
1084 FD_SET(k, fds);
1085 k++;
1086 }
1087 }
1088
1089 unlock_user(target_fds, target_fds_addr, 0);
1090
1091 return 0;
1092 }
1093
1094 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1095 abi_ulong target_fds_addr,
1096 int n)
1097 {
1098 if (target_fds_addr) {
1099 if (copy_from_user_fdset(fds, target_fds_addr, n))
1100 return -TARGET_EFAULT;
1101 *fds_ptr = fds;
1102 } else {
1103 *fds_ptr = NULL;
1104 }
1105 return 0;
1106 }
1107
1108 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1109 const fd_set *fds,
1110 int n)
1111 {
1112 int i, nw, j, k;
1113 abi_long v;
1114 abi_ulong *target_fds;
1115
1116 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1117 if (!(target_fds = lock_user(VERIFY_WRITE,
1118 target_fds_addr,
1119 sizeof(abi_ulong) * nw,
1120 0)))
1121 return -TARGET_EFAULT;
1122
1123 k = 0;
1124 for (i = 0; i < nw; i++) {
1125 v = 0;
1126 for (j = 0; j < TARGET_ABI_BITS; j++) {
1127 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1128 k++;
1129 }
1130 __put_user(v, &target_fds[i]);
1131 }
1132
1133 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1134
1135 return 0;
1136 }
1137
1138 #if defined(__alpha__)
1139 #define HOST_HZ 1024
1140 #else
1141 #define HOST_HZ 100
1142 #endif
1143
/* Rescale a tick count from the host's clock frequency (HOST_HZ)
 * to the target's (TARGET_HZ). */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen to 64-bit so the intermediate product cannot overflow. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
1152
/* Copy a host struct rusage out to guest memory, byteswapping every
 * field.  Returns 0 or -TARGET_EFAULT if the guest address is bad. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1182
1183 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1184 {
1185 abi_ulong target_rlim_swap;
1186 rlim_t result;
1187
1188 target_rlim_swap = tswapal(target_rlim);
1189 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1190 return RLIM_INFINITY;
1191
1192 result = target_rlim_swap;
1193 if (target_rlim_swap != (rlim_t)result)
1194 return RLIM_INFINITY;
1195
1196 return result;
1197 }
1198
1199 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1200 {
1201 abi_ulong target_rlim_swap;
1202 abi_ulong result;
1203
1204 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1205 target_rlim_swap = TARGET_RLIM_INFINITY;
1206 else
1207 target_rlim_swap = rlim;
1208 result = tswapal(target_rlim_swap);
1209
1210 return result;
1211 }
1212
/* Map a target RLIMIT_* resource code to the host's constant.
 * Unknown codes are passed through unchanged. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1250
1251 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1252 abi_ulong target_tv_addr)
1253 {
1254 struct target_timeval *target_tv;
1255
1256 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1257 return -TARGET_EFAULT;
1258
1259 __get_user(tv->tv_sec, &target_tv->tv_sec);
1260 __get_user(tv->tv_usec, &target_tv->tv_usec);
1261
1262 unlock_user_struct(target_tv, target_tv_addr, 0);
1263
1264 return 0;
1265 }
1266
1267 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1268 const struct timeval *tv)
1269 {
1270 struct target_timeval *target_tv;
1271
1272 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1273 return -TARGET_EFAULT;
1274
1275 __put_user(tv->tv_sec, &target_tv->tv_sec);
1276 __put_user(tv->tv_usec, &target_tv->tv_usec);
1277
1278 unlock_user_struct(target_tv, target_tv_addr, 1);
1279
1280 return 0;
1281 }
1282
/* Read a struct timezone from guest memory, byteswapping both fields.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
1299
1300 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1301 #include <mqueue.h>
1302
1303 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1304 abi_ulong target_mq_attr_addr)
1305 {
1306 struct target_mq_attr *target_mq_attr;
1307
1308 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1309 target_mq_attr_addr, 1))
1310 return -TARGET_EFAULT;
1311
1312 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1313 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1314 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1315 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1316
1317 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1318
1319 return 0;
1320 }
1321
1322 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1323 const struct mq_attr *attr)
1324 {
1325 struct target_mq_attr *target_mq_attr;
1326
1327 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1328 target_mq_attr_addr, 0))
1329 return -TARGET_EFAULT;
1330
1331 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1332 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1333 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1334 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1335
1336 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1337
1338 return 0;
1339 }
1340 #endif
1341
1342 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Implements select() on top of safe_pselect6(): the guest fd sets
 * are copied in (a zero address means NULL), the timeval is converted
 * to a timespec, and on success the kernel-updated sets and remaining
 * time are copied back out.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Write back ts (which pselect6 may have updated in place)
         * as the remaining time, matching Linux select() semantics. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1399 #endif
1400
/* Thin wrapper around host pipe2(); returns -ENOSYS when the build
 * host's libc doesn't provide it (CONFIG_PIPE2 unset). */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1409
/* Common implementation for the pipe and pipe2 syscalls.
 * @pipedes: guest address of the int[2] result array.
 * @is_pipe2: nonzero when emulating pipe2 (disables the per-target
 *            register-return quirks below).
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    /* With no flags, plain pipe() suffices and works on hosts
     * without pipe2(). */
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* These targets return fd[0] as the syscall result and
         * fd[1] in a second register, instead of filling the array. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Generic path: store both fds into the guest's array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1443
1444 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1445 abi_ulong target_addr,
1446 socklen_t len)
1447 {
1448 struct target_ip_mreqn *target_smreqn;
1449
1450 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1451 if (!target_smreqn)
1452 return -TARGET_EFAULT;
1453 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1454 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1455 if (len == sizeof(struct target_ip_mreqn))
1456 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1457 unlock_user(target_smreqn, target_addr, 0);
1458
1459 return 0;
1460 }
1461
/* Convert a guest sockaddr to the host representation.
 * Handles the AF_UNIX path-termination quirk, and byteswaps the
 * family-specific fields for AF_NETLINK and AF_PACKET.  Some fds have
 * a registered per-fd translator which takes over entirely. */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* extend len by one byte if that byte is already a NUL
             * terminator just past the caller-supplied length */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1518
/* Copy a host sockaddr out to guest memory, byteswapping the family
 * field (when it fits in len) and the AF_NETLINK/AF_PACKET-specific
 * fields.  len may legitimately truncate the structure. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only byteswap sa_family if the copy actually included it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1550
/* Convert the control-message (ancillary data) chain of a guest
 * msghdr into the host msghdr's pre-allocated control buffer.
 * Supported payloads (SCM_RIGHTS, SCM_CREDENTIALS) are converted
 * element-wise; anything else is copied raw with a warning.
 * On return msgh->msg_controllen holds the converted length. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* payload length, excluding the (aligned) target header */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* passed fds: swap each int individually */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                                        cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1632
/* Convert the host msghdr's control-message chain back into the guest
 * msghdr's buffer, converting known payload types and setting
 * MSG_CTRUNC (in target byte order) when the guest buffer is too
 * small.  On return target_msgh->msg_controllen holds the space used. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            /* fall through to outer default */
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        /* NOTE(review): tgt_space is computed from the host payload
         * length, not tgt_len; for payloads whose target size differs
         * (SO_TIMESTAMP above) the advance may not match cmsg_len —
         * verify against upstream behaviour. */
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
1776
/* Byteswap every field of a netlink message header in place.  The
 * swaps are symmetric, so the same function serves both directions
 * (host->target and target->host). */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1785
/* Walk a buffer of host-byte-order netlink messages, converting the
 * payload of each via the callback and then byteswapping the header.
 * NLMSG_DONE/NLMSG_ERROR terminate the walk after conversion. */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* capture the length before the header is byteswapped below */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
1830
/* Walk a buffer of target-byte-order netlink messages: validate and
 * byteswap each header to host order first, then convert the payload
 * via the callback.  NLMSG_DONE/NLMSG_ERROR terminate the walk. */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* length is still target-order here, hence the tswap32()s */
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        /* header is host-order now, so nlmsg_len can be used directly */
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
1867
1868 #ifdef CONFIG_RTNETLINK
/* Walk a host-byte-order netlink attribute list, converting each
 * attribute's payload via the callback (with an opaque context) and
 * then byteswapping its header.  Stops on a malformed length. */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                               (struct nlattr *,
                                                void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* capture the length before the header is byteswapped below */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
1895
1896 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1897 size_t len,
1898 abi_long (*host_to_target_rtattr)
1899 (struct rtattr *))
1900 {
1901 unsigned short rta_len;
1902 abi_long ret;
1903
1904 while (len > sizeof(struct rtattr)) {
1905 rta_len = rtattr->rta_len;
1906 if (rta_len < sizeof(struct rtattr) ||
1907 rta_len > len) {
1908 break;
1909 }
1910 ret = host_to_target_rtattr(rtattr);
1911 rtattr->rta_len = tswap16(rtattr->rta_len);
1912 rtattr->rta_type = tswap16(rtattr->rta_type);
1913 if (ret < 0) {
1914 return ret;
1915 }
1916 len -= RTA_ALIGN(rta_len);
1917 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
1918 }
1919 return 0;
1920 }
1921
/* Pointer to the payload following a netlink attribute header.
 * The addition must be done on the char * before casting to void *:
 * the previous form applied the cast first, leaving `(void *) + int`
 * arithmetic, which only works via the GNU void-pointer-arithmetic
 * extension. */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
1923
/* Byteswap the payload of one IFLA_BR_* bridge attribute in place,
 * dispatching on the attribute type to pick the payload width.
 * uint8_t and raw-binary payloads need no conversion. */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        /* Unknown attribute: warn and leave the payload untouched. */
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
1997
/* Byteswap the payload of one IFLA_BRPORT_* bridge-port attribute in
 * place, dispatching on the attribute type to pick the payload width. */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        /* Unknown attribute: warn and leave the payload untouched. */
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
2052
/* Scratch state threaded through the QEMU_IFLA_LINKINFO nested-attribute
 * walk: records the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings so
 * that later IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA payloads can be
 * dispatched on the link kind.  Zero-initialized by the caller, so the
 * name pointers are NULL until the corresponding KIND attribute is seen. */
struct linkinfo_context {
    int len;          /* length of 'name' payload (excludes NLA header) */
    char *name;       /* IFLA_INFO_KIND string, points into the message */
    int slave_len;    /* length of 'slave_name' payload */
    char *slave_name; /* IFLA_INFO_SLAVE_KIND string */
};
2059
2060 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2061 void *context)
2062 {
2063 struct linkinfo_context *li_context = context;
2064
2065 switch (nlattr->nla_type) {
2066 /* string */
2067 case QEMU_IFLA_INFO_KIND:
2068 li_context->name = NLA_DATA(nlattr);
2069 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2070 break;
2071 case QEMU_IFLA_INFO_SLAVE_KIND:
2072 li_context->slave_name = NLA_DATA(nlattr);
2073 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2074 break;
2075 /* stats */
2076 case QEMU_IFLA_INFO_XSTATS:
2077 /* FIXME: only used by CAN */
2078 break;
2079 /* nested */
2080 case QEMU_IFLA_INFO_DATA:
2081 if (strncmp(li_context->name, "bridge",
2082 li_context->len) == 0) {
2083 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2084 nlattr->nla_len,
2085 NULL,
2086 host_to_target_data_bridge_nlattr);
2087 } else {
2088 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2089 }
2090 break;
2091 case QEMU_IFLA_INFO_SLAVE_DATA:
2092 if (strncmp(li_context->slave_name, "bridge",
2093 li_context->slave_len) == 0) {
2094 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2095 nlattr->nla_len,
2096 NULL,
2097 host_to_target_slave_data_bridge_nlattr);
2098 } else {
2099 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2100 li_context->slave_name);
2101 }
2102 break;
2103 default:
2104 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2105 break;
2106 }
2107
2108 return 0;
2109 }
2110
2111 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2112 void *context)
2113 {
2114 uint32_t *u32;
2115 int i;
2116
2117 switch (nlattr->nla_type) {
2118 case QEMU_IFLA_INET_CONF:
2119 u32 = NLA_DATA(nlattr);
2120 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2121 i++) {
2122 u32[i] = tswap32(u32[i]);
2123 }
2124 break;
2125 default:
2126 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2127 }
2128 return 0;
2129 }
2130
2131 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2132 void *context)
2133 {
2134 uint32_t *u32;
2135 uint64_t *u64;
2136 struct ifla_cacheinfo *ci;
2137 int i;
2138
2139 switch (nlattr->nla_type) {
2140 /* binaries */
2141 case QEMU_IFLA_INET6_TOKEN:
2142 break;
2143 /* uint8_t */
2144 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2145 break;
2146 /* uint32_t */
2147 case QEMU_IFLA_INET6_FLAGS:
2148 u32 = NLA_DATA(nlattr);
2149 *u32 = tswap32(*u32);
2150 break;
2151 /* uint32_t[] */
2152 case QEMU_IFLA_INET6_CONF:
2153 u32 = NLA_DATA(nlattr);
2154 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2155 i++) {
2156 u32[i] = tswap32(u32[i]);
2157 }
2158 break;
2159 /* ifla_cacheinfo */
2160 case QEMU_IFLA_INET6_CACHEINFO:
2161 ci = NLA_DATA(nlattr);
2162 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2163 ci->tstamp = tswap32(ci->tstamp);
2164 ci->reachable_time = tswap32(ci->reachable_time);
2165 ci->retrans_time = tswap32(ci->retrans_time);
2166 break;
2167 /* uint64_t[] */
2168 case QEMU_IFLA_INET6_STATS:
2169 case QEMU_IFLA_INET6_ICMP6STATS:
2170 u64 = NLA_DATA(nlattr);
2171 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2172 i++) {
2173 u64[i] = tswap64(u64[i]);
2174 }
2175 break;
2176 default:
2177 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2178 }
2179 return 0;
2180 }
2181
2182 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2183 void *context)
2184 {
2185 switch (nlattr->nla_type) {
2186 case AF_INET:
2187 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2188 NULL,
2189 host_to_target_data_inet_nlattr);
2190 case AF_INET6:
2191 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2192 NULL,
2193 host_to_target_data_inet6_nlattr);
2194 default:
2195 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2196 break;
2197 }
2198 return 0;
2199 }
2200
/* Convert one IFLA_* attribute of an RTM_*LINK message from host to
 * target byte order, in place.  String/binary payloads are untouched;
 * fixed-width integers and the stats/map structures are swapped field
 * by field; LINKINFO and AF_SPEC recurse into their nested attributes.
 * Always returns 0 except for the nested walks' return value. */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uint8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats: swapped member by member */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64: 64-bit mirror of the above */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        /* zeroed so the INFO_KIND pointers start out NULL */
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        /* NOTE(review): rta_len includes the rtattr header here; confirm
         * host_to_target_for_each_nlattr() expects the full length. */
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                             host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2330
2331 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2332 {
2333 uint32_t *u32;
2334 struct ifa_cacheinfo *ci;
2335
2336 switch (rtattr->rta_type) {
2337 /* binary: depends on family type */
2338 case IFA_ADDRESS:
2339 case IFA_LOCAL:
2340 break;
2341 /* string */
2342 case IFA_LABEL:
2343 break;
2344 /* u32 */
2345 case IFA_FLAGS:
2346 case IFA_BROADCAST:
2347 u32 = RTA_DATA(rtattr);
2348 *u32 = tswap32(*u32);
2349 break;
2350 /* struct ifa_cacheinfo */
2351 case IFA_CACHEINFO:
2352 ci = RTA_DATA(rtattr);
2353 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2354 ci->ifa_valid = tswap32(ci->ifa_valid);
2355 ci->cstamp = tswap32(ci->cstamp);
2356 ci->tstamp = tswap32(ci->tstamp);
2357 break;
2358 default:
2359 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2360 break;
2361 }
2362 return 0;
2363 }
2364
2365 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2366 {
2367 uint32_t *u32;
2368 switch (rtattr->rta_type) {
2369 /* binary: depends on family type */
2370 case RTA_GATEWAY:
2371 case RTA_DST:
2372 case RTA_PREFSRC:
2373 break;
2374 /* u32 */
2375 case RTA_PRIORITY:
2376 case RTA_TABLE:
2377 case RTA_OIF:
2378 u32 = RTA_DATA(rtattr);
2379 *u32 = tswap32(*u32);
2380 break;
2381 default:
2382 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2383 break;
2384 }
2385 return 0;
2386 }
2387
/* Byte-swap every IFLA_* attribute of an RTM_*LINK message (host -> target). */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
2394
/* Byte-swap every IFA_* attribute of an RTM_*ADDR message (host -> target). */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
2401
/* Byte-swap every RTA_* attribute of an RTM_*ROUTE message (host -> target). */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
2408
/* Convert the payload of one rtnetlink message from host to target
 * byte order, in place: the family-specific header (ifinfomsg /
 * ifaddrmsg / rtmsg) plus its trailing rtattrs.  The nlmsghdr fields
 * are still read in host order here.  Returns -TARGET_EINVAL for
 * message types that are not handled. */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    /* keep a host-order copy of the length for the rtattr walks below */
    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        /* only convert the body if it is big enough to hold an ifinfomsg */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
2456
/* Byte-swap a buffer of rtnetlink messages (host -> target). */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
2462
2463 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2464 size_t len,
2465 abi_long (*target_to_host_rtattr)
2466 (struct rtattr *))
2467 {
2468 abi_long ret;
2469
2470 while (len >= sizeof(struct rtattr)) {
2471 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2472 tswap16(rtattr->rta_len) > len) {
2473 break;
2474 }
2475 rtattr->rta_len = tswap16(rtattr->rta_len);
2476 rtattr->rta_type = tswap16(rtattr->rta_type);
2477 ret = target_to_host_rtattr(rtattr);
2478 if (ret < 0) {
2479 return ret;
2480 }
2481 len -= RTA_ALIGN(rtattr->rta_len);
2482 rtattr = (struct rtattr *)(((char *)rtattr) +
2483 RTA_ALIGN(rtattr->rta_len));
2484 }
2485 return 0;
2486 }
2487
2488 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2489 {
2490 switch (rtattr->rta_type) {
2491 default:
2492 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2493 break;
2494 }
2495 return 0;
2496 }
2497
2498 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2499 {
2500 switch (rtattr->rta_type) {
2501 /* binary: depends on family type */
2502 case IFA_LOCAL:
2503 case IFA_ADDRESS:
2504 break;
2505 default:
2506 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2507 break;
2508 }
2509 return 0;
2510 }
2511
2512 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2513 {
2514 uint32_t *u32;
2515 switch (rtattr->rta_type) {
2516 /* binary: depends on family type */
2517 case RTA_DST:
2518 case RTA_SRC:
2519 case RTA_GATEWAY:
2520 break;
2521 /* u32 */
2522 case RTA_OIF:
2523 u32 = RTA_DATA(rtattr);
2524 *u32 = tswap32(*u32);
2525 break;
2526 default:
2527 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2528 break;
2529 }
2530 return 0;
2531 }
2532
/* Byte-swap every IFLA_* attribute of an RTM_*LINK message (target -> host). */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
2539
/* Byte-swap every IFA_* attribute of an RTM_*ADDR message (target -> host). */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
2546
/* Byte-swap every RTA_* attribute of an RTM_*ROUTE message (target -> host). */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
2553
/* Convert the payload of one target-originated rtnetlink message to
 * host byte order, in place.  GET requests carry no family header
 * payload that needs conversion.  Returns -TARGET_EOPNOTSUPP for
 * message types that are not handled. */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        /* only convert the body if it is big enough to hold an ifinfomsg */
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
2601
/* Byte-swap a buffer of rtnetlink messages (target -> host). */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2606 #endif /* CONFIG_RTNETLINK */
2607
2608 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2609 {
2610 switch (nlh->nlmsg_type) {
2611 default:
2612 gemu_log("Unknown host audit message type %d\n",
2613 nlh->nlmsg_type);
2614 return -TARGET_EINVAL;
2615 }
2616 return 0;
2617 }
2618
/* Byte-swap a buffer of audit netlink messages (host -> target). */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
2624
2625 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2626 {
2627 switch (nlh->nlmsg_type) {
2628 case AUDIT_USER:
2629 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2630 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2631 break;
2632 default:
2633 gemu_log("Unknown target audit message type %d\n",
2634 nlh->nlmsg_type);
2635 return -TARGET_EINVAL;
2636 }
2637
2638 return 0;
2639 }
2640
/* Byte-swap a buffer of audit netlink messages (target -> host). */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
2645
2646 /* do_setsockopt() Must return target values and target errnos. */
2647 static abi_long do_setsockopt(int sockfd, int level, int optname,
2648 abi_ulong optval_addr, socklen_t optlen)
2649 {
2650 abi_long ret;
2651 int val;
2652 struct ip_mreqn *ip_mreq;
2653 struct ip_mreq_source *ip_mreq_source;
2654
2655 switch(level) {
2656 case SOL_TCP:
2657 /* TCP options all take an 'int' value. */
2658 if (optlen < sizeof(uint32_t))
2659 return -TARGET_EINVAL;
2660
2661 if (get_user_u32(val, optval_addr))
2662 return -TARGET_EFAULT;
2663 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2664 break;
2665 case SOL_IP:
2666 switch(optname) {
2667 case IP_TOS:
2668 case IP_TTL:
2669 case IP_HDRINCL:
2670 case IP_ROUTER_ALERT:
2671 case IP_RECVOPTS:
2672 case IP_RETOPTS:
2673 case IP_PKTINFO:
2674 case IP_MTU_DISCOVER:
2675 case IP_RECVERR:
2676 case IP_RECVTOS:
2677 #ifdef IP_FREEBIND
2678 case IP_FREEBIND:
2679 #endif
2680 case IP_MULTICAST_TTL:
2681 case IP_MULTICAST_LOOP:
2682 val = 0;
2683 if (optlen >= sizeof(uint32_t)) {
2684 if (get_user_u32(val, optval_addr))
2685 return -TARGET_EFAULT;
2686 } else if (optlen >= 1) {
2687 if (get_user_u8(val, optval_addr))
2688 return -TARGET_EFAULT;
2689 }
2690 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2691 break;
2692 case IP_ADD_MEMBERSHIP:
2693 case IP_DROP_MEMBERSHIP:
2694 if (optlen < sizeof (struct target_ip_mreq) ||
2695 optlen > sizeof (struct target_ip_mreqn))
2696 return -TARGET_EINVAL;
2697
2698 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2699 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2700 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2701 break;
2702
2703 case IP_BLOCK_SOURCE:
2704 case IP_UNBLOCK_SOURCE:
2705 case IP_ADD_SOURCE_MEMBERSHIP:
2706 case IP_DROP_SOURCE_MEMBERSHIP:
2707 if (optlen != sizeof (struct target_ip_mreq_source))
2708 return -TARGET_EINVAL;
2709
2710 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2711 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2712 unlock_user (ip_mreq_source, optval_addr, 0);
2713 break;
2714
2715 default:
2716 goto unimplemented;
2717 }
2718 break;
2719 case SOL_IPV6:
2720 switch (optname) {
2721 case IPV6_MTU_DISCOVER:
2722 case IPV6_MTU:
2723 case IPV6_V6ONLY:
2724 case IPV6_RECVPKTINFO:
2725 val = 0;
2726 if (optlen < sizeof(uint32_t)) {
2727 return -TARGET_EINVAL;
2728 }
2729 if (get_user_u32(val, optval_addr)) {
2730 return -TARGET_EFAULT;
2731 }
2732 ret = get_errno(setsockopt(sockfd, level, optname,
2733 &val, sizeof(val)));
2734 break;
2735 default:
2736 goto unimplemented;
2737 }
2738 break;
2739 case SOL_RAW:
2740 switch (optname) {
2741 case ICMP_FILTER:
2742 /* struct icmp_filter takes an u32 value */
2743 if (optlen < sizeof(uint32_t)) {
2744 return -TARGET_EINVAL;
2745 }
2746
2747 if (get_user_u32(val, optval_addr)) {
2748 return -TARGET_EFAULT;
2749 }
2750 ret = get_errno(setsockopt(sockfd, level, optname,
2751 &val, sizeof(val)));
2752 break;
2753
2754 default:
2755 goto unimplemented;
2756 }
2757 break;
2758 case TARGET_SOL_SOCKET:
2759 switch (optname) {
2760 case TARGET_SO_RCVTIMEO:
2761 {
2762 struct timeval tv;
2763
2764 optname = SO_RCVTIMEO;
2765
2766 set_timeout:
2767 if (optlen != sizeof(struct target_timeval)) {
2768 return -TARGET_EINVAL;
2769 }
2770
2771 if (copy_from_user_timeval(&tv, optval_addr)) {
2772 return -TARGET_EFAULT;
2773 }
2774
2775 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2776 &tv, sizeof(tv)));
2777 return ret;
2778 }
2779 case TARGET_SO_SNDTIMEO:
2780 optname = SO_SNDTIMEO;
2781 goto set_timeout;
2782 case TARGET_SO_ATTACH_FILTER:
2783 {
2784 struct target_sock_fprog *tfprog;
2785 struct target_sock_filter *tfilter;
2786 struct sock_fprog fprog;
2787 struct sock_filter *filter;
2788 int i;
2789
2790 if (optlen != sizeof(*tfprog)) {
2791 return -TARGET_EINVAL;
2792 }
2793 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2794 return -TARGET_EFAULT;
2795 }
2796 if (!lock_user_struct(VERIFY_READ, tfilter,
2797 tswapal(tfprog->filter), 0)) {
2798 unlock_user_struct(tfprog, optval_addr, 1);
2799 return -TARGET_EFAULT;
2800 }
2801
2802 fprog.len = tswap16(tfprog->len);
2803 filter = g_try_new(struct sock_filter, fprog.len);
2804 if (filter == NULL) {
2805 unlock_user_struct(tfilter, tfprog->filter, 1);
2806 unlock_user_struct(tfprog, optval_addr, 1);
2807 return -TARGET_ENOMEM;
2808 }
2809 for (i = 0; i < fprog.len; i++) {
2810 filter[i].code = tswap16(tfilter[i].code);
2811 filter[i].jt = tfilter[i].jt;
2812 filter[i].jf = tfilter[i].jf;
2813 filter[i].k = tswap32(tfilter[i].k);
2814 }
2815 fprog.filter = filter;
2816
2817 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2818 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2819 g_free(filter);
2820
2821 unlock_user_struct(tfilter, tfprog->filter, 1);
2822 unlock_user_struct(tfprog, optval_addr, 1);
2823 return ret;
2824 }
2825 case TARGET_SO_BINDTODEVICE:
2826 {
2827 char *dev_ifname, *addr_ifname;
2828
2829 if (optlen > IFNAMSIZ - 1) {
2830 optlen = IFNAMSIZ - 1;
2831 }
2832 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2833 if (!dev_ifname) {
2834 return -TARGET_EFAULT;
2835 }
2836 optname = SO_BINDTODEVICE;
2837 addr_ifname = alloca(IFNAMSIZ);
2838 memcpy(addr_ifname, dev_ifname, optlen);
2839 addr_ifname[optlen] = 0;
2840 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2841 addr_ifname, optlen));
2842 unlock_user (dev_ifname, optval_addr, 0);
2843 return ret;
2844 }
2845 /* Options with 'int' argument. */
2846 case TARGET_SO_DEBUG:
2847 optname = SO_DEBUG;
2848 break;
2849 case TARGET_SO_REUSEADDR:
2850 optname = SO_REUSEADDR;
2851 break;
2852 case TARGET_SO_TYPE:
2853 optname = SO_TYPE;
2854 break;
2855 case TARGET_SO_ERROR:
2856 optname = SO_ERROR;
2857 break;
2858 case TARGET_SO_DONTROUTE:
2859 optname = SO_DONTROUTE;
2860 break;
2861 case TARGET_SO_BROADCAST:
2862 optname = SO_BROADCAST;
2863 break;
2864 case TARGET_SO_SNDBUF:
2865 optname = SO_SNDBUF;
2866 break;
2867 case TARGET_SO_SNDBUFFORCE:
2868 optname = SO_SNDBUFFORCE;
2869 break;
2870 case TARGET_SO_RCVBUF:
2871 optname = SO_RCVBUF;
2872 break;
2873 case TARGET_SO_RCVBUFFORCE:
2874 optname = SO_RCVBUFFORCE;
2875 break;
2876 case TARGET_SO_KEEPALIVE:
2877 optname = SO_KEEPALIVE;
2878 break;
2879 case TARGET_SO_OOBINLINE:
2880 optname = SO_OOBINLINE;
2881 break;
2882 case TARGET_SO_NO_CHECK:
2883 optname = SO_NO_CHECK;
2884 break;
2885 case TARGET_SO_PRIORITY:
2886 optname = SO_PRIORITY;
2887 break;
2888 #ifdef SO_BSDCOMPAT
2889 case TARGET_SO_BSDCOMPAT:
2890 optname = SO_BSDCOMPAT;
2891 break;
2892 #endif
2893 case TARGET_SO_PASSCRED:
2894 optname = SO_PASSCRED;
2895 break;
2896 case TARGET_SO_PASSSEC:
2897 optname = SO_PASSSEC;
2898 break;
2899 case TARGET_SO_TIMESTAMP:
2900 optname = SO_TIMESTAMP;
2901 break;
2902 case TARGET_SO_RCVLOWAT:
2903 optname = SO_RCVLOWAT;
2904 break;
2905 break;
2906 default:
2907 goto unimplemented;
2908 }
2909 if (optlen < sizeof(uint32_t))
2910 return -TARGET_EINVAL;
2911
2912 if (get_user_u32(val, optval_addr))
2913 return -TARGET_EFAULT;
2914 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2915 break;
2916 default:
2917 unimplemented:
2918 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2919 ret = -TARGET_ENOPROTOOPT;
2920 }
2921 return ret;
2922 }
2923
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates a guest getsockopt(2) into the host call.  Most options
 * take an 'int' and funnel through the shared int_case tail; the
 * SO_PEERCRED structure is converted field by field.  The optlen
 * pointer is read from and written back to guest memory.  Note the
 * cross-case goto labels (int_case, unimplemented): the control flow
 * deliberately jumps between switch arms. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* fetch the host ucred, then copy it field by field into the
             * (possibly differently laid out) target_ucred */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) happens to equal sizeof(val) here;
         * presumably sizeof(val) was intended -- confirm. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        /* never copy back more than the host produced */
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* the guest asked for less than an int: hand back one byte */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
3111
/* Translate a guest iovec array at target_addr (count entries) into a
 * host struct iovec array with each buffer locked into host memory.
 * On success returns the allocated vector (caller releases it with
 * unlock_iovec); on failure returns NULL with errno set.  A bad first
 * buffer is a hard EFAULT; later bad buffers degrade to zero-length
 * entries so the caller performs a partial transfer, matching kernel
 * writev semantics. */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* clamp so the summed length never exceeds max_len */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* release every buffer locked so far (entries with zero length
     * were never locked) */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3199
/* Release a host iovec array obtained from lock_iovec(), unlocking
 * every guest buffer.  'copy' says whether buffer contents should be
 * written back to the guest (i.e. the iovec was used for a read-style
 * operation).  The guest vector is re-read to recover each buffer's
 * guest address; negative lengths terminate the walk, mirroring the
 * EINVAL bail-out in lock_iovec(). */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            /* Use the clamped host length for write-back, not the
             * guest-declared one. */
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
3222
3223 static inline int target_to_host_sock_type(int *type)
3224 {
3225 int host_type = 0;
3226 int target_type = *type;
3227
3228 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3229 case TARGET_SOCK_DGRAM:
3230 host_type = SOCK_DGRAM;
3231 break;
3232 case TARGET_SOCK_STREAM:
3233 host_type = SOCK_STREAM;
3234 break;
3235 default:
3236 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3237 break;
3238 }
3239 if (target_type & TARGET_SOCK_CLOEXEC) {
3240 #if defined(SOCK_CLOEXEC)
3241 host_type |= SOCK_CLOEXEC;
3242 #else
3243 return -TARGET_EINVAL;
3244 #endif
3245 }
3246 if (target_type & TARGET_SOCK_NONBLOCK) {
3247 #if defined(SOCK_NONBLOCK)
3248 host_type |= SOCK_NONBLOCK;
3249 #elif !defined(O_NONBLOCK)
3250 return -TARGET_EINVAL;
3251 #endif
3252 }
3253 *type = host_type;
3254 return 0;
3255 }
3256
3257 /* Try to emulate socket type flags after socket creation. */
3258 static int sock_flags_fixup(int fd, int target_type)
3259 {
3260 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3261 if (target_type & TARGET_SOCK_NONBLOCK) {
3262 int flags = fcntl(fd, F_GETFL);
3263 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3264 close(fd);
3265 return -TARGET_EINVAL;
3266 }
3267 }
3268 #endif
3269 return fd;
3270 }
3271
3272 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3273 abi_ulong target_addr,
3274 socklen_t len)
3275 {
3276 struct sockaddr *addr = host_addr;
3277 struct target_sockaddr *target_saddr;
3278
3279 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3280 if (!target_saddr) {
3281 return -TARGET_EFAULT;
3282 }
3283
3284 memcpy(addr, target_saddr, len);
3285 addr->sa_family = tswap16(target_saddr->sa_family);
3286 /* spkt_protocol is big-endian */
3287
3288 unlock_user(target_saddr, target_addr, 0);
3289 return 0;
3290 }
3291
/* fd translator registered for SOCK_PACKET sockets by do_socket():
 * only the sockaddr needs target-to-host conversion. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
3295
#ifdef CONFIG_RTNETLINK
/* Byteswap an outgoing NETLINK_ROUTE message in place; returns the
 * unchanged length on success or a negative target errno. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long err;

    err = target_to_host_nlmsg_route(buf, len);
    if (err < 0) {
        return err;
    }

    return len;
}

/* Byteswap an incoming NETLINK_ROUTE message in place; returns the
 * unchanged length on success or a negative target errno. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long err;

    err = host_to_target_nlmsg_route(buf, len);
    if (err < 0) {
        return err;
    }

    return len;
}

/* fd translator attached to NETLINK_ROUTE sockets by do_socket(). */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
3326
3327 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3328 {
3329 abi_long ret;
3330
3331 ret = target_to_host_nlmsg_audit(buf, len);
3332 if (ret < 0) {
3333 return ret;
3334 }
3335
3336 return len;
3337 }
3338
3339 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3340 {
3341 abi_long ret;
3342
3343 ret = host_to_target_nlmsg_audit(buf, len);
3344 if (ret < 0) {
3345 return ret;
3346 }
3347
3348 return len;
3349 }
3350
3351 static TargetFdTrans target_netlink_audit_trans = {
3352 .target_to_host_data = netlink_audit_target_to_host,
3353 .host_to_target_data = netlink_audit_host_to_target,
3354 };
3355
/* do_socket() Must return target values and target errnos.
 * Creates a host socket for the guest, restricting netlink protocols
 * to those we can byteswap, and registers fd translators where the
 * traffic needs conversion. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    /* Map the target's SOCK_* type and flag bits onto the host's. */
    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols with registered translators below may pass;
     * anything else would hand untranslated messages to the guest. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -EPFNOSUPPORT;
    }

    /* Packet sockets carry the protocol in network byte order; tswap16
     * re-encodes the guest's 16-bit value for the host. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        /* Emulate SOCK_NONBLOCK via fcntl on hosts that lack it. */
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* unreachable: filtered by the protocol check above */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3409
3410 /* do_bind() Must return target values and target errnos. */
3411 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3412 socklen_t addrlen)
3413 {
3414 void *addr;
3415 abi_long ret;
3416
3417 if ((int)addrlen < 0) {
3418 return -TARGET_EINVAL;
3419 }
3420
3421 addr = alloca(addrlen+1);
3422
3423 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3424 if (ret)
3425 return ret;
3426
3427 return get_errno(bind(sockfd, addr, addrlen));
3428 }
3429
3430 /* do_connect() Must return target values and target errnos. */
3431 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3432 socklen_t addrlen)
3433 {
3434 void *addr;
3435 abi_long ret;
3436
3437 if ((int)addrlen < 0) {
3438 return -TARGET_EINVAL;
3439 }
3440
3441 addr = alloca(addrlen+1);
3442
3443 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3444 if (ret)
3445 return ret;
3446
3447 return get_errno(safe_connect(sockfd, addr, addrlen));
3448 }
3449
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 * Core of sendmsg/recvmsg emulation: 'msgp' is the already-locked guest
 * msghdr, 'send' selects direction.  Converts the name, control and
 * iovec parts between guest and host representations around the real
 * syscall. */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsg encoding can need more room than the guest's; 2x is
     * headroom for the conversion done by target_to_host_cmsg(). */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate a copy of the first buffer so the guest's own
             * memory is left untouched. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       len);
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                /* (void *)-1 marks the deliberately-bad name above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* On success report the number of bytes received. */
                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3553
3554 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3555 int flags, int send)
3556 {
3557 abi_long ret;
3558 struct target_msghdr *msgp;
3559
3560 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3561 msgp,
3562 target_msg,
3563 send ? 1 : 0)) {
3564 return -TARGET_EFAULT;
3565 }
3566 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3567 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3568 return ret;
3569 }
3570
3571 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3572 * so it might not have this *mmsg-specific flag either.
3573 */
3574 #ifndef MSG_WAITFORONE
3575 #define MSG_WAITFORONE 0x10000
3576 #endif
3577
/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked().
 * Returns the number of datagrams processed if any succeeded, even
 * when a later one failed (matching the kernel's partial-success
 * contract), otherwise the error from the first message. */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Silently clamp oversized vectors rather than erroring out. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        /* Record the per-message byte count in the guest vector. */
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually filled in. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3617
3618 /* do_accept4() Must return target values and target errnos. */
3619 static abi_long do_accept4(int fd, abi_ulong target_addr,
3620 abi_ulong target_addrlen_addr, int flags)
3621 {
3622 socklen_t addrlen;
3623 void *addr;
3624 abi_long ret;
3625 int host_flags;
3626
3627 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3628
3629 if (target_addr == 0) {
3630 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3631 }
3632
3633 /* linux returns EINVAL if addrlen pointer is invalid */
3634 if (get_user_u32(addrlen, target_addrlen_addr))
3635 return -TARGET_EINVAL;
3636
3637 if ((int)addrlen < 0) {
3638 return -TARGET_EINVAL;
3639 }
3640
3641 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3642 return -TARGET_EINVAL;
3643
3644 addr = alloca(addrlen);
3645
3646 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3647 if (!is_error(ret)) {
3648 host_to_target_sockaddr(target_addr, addr, addrlen);
3649 if (put_user_u32(addrlen, target_addrlen_addr))
3650 ret = -TARGET_EFAULT;
3651 }
3652 return ret;
3653 }
3654
3655 /* do_getpeername() Must return target values and target errnos. */
3656 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3657 abi_ulong target_addrlen_addr)
3658 {
3659 socklen_t addrlen;
3660 void *addr;
3661 abi_long ret;
3662
3663 if (get_user_u32(addrlen, target_addrlen_addr))
3664 return -TARGET_EFAULT;
3665
3666 if ((int)addrlen < 0) {
3667 return -TARGET_EINVAL;
3668 }
3669
3670 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3671 return -TARGET_EFAULT;
3672
3673 addr = alloca(addrlen);
3674
3675 ret = get_errno(getpeername(fd, addr, &addrlen));
3676 if (!is_error(ret)) {
3677 host_to_target_sockaddr(target_addr, addr, addrlen);
3678 if (put_user_u32(addrlen, target_addrlen_addr))
3679 ret = -TARGET_EFAULT;
3680 }
3681 return ret;
3682 }
3683
3684 /* do_getsockname() Must return target values and target errnos. */
3685 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3686 abi_ulong target_addrlen_addr)
3687 {
3688 socklen_t addrlen;
3689 void *addr;
3690 abi_long ret;
3691
3692 if (get_user_u32(addrlen, target_addrlen_addr))
3693 return -TARGET_EFAULT;
3694
3695 if ((int)addrlen < 0) {
3696 return -TARGET_EINVAL;
3697 }
3698
3699 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3700 return -TARGET_EFAULT;
3701
3702 addr = alloca(addrlen);
3703
3704 ret = get_errno(getsockname(fd, addr, &addrlen));
3705 if (!is_error(ret)) {
3706 host_to_target_sockaddr(target_addr, addr, addrlen);
3707 if (put_user_u32(addrlen, target_addrlen_addr))
3708 ret = -TARGET_EFAULT;
3709 }
3710 return ret;
3711 }
3712
3713 /* do_socketpair() Must return target values and target errnos. */
3714 static abi_long do_socketpair(int domain, int type, int protocol,
3715 abi_ulong target_tab_addr)
3716 {
3717 int tab[2];
3718 abi_long ret;
3719
3720 target_to_host_sock_type(&type);
3721
3722 ret = get_errno(socketpair(domain, type, protocol, tab));
3723 if (!is_error(ret)) {
3724 if (put_user_s32(tab[0], target_tab_addr)
3725 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3726 ret = -TARGET_EFAULT;
3727 }
3728 return ret;
3729 }
3730
/* do_sendto() Must return target values and target errnos.
 * Emulates send(2)/sendto(2): the guest buffer is locked, optionally
 * run through the fd's data translator on a private copy, and handed
 * to safe_sendto with or without a destination address. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL; /* non-NULL iff host_msg is our translated copy */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate a heap copy so the guest's memory stays pristine. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 leaves room for a forced NUL on AF_UNIX abstract names. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* Free the translated copy (if any) and restore the locked pointer
     * before unlocking it. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3774
/* do_recvfrom() Must return target values and target errnos.
 * Emulates recv(2)/recvfrom(2): receives into the locked guest buffer,
 * optionally runs the fd's data translator over it, and copies the
 * source address back when the guest asked for it. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet. */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        /* Convert the received payload in place if this fd needs it. */
        if (fd_trans_host_to_target_data(fd)) {
            ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: copy the received bytes back to the guest. */
        unlock_user(host_msg, msg, len);
    } else {
    fail:
        /* Error: unlock without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3822
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 * Demultiplexes the legacy socketcall(2) syscall: reads the per-call
 * argument vector from guest memory at 'vptr' and dispatches to the
 * matching do_* helper. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[];
     * unknown call numbers skip this and fall through to the
     * ENOSYS default below. */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3910
/* Maximum number of SysV shared memory attachments tracked at once. */
#define N_SHM_REGIONS 32

/* Bookkeeping for guest shmat() attachments so that a later shmdt()
 * can find the mapping again. */
static struct shm_region {
    abi_ulong start;  /* guest address of the attachment */
    abi_ulong size;   /* size of the attached segment in bytes */
    bool in_use;      /* slot currently holds a live attachment */
} shm_regions[N_SHM_REGIONS];
3918
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct; used when the target does not
 * provide its own semid64_ds layout.  The __unused fields pad the
 * 32-bit time values out to 64 bits on 32-bit ABIs. */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;         /* last semop time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong sem_ctime;         /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;         /* number of semaphores in set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
3937
/* Copy the ipc_perm embedded at the head of a guest semid64_ds at
 * 'target_addr' into *host_ip, byteswapping each field.  Some targets
 * widen mode/__seq to 32 bits, hence the per-target swaps below.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3965
/* Mirror of target_to_host_ipc_perm(): write *host_ip into the
 * ipc_perm at the head of the guest semid64_ds at 'target_addr',
 * byteswapping each field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3993
3994 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3995 abi_ulong target_addr)
3996 {
3997 struct target_semid64_ds *target_sd;
3998
3999 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4000 return -TARGET_EFAULT;
4001 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4002 return -TARGET_EFAULT;
4003 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4004 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4005 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4006 unlock_user_struct(target_sd, target_addr, 0);
4007 return 0;
4008 }
4009
4010 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4011 struct semid_ds *host_sd)
4012 {
4013 struct target_semid64_ds *target_sd;
4014
4015 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4016 return -TARGET_EFAULT;
4017 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4018 return -TARGET_EFAULT;
4019 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4020 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4021 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4022 unlock_user_struct(target_sd, target_addr, 1);
4023 return 0;
4024 }
4025
/* Guest layout of struct seminfo, returned by semctl(IPC_INFO/SEM_INFO).
 * Field-for-field mirror of the host's <sys/sem.h> seminfo. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4038
/* Copy a host struct seminfo out to the guest at 'target_addr',
 * byteswapping each field via __put_user.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
4058
/* Host-side semctl() value union (see semctl(2); glibc does not
 * declare it for us). */
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

/* Guest view of the same union: all pointer members are guest
 * addresses of the corresponding structures. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
4072
/* For semctl(SETALL): read the guest's semaphore value array at
 * 'target_addr' into a freshly allocated host array stored through
 * *host_array.  The set size is discovered with IPC_STAT.  On success
 * ownership of *host_array passes to the caller (it is freed by
 * host_to_target_semarray()).  Returns 0 or a negative target errno.
 * NOTE(review): nsems*sizeof(unsigned short) could overflow for an
 * absurd nsems — presumably bounded by the kernel's SEMMSL; confirm. */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
4108
4109 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4110 unsigned short **host_array)
4111 {
4112 int nsems;
4113 unsigned short *array;
4114 union semun semun;
4115 struct semid_ds semid_ds;
4116 int i, ret;
4117
4118 semun.buf = &semid_ds;
4119
4120 ret = semctl(semid, 0, IPC_STAT, semun);
4121 if (ret == -1)
4122 return get_errno(ret);
4123
4124 nsems = semid_ds.sem_nsems;
4125
4126 array = lock_user(VERIFY_WRITE, target_addr,
4127 nsems*sizeof(unsigned short), 0);
4128 if (!array)
4129 return -TARGET_EFAULT;
4130
4131 for(i=0; i<nsems; i++) {
4132 __put_user((*host_array)[i], &array[i]);
4133 }
4134 g_free(*host_array);
4135 unlock_user(array, target_addr, 1);
4136
4137 return 0;
4138 }
4139
/* Emulate semctl(2): convert the guest's semun argument for the given
 * command, issue the host semctl, and copy results back.  Returns the
 * host result or a negative target errno; unrecognized commands fall
 * through with the initial -TARGET_EINVAL. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip IPC_64 and similar modifier bits. */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        /* target_to_host_semarray allocates 'array'; the matching
         * host_to_target_semarray call consumes it. */
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* These commands ignore the semun argument entirely. */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
4209
/* Guest layout of struct sembuf (one semop(2) operation). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation: negative/zero/positive */
    short sem_flg;           /* IPC_NOWAIT, SEM_UNDO */
};
4215
/* Read 'nsops' guest sembuf entries at 'target_addr' into the
 * caller-provided host array, byteswapping each field.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
4238
/* Emulate semop(2): convert the guest op array and run it through the
 * restartable safe_semtimedop wrapper (no timeout).
 * NOTE(review): 'nsops' comes straight from the guest; a huge value
 * makes this VLA blow the host stack before the kernel can reject it
 * with E2BIG — consider bounding it here.  Confirm against SEMOPM. */
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
}
4248
/* Guest layout of struct msqid64_ds (msgctl IPC_STAT/IPC_SET data).
 * The __unused fields pad 32-bit time values out to 64 bits on 32-bit
 * ABIs, mirroring the asm-generic kernel layout. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;         /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;         /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;         /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;      /* current bytes on queue */
    abi_ulong msg_qnum;          /* messages currently queued */
    abi_ulong msg_qbytes;        /* max bytes allowed on queue */
    abi_ulong msg_lspid;         /* pid of last msgsnd */
    abi_ulong msg_lrpid;         /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4272
4273 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4274 abi_ulong target_addr)
4275 {
4276 struct target_msqid_ds *target_md;
4277
4278 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4279 return -TARGET_EFAULT;
4280 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4281 return -TARGET_EFAULT;
4282 host_md->msg_stime = tswapal(target_md->msg_stime);
4283 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4284 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4285 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4286 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4287 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4288 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4289 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4290 unlock_user_struct(target_md, target_addr, 0);
4291 return 0;
4292 }
4293
4294 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4295 struct msqid_ds *host_md)
4296 {
4297 struct target_msqid_ds *target_md;
4298
4299 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4300 return -TARGET_EFAULT;
4301 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4302 return -TARGET_EFAULT;
4303 target_md->msg_stime = tswapal(host_md->msg_stime);
4304 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4305 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4306 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4307 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4308 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4309 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4310 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4311 unlock_user_struct(target_md, target_addr, 1);
4312 return 0;
4313 }
4314
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;              /* max size of a single message */
    int msgmnb;              /* default max size of a queue */
    int msgmni;              /* max number of queues */
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4325
4326 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4327 struct msginfo *host_msginfo)
4328 {
4329 struct target_msginfo *target_msginfo;
4330 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4331 return -TARGET_EFAULT;
4332 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4333 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4334 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4335 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4336 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4337 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4338 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4339 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4340 unlock_user_struct(target_msginfo, target_addr, 1);
4341 return 0;
4342 }
4343
4344 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4345 {
4346 struct msqid_ds dsarg;
4347 struct msginfo msginfo;
4348 abi_long ret = -TARGET_EINVAL;
4349
4350 cmd &= 0xff;
4351
4352 switch (cmd) {
4353 case IPC_STAT:
4354 case IPC_SET:
4355 case MSG_STAT:
4356 if (target_to_host_msqid_ds(&dsarg,ptr))
4357 return -TARGET_EFAULT;
4358 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4359 if (host_to_target_msqid_ds(ptr,&dsarg))
4360 return -TARGET_EFAULT;
4361 break;
4362 case IPC_RMID:
4363 ret = get_errno(msgctl(msgid, cmd, NULL));
4364 break;
4365 case IPC_INFO:
4366 case MSG_INFO:
4367 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4368 if (host_to_target_msginfo(ptr, &msginfo))
4369 return -TARGET_EFAULT;
4370 break;
4371 }
4372
4373 return ret;
4374 }
4375
/* Guest-ABI message buffer header; mtext[1] is the old-style trailing
 * array idiom — the actual text of msgsz bytes follows mtype in guest
 * memory. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
4380
4381 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4382 ssize_t msgsz, int msgflg)
4383 {
4384 struct target_msgbuf *target_mb;
4385 struct msgbuf *host_mb;
4386 abi_long ret = 0;
4387
4388 if (msgsz < 0) {
4389 return -TARGET_EINVAL;
4390 }
4391
4392 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4393 return -TARGET_EFAULT;
4394 host_mb = g_try_malloc(msgsz + sizeof(long));
4395 if (!host_mb) {
4396 unlock_user_struct(target_mb, msgp, 0);
4397 return -TARGET_ENOMEM;
4398 }
4399 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4400 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4401 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4402 g_free(host_mb);
4403 unlock_user_struct(target_mb, msgp, 0);
4404
4405 return ret;
4406 }
4407
/* Emulate msgrcv(2): receive into a host msgbuf, then copy the type and
 * text back into the guest's buffer at msgp.
 * Returns the number of bytes received, or a target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Room for the host-sized mtype plus up to msgsz bytes of text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* mtext lives immediately after the abi-sized mtype field.
         * Lock it separately so the copy is bounds-checked against the
         * actual number of bytes received. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always valid here: every goto happens after the lock
     * succeeded, but keep the guard for safety. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4450
4451 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4452 abi_ulong target_addr)
4453 {
4454 struct target_shmid_ds *target_sd;
4455
4456 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4457 return -TARGET_EFAULT;
4458 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4459 return -TARGET_EFAULT;
4460 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4461 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4462 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4463 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4464 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4465 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4466 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4467 unlock_user_struct(target_sd, target_addr, 0);
4468 return 0;
4469 }
4470
4471 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4472 struct shmid_ds *host_sd)
4473 {
4474 struct target_shmid_ds *target_sd;
4475
4476 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4477 return -TARGET_EFAULT;
4478 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4479 return -TARGET_EFAULT;
4480 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4481 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4482 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4483 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4484 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4485 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4486 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4487 unlock_user_struct(target_sd, target_addr, 1);
4488 return 0;
4489 }
4490
/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;   /* max segment size */
    abi_ulong shmmin;   /* min segment size */
    abi_ulong shmmni;   /* max number of segments */
    abi_ulong shmseg;   /* max segments per process */
    abi_ulong shmall;   /* max total shared memory (pages) */
};
4498
4499 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4500 struct shminfo *host_shminfo)
4501 {
4502 struct target_shminfo *target_shminfo;
4503 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4504 return -TARGET_EFAULT;
4505 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4506 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4507 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4508 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4509 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4510 unlock_user_struct(target_shminfo, target_addr, 1);
4511 return 0;
4512 }
4513
/* Guest-ABI layout of struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;               /* number of segments in existence */
    abi_ulong shm_tot;          /* total allocated shm (pages) */
    abi_ulong shm_rss;          /* resident shm (pages) */
    abi_ulong shm_swp;          /* swapped shm (pages) */
    abi_ulong swap_attempts;    /* unused since Linux 2.4 */
    abi_ulong swap_successes;   /* unused since Linux 2.4 */
};
4522
4523 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4524 struct shm_info *host_shm_info)
4525 {
4526 struct target_shm_info *target_shm_info;
4527 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4528 return -TARGET_EFAULT;
4529 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4530 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4531 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4532 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4533 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4534 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4535 unlock_user_struct(target_shm_info, target_addr, 1);
4536 return 0;
4537 }
4538
4539 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4540 {
4541 struct shmid_ds dsarg;
4542 struct shminfo shminfo;
4543 struct shm_info shm_info;
4544 abi_long ret = -TARGET_EINVAL;
4545
4546 cmd &= 0xff;
4547
4548 switch(cmd) {
4549 case IPC_STAT:
4550 case IPC_SET:
4551 case SHM_STAT:
4552 if (target_to_host_shmid_ds(&dsarg, buf))
4553 return -TARGET_EFAULT;
4554 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4555 if (host_to_target_shmid_ds(buf, &dsarg))
4556 return -TARGET_EFAULT;
4557 break;
4558 case IPC_INFO:
4559 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4560 if (host_to_target_shminfo(buf, &shminfo))
4561 return -TARGET_EFAULT;
4562 break;
4563 case SHM_INFO:
4564 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4565 if (host_to_target_shm_info(buf, &shm_info))
4566 return -TARGET_EFAULT;
4567 break;
4568 case IPC_RMID:
4569 case SHM_LOCK:
4570 case SHM_UNLOCK:
4571 ret = get_errno(shmctl(shmid, cmd, NULL));
4572 break;
4573 }
4574
4575 return ret;
4576 }
4577
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default SHMLBA for targets with no special alignment needs: one page. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
4597
/* Emulate shmat(2): attach the segment at a guest address honouring the
 * target's SHMLBA alignment, record the mapping in shm_regions[] so
 * do_shmdt() can undo the page flags later, and return the guest address
 * (or a target errno).
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce the target's alignment rules on the requested address:
     * round down with SHM_RND, otherwise reject misaligned addresses. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address given: find a free region in the guest address
         * space and force the attach there with SHM_REMAP. */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the guest pages valid (and writable unless SHM_RDONLY). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the attachment in the first free slot; if the table is
     * full the attach still succeeds but do_shmdt() won't be able to
     * clear the page flags for it. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4663
4664 static inline abi_long do_shmdt(abi_ulong shmaddr)
4665 {
4666 int i;
4667
4668 for (i = 0; i < N_SHM_REGIONS; ++i) {
4669 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4670 shm_regions[i].in_use = false;
4671 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4672 break;
4673 }
4674 }
4675
4676 return get_errno(shmdt(g2h(shmaddr)));
4677 }
4678
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
/* Demultiplexer for the legacy ipc(2) syscall: dispatches to the
 * individual SysV IPC helpers above.  `call` encodes a version number in
 * its high 16 bits and the operation in the low 16.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv passes msgp and msgtyp indirectly
                 * through a kludge struct pointed to by ptr. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            /* The attach address is returned indirectly through *third. */
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator variant: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4785
/* kernel structure types definitions */

/* X-macro expansion of syscall_types.h, pass 1: build an enum with one
 * STRUCT_<name> constant per described kernel struct. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
    STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Pass 2: emit a thunk type-description array for each regular struct;
 * STRUCT_SPECIAL entries get hand-written converters elsewhere. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
4802
typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a custom ioctl handler for requests that can't be handled
 * by generic thunk-based argument conversion. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

struct IOCTLEntry {
    int target_cmd;             /* ioctl number as seen by the guest */
    unsigned int host_cmd;      /* ioctl number to issue on the host */
    const char *name;           /* command name, for logging */
    int access;                 /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* custom handler, or 0 for generic path */
    const argtype arg_type[5];  /* thunk description of the argument */
};
4816
/* Direction of the ioctl argument: read from / written to guest memory. */
#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the on-stack scratch buffer used for argument conversion. */
#define MAX_STRUCT_SIZE 4096
4822
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP: the argument is a variable-length
 * struct (header plus fm_extent_count extents), so the generic fixed-size
 * thunk path can't be used.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4911
4912 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4913 int fd, int cmd, abi_long arg)
4914 {
4915 const argtype *arg_type = ie->arg_type;
4916 int target_size;
4917 void *argptr;
4918 int ret;
4919 struct ifconf *host_ifconf;
4920 uint32_t outbufsz;
4921 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4922 int target_ifreq_size;
4923 int nb_ifreq;
4924 int free_buf = 0;
4925 int i;
4926 int target_ifc_len;
4927 abi_long target_ifc_buf;
4928 int host_ifc_len;
4929 char *host_ifc_buf;
4930
4931 assert(arg_type[0] == TYPE_PTR);
4932 assert(ie->access == IOC_RW);
4933
4934 arg_type++;
4935 target_size = thunk_type_size(arg_type, 0);
4936
4937 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4938 if (!argptr)
4939 return -TARGET_EFAULT;
4940 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4941 unlock_user(argptr, arg, 0);
4942
4943 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4944 target_ifc_len = host_ifconf->ifc_len;
4945 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4946
4947 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
4948 nb_ifreq = target_ifc_len / target_ifreq_size;
4949 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4950
4951 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4952 if (outbufsz > MAX_STRUCT_SIZE) {
4953 /* We can't fit all the extents into the fixed size buffer.
4954 * Allocate one that is large enough and use it instead.
4955 */
4956 host_ifconf = malloc(outbufsz);
4957 if (!host_ifconf) {
4958 return -TARGET_ENOMEM;
4959 }
4960 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4961 free_buf = 1;
4962 }
4963 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
4964
4965 host_ifconf->ifc_len = host_ifc_len;
4966 host_ifconf->ifc_buf = host_ifc_buf;
4967
4968 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4969 if (!is_error(ret)) {
4970 /* convert host ifc_len to target ifc_len */
4971
4972 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4973 target_ifc_len = nb_ifreq * target_ifreq_size;
4974 host_ifconf->ifc_len = target_ifc_len;
4975
4976 /* restore target ifc_buf */
4977
4978 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4979
4980 /* copy struct ifconf to target user */
4981
4982 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4983 if (!argptr)
4984 return -TARGET_EFAULT;
4985 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4986 unlock_user(argptr, arg, target_size);
4987
4988 /* copy ifreq[] to target user */
4989
4990 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4991 for (i = 0; i < nb_ifreq ; i++) {
4992 thunk_convert(argptr + i * target_ifreq_size,
4993 host_ifc_buf + i * sizeof(struct ifreq),
4994 ifreq_arg_type, THUNK_TARGET);
4995 }
4996 unlock_user(argptr, target_ifc_buf, target_ifc_len);
4997 }
4998
4999 if (free_buf) {
5000 free(host_ifconf);
5001 }
5002
5003 return ret;
5004 }
5005
5006 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5007 int cmd, abi_long arg)
5008 {
5009 void *argptr;
5010 struct dm_ioctl *host_dm;
5011 abi_long guest_data;
5012 uint32_t guest_data_size;
5013 int target_size;
5014 const argtype *arg_type = ie->arg_type;
5015 abi_long ret;
5016 void *big_buf = NULL;
5017 char *host_data;
5018
5019 arg_type++;
5020 target_size = thunk_type_size(arg_type, 0);
5021 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5022 if (!argptr) {
5023 ret = -TARGET_EFAULT;
5024 goto out;
5025 }
5026 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5027 unlock_user(argptr, arg, 0);
5028
5029 /* buf_temp is too small, so fetch things into a bigger buffer */
5030 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5031 memcpy(big_buf, buf_temp, target_size);
5032 buf_temp = big_buf;
5033 host_dm = big_buf;
5034
5035 guest_data = arg + host_dm->data_start;
5036 if ((guest_data - arg) < 0) {
5037 ret = -TARGET_EINVAL;
5038 goto out;
5039 }
5040 guest_data_size = host_dm->data_size - host_dm->data_start;
5041 host_data = (char*)host_dm + host_dm->data_start;
5042
5043 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5044 if (!argptr) {
5045 ret = -TARGET_EFAULT;
5046 goto out;
5047 }
5048
5049 switch (ie->host_cmd) {
5050 case DM_REMOVE_ALL:
5051 case DM_LIST_DEVICES:
5052 case DM_DEV_CREATE:
5053 case DM_DEV_REMOVE:
5054 case DM_DEV_SUSPEND:
5055 case DM_DEV_STATUS:
5056 case DM_DEV_WAIT:
5057 case DM_TABLE_STATUS:
5058 case DM_TABLE_CLEAR:
5059 case DM_TABLE_DEPS:
5060 case DM_LIST_VERSIONS:
5061 /* no input data */
5062 break;
5063 case DM_DEV_RENAME:
5064 case DM_DEV_SET_GEOMETRY:
5065 /* data contains only strings */
5066 memcpy(host_data, argptr, guest_data_size);
5067 break;
5068 case DM_TARGET_MSG:
5069 memcpy(host_data, argptr, guest_data_size);
5070 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5071 break;
5072 case DM_TABLE_LOAD:
5073 {
5074 void *gspec = argptr;
5075 void *cur_data = host_data;
5076 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5077 int spec_size = thunk_type_size(arg_type, 0);
5078 int i;
5079
5080 for (i = 0; i < host_dm->target_count; i++) {
5081 struct dm_target_spec *spec = cur_data;
5082 uint32_t next;
5083 int slen;
5084
5085 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5086 slen = strlen((char*)gspec + spec_size) + 1;
5087 next = spec->next;
5088 spec->next = sizeof(*spec) + slen;
5089 strcpy((char*)&spec[1], gspec + spec_size);
5090 gspec += next;
5091 cur_data += spec->next;
5092 }
5093 break;
5094 }
5095 default:
5096 ret = -TARGET_EINVAL;
5097 unlock_user(argptr, guest_data, 0);
5098 goto out;
5099 }
5100 unlock_user(argptr, guest_data, 0);
5101
5102 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5103 if (!is_error(ret)) {
5104 guest_data = arg + host_dm->data_start;
5105 guest_data_size = host_dm->data_size - host_dm->data_start;
5106 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5107 switch (ie->host_cmd) {
5108 case DM_REMOVE_ALL:
5109 case DM_DEV_CREATE:
5110 case DM_DEV_REMOVE:
5111 case DM_DEV_RENAME:
5112 case DM_DEV_SUSPEND:
5113 case DM_DEV_STATUS:
5114 case DM_TABLE_LOAD:
5115 case DM_TABLE_CLEAR:
5116 case DM_TARGET_MSG:
5117 case DM_DEV_SET_GEOMETRY:
5118 /* no return data */
5119 break;
5120 case DM_LIST_DEVICES:
5121 {
5122 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5123 uint32_t remaining_data = guest_data_size;
5124 void *cur_data = argptr;
5125 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5126 int nl_size = 12; /* can't use thunk_size due to alignment */
5127
5128 while (1) {
5129 uint32_t next = nl->next;
5130 if (next) {
5131 nl->next = nl_size + (strlen(nl->name) + 1);
5132 }
5133 if (remaining_data < nl->next) {
5134 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5135 break;
5136 }
5137 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5138 strcpy(cur_data + nl_size, nl->name);
5139 cur_data += nl->next;
5140 remaining_data -= nl->next;
5141 if (!next) {
5142 break;
5143 }
5144 nl = (void*)nl + next;
5145 }
5146 break;
5147 }
5148 case DM_DEV_WAIT:
5149 case DM_TABLE_STATUS:
5150 {
5151 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5152 void *cur_data = argptr;
5153 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5154 int spec_size = thunk_type_size(arg_type, 0);
5155 int i;
5156
5157 for (i = 0; i < host_dm->target_count; i++) {
5158 uint32_t next = spec->next;
5159 int slen = strlen((char*)&spec[1]) + 1;
5160 spec->next = (cur_data - argptr) + spec_size + slen;
5161 if (guest_data_size < spec->next) {
5162 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5163 break;
5164 }
5165 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5166 strcpy(cur_data + spec_size, (char*)&spec[1]);
5167 cur_data = argptr + spec->next;
5168 spec = (void*)host_dm + host_dm->data_start + next;
5169 }
5170 break;
5171 }
5172 case DM_TABLE_DEPS:
5173 {
5174 void *hdata = (void*)host_dm + host_dm->data_start;
5175 int count = *(uint32_t*)hdata;
5176 uint64_t *hdev = hdata + 8;
5177 uint64_t *gdev = argptr + 8;
5178 int i;
5179
5180 *(uint32_t*)argptr = tswap32(count);
5181 for (i = 0; i < count; i++) {
5182 *gdev = tswap64(*hdev);
5183 gdev++;
5184 hdev++;
5185 }
5186 break;
5187 }
5188 case DM_LIST_VERSIONS:
5189 {
5190 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5191 uint32_t remaining_data = guest_data_size;
5192 void *cur_data = argptr;
5193 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5194 int vers_size = thunk_type_size(arg_type, 0);
5195
5196 while (1) {
5197 uint32_t next = vers->next;
5198 if (next) {
5199 vers->next = vers_size + (strlen(vers->name) + 1);
5200 }
5201 if (remaining_data < vers->next) {
5202 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5203 break;
5204 }
5205 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5206 strcpy(cur_data + vers_size, vers->name);
5207 cur_data += vers->next;
5208 remaining_data -= vers->next;
5209 if (!next) {
5210 break;
5211 }
5212 vers = (void*)vers + next;
5213 }
5214 break;
5215 }
5216 default:
5217 unlock_user(argptr, guest_data, 0);
5218 ret = -TARGET_EINVAL;
5219 goto out;
5220 }
5221 unlock_user(argptr, guest_data, guest_data_size);
5222
5223 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5224 if (!argptr) {
5225 ret = -TARGET_EFAULT;
5226 goto out;
5227 }
5228 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5229 unlock_user(argptr, arg, target_size);
5230 }
5231 out:
5232 g_free(big_buf);
5233 return ret;
5234 }
5235
/* Custom handler for BLKPG: struct blkpg_ioctl_arg embeds a pointer to a
 * struct blkpg_partition, which must be fetched and converted separately.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    /* After conversion host_blkpg->data holds the guest address of the
     * partition struct; reuse `arg` to fetch it. */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5288
/* Custom handler for routing-table ioctls (SIOCADDRT/SIOCDELRT): the
 * struct rtentry contains an rt_dev string pointer which must be locked
 * and translated by hand while the rest of the struct goes through the
 * generic field-by-field thunk conversion.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr;
    unsigned long *host_rt_dev_ptr;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            /* rt_dev is a string pointer: lock the guest string and point
             * the host struct at the locked host copy (or NULL). */
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    /* NOTE: host_rt_dev_ptr/target_rt_dev_ptr are always assigned above
     * because struct rtentry contains an rt_dev field that the loop must
     * have visited. */
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5351
5352 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5353 int fd, int cmd, abi_long arg)
5354 {
5355 int sig = target_to_host_signal(arg);
5356 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5357 }
5358
/* Table of supported ioctls, expanded from ioctls.h.  Each entry maps a
 * target ioctl number to the host number plus the access mode, an
 * optional special-case handler (do_ioctl_*) and the thunk argument
 * type description.  The table is terminated by an all-zero sentinel,
 * which do_ioctl() uses to detect unsupported requests. */
5359 static IOCTLEntry ioctl_entries[] = {
5360 #define IOCTL(cmd, access, ...) \
5361 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5362 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5363 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5364 #include "ioctls.h"
5365 { 0, 0, },
5366 };
5367
5368 /* ??? Implement proper locking for ioctls. */
5369 /* do_ioctl() Must return target values and target errnos. */
/* Look the request up in ioctl_entries; either delegate to the entry's
 * special-case handler, or marshal the argument according to its thunk
 * type (TYPE_NULL / TYPE_INT / TYPE_PTR) around the host ioctl call. */
5370 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5371 {
5372 const IOCTLEntry *ie;
5373 const argtype *arg_type;
5374 abi_long ret;
5375 uint8_t buf_temp[MAX_STRUCT_SIZE];
5376 int target_size;
5377 void *argptr;
5378
/* Linear scan; the zero sentinel terminates the table. */
5379 ie = ioctl_entries;
5380 for(;;) {
5381 if (ie->target_cmd == 0) {
5382 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5383 return -TARGET_ENOSYS;
5384 }
5385 if (ie->target_cmd == cmd)
5386 break;
5387 ie++;
5388 }
5389 arg_type = ie->arg_type;
5390 #if defined(DEBUG)
5391 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5392 #endif
/* Entries registered via IOCTL_SPECIAL handle their own marshalling. */
5393 if (ie->do_ioctl) {
5394 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5395 }
5396
5397 switch(arg_type[0]) {
5398 case TYPE_NULL:
5399 /* no argument */
5400 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5401 break;
5402 case TYPE_PTRVOID:
5403 case TYPE_INT:
/* Pass-through: the argument needs no conversion. */
5404 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5405 break;
5406 case TYPE_PTR:
5407 arg_type++;
5408 target_size = thunk_type_size(arg_type, 0);
5409 switch(ie->access) {
5410 case IOC_R:
/* Read-only for the guest: run the ioctl into buf_temp, then
 * convert the result out to target memory on success. */
5411 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5412 if (!is_error(ret)) {
5413 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5414 if (!argptr)
5415 return -TARGET_EFAULT;
5416 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5417 unlock_user(argptr, arg, target_size);
5418 }
5419 break;
5420 case IOC_W:
/* Write-only: convert the target struct into buf_temp first. */
5421 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5422 if (!argptr)
5423 return -TARGET_EFAULT;
5424 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5425 unlock_user(argptr, arg, 0);
5426 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5427 break;
5428 default:
5429 case IOC_RW:
/* Read-write: convert in, call, convert back out on success. */
5430 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5431 if (!argptr)
5432 return -TARGET_EFAULT;
5433 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5434 unlock_user(argptr, arg, 0);
5435 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5436 if (!is_error(ret)) {
5437 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5438 if (!argptr)
5439 return -TARGET_EFAULT;
5440 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5441 unlock_user(argptr, arg, target_size);
5442 }
5443 break;
5444 }
5445 break;
5446 default:
5447 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5448 (long)cmd, arg_type[0]);
5449 ret = -TARGET_ENOSYS;
5450 break;
5451 }
5452 return ret;
5453 }
5454
/* Bitmask translation tables for termios flag words.  Each row is
 * { target_mask, target_bits, host_mask, host_bits }: when the masked
 * source value equals the source bits, the destination bits are set.
 * Input (c_iflag) flags: */
5455 static const bitmask_transtbl iflag_tbl[] = {
5456 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5457 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5458 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5459 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5460 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5461 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5462 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5463 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5464 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5465 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5466 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5467 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5468 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5469 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5470 { 0, 0, 0, 0 }
5471 };
5472
/* Output (c_oflag) flags, including the multi-bit delay fields
 * (NLDLY/CRDLY/TABDLY/...), which need one row per possible value. */
5473 static const bitmask_transtbl oflag_tbl[] = {
5474 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5475 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5476 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5477 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5478 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5479 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5480 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5481 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5482 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5483 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5484 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5485 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5486 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5487 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5488 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5489 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5490 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5491 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5492 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5493 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5494 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5495 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5496 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5497 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5498 { 0, 0, 0, 0 }
5499 };
5500
/* Control (c_cflag) flags, including one row per baud rate (CBAUD is a
 * multi-bit field) and per character size (CSIZE). */
5501 static const bitmask_transtbl cflag_tbl[] = {
5502 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5503 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5504 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5505 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5506 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5507 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5508 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5509 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5510 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5511 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5512 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5513 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5514 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5515 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5516 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5517 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5518 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5519 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5520 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5521 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5522 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5523 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5524 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5525 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5526 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5527 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5528 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5529 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5530 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5531 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5532 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5533 { 0, 0, 0, 0 }
5534 };
5535
/* Local-mode (c_lflag) flags. */
5536 static const bitmask_transtbl lflag_tbl[] = {
5537 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5538 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5539 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5540 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5541 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5542 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5543 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5544 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5545 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5546 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5547 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5548 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5549 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5550 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5551 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5552 { 0, 0, 0, 0 }
5553 };
5554
/* Convert a target struct termios (byte-swapped, target flag encoding)
 * into the host representation: translate each flag word through its
 * bitmask table and remap the control-character array index by index.
 * Host c_cc slots with no target counterpart are left zeroed. */
5555 static void target_to_host_termios (void *dst, const void *src)
5556 {
5557 struct host_termios *host = dst;
5558 const struct target_termios *target = src;
5559
5560 host->c_iflag =
5561 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5562 host->c_oflag =
5563 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5564 host->c_cflag =
5565 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5566 host->c_lflag =
5567 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
/* c_line is a single byte: no swap or translation needed. */
5568 host->c_line = target->c_line;
5569
5570 memset(host->c_cc, 0, sizeof(host->c_cc));
5571 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5572 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5573 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5574 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5575 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5576 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5577 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5578 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5579 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5580 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5581 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5582 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5583 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5584 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5585 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5586 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5587 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5588 }
5589
/* Inverse of target_to_host_termios(): translate a host struct termios
 * into the target layout, swapping flag words to target byte order and
 * remapping the control-character array.  Target c_cc slots with no
 * host counterpart are left zeroed. */
5590 static void host_to_target_termios (void *dst, const void *src)
5591 {
5592 struct target_termios *target = dst;
5593 const struct host_termios *host = src;
5594
5595 target->c_iflag =
5596 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5597 target->c_oflag =
5598 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5599 target->c_cflag =
5600 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5601 target->c_lflag =
5602 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
/* c_line is a single byte: no swap or translation needed. */
5603 target->c_line = host->c_line;
5604
5605 memset(target->c_cc, 0, sizeof(target->c_cc));
5606 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5607 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5608 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5609 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5610 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5611 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5612 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5613 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5614 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5615 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5616 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5617 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5618 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5619 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5620 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5621 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5622 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5623 }
5624
/* Thunk description for struct termios: converters in both directions
 * plus size/alignment for the target and host layouts. */
5625 static const StructEntry struct_termios_def = {
5626 .convert = { host_to_target_termios, target_to_host_termios },
5627 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5628 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5629 };
5630
/* Translation table for mmap() flag bits between target and host. */
5631 static bitmask_transtbl mmap_flags_tbl[] = {
5632 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5633 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5634 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5635 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5636 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5637 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5638 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5639 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5640 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5641 MAP_NORESERVE },
5642 { 0, 0, 0, 0 }
5643 };
5644
5645 #if defined(TARGET_I386)
5646
5647 /* NOTE: there is really one LDT for all the threads */
5648 static uint8_t *ldt_table;
5649
5650 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5651 {
5652 int size;
5653 void *p;
5654
5655 if (!ldt_table)
5656 return 0;
5657 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5658 if (size > bytecount)
5659 size = bytecount;
5660 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5661 if (!p)
5662 return -TARGET_EFAULT;
5663 /* ??? Should this by byteswapped? */
5664 memcpy(p, ldt_table, size);
5665 unlock_user(p, ptr, size);
5666 return size;
5667 }
5668
5669 /* XXX: add locking support */
/* modify_ldt(func == 1 or 0x11): install one LDT descriptor from the
 * guest's struct user_desc (target_modify_ldt_ldt_s).  oldmode selects
 * the pre-2.2 semantics (func == 1).  The descriptor encoding below is
 * deliberately the same bit layout as the Linux kernel's. */
5670 static abi_long write_ldt(CPUX86State *env,
5671 abi_ulong ptr, unsigned long bytecount, int oldmode)
5672 {
5673 struct target_modify_ldt_ldt_s ldt_info;
5674 struct target_modify_ldt_ldt_s *target_ldt_info;
5675 int seg_32bit, contents, read_exec_only, limit_in_pages;
5676 int seg_not_present, useable, lm;
5677 uint32_t *lp, entry_1, entry_2;
5678
5679 if (bytecount != sizeof(ldt_info))
5680 return -TARGET_EINVAL;
5681 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5682 return -TARGET_EFAULT;
5683 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5684 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5685 ldt_info.limit = tswap32(target_ldt_info->limit);
5686 ldt_info.flags = tswap32(target_ldt_info->flags);
5687 unlock_user_struct(target_ldt_info, ptr, 0);
5688
5689 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5690 return -TARGET_EINVAL;
/* Unpack the packed flags word (same layout as the kernel's). */
5691 seg_32bit = ldt_info.flags & 1;
5692 contents = (ldt_info.flags >> 1) & 3;
5693 read_exec_only = (ldt_info.flags >> 3) & 1;
5694 limit_in_pages = (ldt_info.flags >> 4) & 1;
5695 seg_not_present = (ldt_info.flags >> 5) & 1;
5696 useable = (ldt_info.flags >> 6) & 1;
5697 #ifdef TARGET_ABI32
5698 lm = 0;
5699 #else
5700 lm = (ldt_info.flags >> 7) & 1;
5701 #endif
5702 if (contents == 3) {
5703 if (oldmode)
5704 return -TARGET_EINVAL;
5705 if (seg_not_present == 0)
5706 return -TARGET_EINVAL;
5707 }
5708 /* allocate the LDT */
/* Lazily allocated on first write; shared by all threads (see the
 * ldt_table comment above). */
5709 if (!ldt_table) {
5710 env->ldt.base = target_mmap(0,
5711 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5712 PROT_READ|PROT_WRITE,
5713 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5714 if (env->ldt.base == -1)
5715 return -TARGET_ENOMEM;
5716 memset(g2h(env->ldt.base), 0,
5717 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5718 env->ldt.limit = 0xffff;
5719 ldt_table = g2h(env->ldt.base);
5720 }
5721
5722 /* NOTE: same code as Linux kernel */
5723 /* Allow LDTs to be cleared by the user. */
5724 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5725 if (oldmode ||
5726 (contents == 0 &&
5727 read_exec_only == 1 &&
5728 seg_32bit == 0 &&
5729 limit_in_pages == 0 &&
5730 seg_not_present == 1 &&
5731 useable == 0 )) {
5732 entry_1 = 0;
5733 entry_2 = 0;
5734 goto install;
5735 }
5736 }
5737
/* Pack base/limit/flags into the two 32-bit halves of an x86
 * segment descriptor. */
5738 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5739 (ldt_info.limit & 0x0ffff);
5740 entry_2 = (ldt_info.base_addr & 0xff000000) |
5741 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5742 (ldt_info.limit & 0xf0000) |
5743 ((read_exec_only ^ 1) << 9) |
5744 (contents << 10) |
5745 ((seg_not_present ^ 1) << 15) |
5746 (seg_32bit << 22) |
5747 (limit_in_pages << 23) |
5748 (lm << 21) |
5749 0x7000;
5750 if (!oldmode)
5751 entry_2 |= (useable << 20);
5752
5753 /* Install the new entry ... */
5754 install:
5755 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5756 lp[0] = tswap32(entry_1);
5757 lp[1] = tswap32(entry_2);
5758 return 0;
5759 }
5760
5761 /* specific and weird i386 syscalls */
5762 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5763 unsigned long bytecount)
5764 {
5765 abi_long ret;
5766
5767 switch (func) {
5768 case 0:
5769 ret = read_ldt(ptr, bytecount);
5770 break;
5771 case 1:
5772 ret = write_ldt(env, ptr, bytecount, 1);
5773 break;
5774 case 0x11:
5775 ret = write_ldt(env, ptr, bytecount, 0);
5776 break;
5777 default:
5778 ret = -TARGET_ENOSYS;
5779 break;
5780 }
5781 return ret;
5782 }
5783
5784 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area syscall: install a TLS descriptor in the GDT from the
 * guest's struct user_desc.  If entry_number is -1, pick the first free
 * TLS slot and write the chosen index back to the guest structure.
 * Descriptor packing mirrors write_ldt() above. */
5785 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5786 {
5787 uint64_t *gdt_table = g2h(env->gdt.base);
5788 struct target_modify_ldt_ldt_s ldt_info;
5789 struct target_modify_ldt_ldt_s *target_ldt_info;
5790 int seg_32bit, contents, read_exec_only, limit_in_pages;
5791 int seg_not_present, useable, lm;
5792 uint32_t *lp, entry_1, entry_2;
5793 int i;
5794
5795 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5796 if (!target_ldt_info)
5797 return -TARGET_EFAULT;
5798 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5799 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5800 ldt_info.limit = tswap32(target_ldt_info->limit);
5801 ldt_info.flags = tswap32(target_ldt_info->flags);
/* entry_number == -1 means "allocate a free TLS slot for me". */
5802 if (ldt_info.entry_number == -1) {
5803 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5804 if (gdt_table[i] == 0) {
5805 ldt_info.entry_number = i;
5806 target_ldt_info->entry_number = tswap32(i);
5807 break;
5808 }
5809 }
5810 }
5811 unlock_user_struct(target_ldt_info, ptr, 1);
5812
5813 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5814 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5815 return -TARGET_EINVAL;
/* Unpack the packed flags word (same layout as the kernel's). */
5816 seg_32bit = ldt_info.flags & 1;
5817 contents = (ldt_info.flags >> 1) & 3;
5818 read_exec_only = (ldt_info.flags >> 3) & 1;
5819 limit_in_pages = (ldt_info.flags >> 4) & 1;
5820 seg_not_present = (ldt_info.flags >> 5) & 1;
5821 useable = (ldt_info.flags >> 6) & 1;
5822 #ifdef TARGET_ABI32
5823 lm = 0;
5824 #else
5825 lm = (ldt_info.flags >> 7) & 1;
5826 #endif
5827
5828 if (contents == 3) {
5829 if (seg_not_present == 0)
5830 return -TARGET_EINVAL;
5831 }
5832
5833 /* NOTE: same code as Linux kernel */
5834 /* Allow LDTs to be cleared by the user. */
5835 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5836 if ((contents == 0 &&
5837 read_exec_only == 1 &&
5838 seg_32bit == 0 &&
5839 limit_in_pages == 0 &&
5840 seg_not_present == 1 &&
5841 useable == 0 )) {
5842 entry_1 = 0;
5843 entry_2 = 0;
5844 goto install;
5845 }
5846 }
5847
/* Pack base/limit/flags into the two 32-bit halves of an x86
 * segment descriptor. */
5848 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5849 (ldt_info.limit & 0x0ffff);
5850 entry_2 = (ldt_info.base_addr & 0xff000000) |
5851 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5852 (ldt_info.limit & 0xf0000) |
5853 ((read_exec_only ^ 1) << 9) |
5854 (contents << 10) |
5855 ((seg_not_present ^ 1) << 15) |
5856 (seg_32bit << 22) |
5857 (limit_in_pages << 23) |
5858 (useable << 20) |
5859 (lm << 21) |
5860 0x7000;
5861
5862 /* Install the new entry ... */
5863 install:
5864 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5865 lp[0] = tswap32(entry_1);
5866 lp[1] = tswap32(entry_2);
5867 return 0;
5868 }
5869
/* get_thread_area syscall: read back the GDT TLS descriptor selected by
 * entry_number in the guest's struct user_desc, decoding the packed
 * x86 descriptor bits into base/limit/flags (inverse of
 * do_set_thread_area). */
5870 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5871 {
5872 struct target_modify_ldt_ldt_s *target_ldt_info;
5873 uint64_t *gdt_table = g2h(env->gdt.base);
5874 uint32_t base_addr, limit, flags;
5875 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5876 int seg_not_present, useable, lm;
5877 uint32_t *lp, entry_1, entry_2;
5878
5879 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5880 if (!target_ldt_info)
5881 return -TARGET_EFAULT;
5882 idx = tswap32(target_ldt_info->entry_number);
5883 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5884 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5885 unlock_user_struct(target_ldt_info, ptr, 1);
5886 return -TARGET_EINVAL;
5887 }
5888 lp = (uint32_t *)(gdt_table + idx);
5889 entry_1 = tswap32(lp[0]);
5890 entry_2 = tswap32(lp[1]);
5891
/* Decode the descriptor bit fields (inverse of the packing in
 * do_set_thread_area / write_ldt). */
5892 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5893 contents = (entry_2 >> 10) & 3;
5894 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5895 seg_32bit = (entry_2 >> 22) & 1;
5896 limit_in_pages = (entry_2 >> 23) & 1;
5897 useable = (entry_2 >> 20) & 1;
5898 #ifdef TARGET_ABI32
5899 lm = 0;
5900 #else
5901 lm = (entry_2 >> 21) & 1;
5902 #endif
5903 flags = (seg_32bit << 0) | (contents << 1) |
5904 (read_exec_only << 3) | (limit_in_pages << 4) |
5905 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5906 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5907 base_addr = (entry_1 >> 16) |
5908 (entry_2 & 0xff000000) |
5909 ((entry_2 & 0xff) << 16);
5910 target_ldt_info->base_addr = tswapal(base_addr);
5911 target_ldt_info->limit = tswap32(limit);
5912 target_ldt_info->flags = tswap32(flags);
5913 unlock_user_struct(target_ldt_info, ptr, 1);
5914 return 0;
5915 }
5916 #endif /* TARGET_I386 && TARGET_ABI32 */
5917
5918 #ifndef TARGET_ABI32
5919 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5920 {
5921 abi_long ret = 0;
5922 abi_ulong val;
5923 int idx;
5924
5925 switch(code) {
5926 case TARGET_ARCH_SET_GS:
5927 case TARGET_ARCH_SET_FS:
5928 if (code == TARGET_ARCH_SET_GS)
5929 idx = R_GS;
5930 else
5931 idx = R_FS;
5932 cpu_x86_load_seg(env, idx, 0);
5933 env->segs[idx].base = addr;
5934 break;
5935 case TARGET_ARCH_GET_GS:
5936 case TARGET_ARCH_GET_FS:
5937 if (code == TARGET_ARCH_GET_GS)
5938 idx = R_GS;
5939 else
5940 idx = R_FS;
5941 val = env->segs[idx].base;
5942 if (put_user(val, addr, abi_ulong))
5943 ret = -TARGET_EFAULT;
5944 break;
5945 default:
5946 ret = -TARGET_EINVAL;
5947 break;
5948 }
5949 return ret;
5950 }
5951 #endif
5952
5953 #endif /* defined(TARGET_I386) */
5954
/* Host stack size for threads created via clone(CLONE_VM). */
5955 #define NEW_STACK_SIZE 0x40000
5956
5957
5958 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake structure shared between do_fork() and clone_func(): the
 * parent fills in env/tidptrs/sigmask, the child publishes its tid and
 * signals the condition variable when it is ready. */
5959 typedef struct {
5960 CPUArchState *env;
5961 pthread_mutex_t mutex;
5962 pthread_cond_t cond;
5963 pthread_t thread;
5964 uint32_t tid;
5965 abi_ulong child_tidptr;
5966 abi_ulong parent_tidptr;
/* Parent's signal mask, restored in the child once setup is done. */
5967 sigset_t sigmask;
5968 } new_thread_info;
5969
/* Entry point of a host thread backing a guest CLONE_VM clone: set up
 * per-thread CPU/task state, publish the tid, re-enable signals, then
 * synchronize with the parent before entering the guest cpu loop.
 * Never returns. */
5970 static void *clone_func(void *arg)
5971 {
5972 new_thread_info *info = arg;
5973 CPUArchState *env;
5974 CPUState *cpu;
5975 TaskState *ts;
5976
5977 rcu_register_thread();
5978 env = info->env;
5979 cpu = ENV_GET_CPU(env);
5980 thread_cpu = cpu;
5981 ts = (TaskState *)cpu->opaque;
5982 info->tid = gettid();
5983 cpu->host_tid = info->tid;
5984 task_settid(ts);
/* Honor CLONE_CHILD_SETTID / CLONE_PARENT_SETTID requests. */
5985 if (info->child_tidptr)
5986 put_user_u32(info->tid, info->child_tidptr);
5987 if (info->parent_tidptr)
5988 put_user_u32(info->tid, info->parent_tidptr);
5989 /* Enable signals. */
5990 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
5991 /* Signal to the parent that we're ready. */
5992 pthread_mutex_lock(&info->mutex);
5993 pthread_cond_broadcast(&info->cond);
5994 pthread_mutex_unlock(&info->mutex);
5995 /* Wait until the parent has finshed initializing the tls state. */
5996 pthread_mutex_lock(&clone_lock);
5997 pthread_mutex_unlock(&clone_lock);
5998 cpu_loop(env);
5999 /* never exits */
6000 return NULL;
6001 }
6002
6003 /* do_fork() Must return host values and target errnos (unlike most
6004 do_*() functions). */
6003 /* do_fork() Must return host values and target errnos (unlike most
6004 do_*() functions). */
/* Implement clone()/fork()/vfork() for the guest.  CLONE_VM clones are
 * mapped onto host pthreads with a handshake (new_thread_info); other
 * clones are emulated with a plain fork().  vfork() is emulated with
 * fork() by stripping CLONE_VFORK|CLONE_VM. */
6005 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6006 abi_ulong parent_tidptr, target_ulong newtls,
6007 abi_ulong child_tidptr)
6008 {
6009 CPUState *cpu = ENV_GET_CPU(env);
6010 int ret;
6011 TaskState *ts;
6012 CPUState *new_cpu;
6013 CPUArchState *new_env;
6014 unsigned int nptl_flags;
6015 sigset_t sigmask;
6016
6017 /* Emulate vfork() with fork() */
6018 if (flags & CLONE_VFORK)
6019 flags &= ~(CLONE_VFORK | CLONE_VM);
6020
6021 if (flags & CLONE_VM) {
6022 TaskState *parent_ts = (TaskState *)cpu->opaque;
6023 new_thread_info info;
6024 pthread_attr_t attr;
6025
6026 ts = g_new0(TaskState, 1);
6027 init_task_state(ts);
6028 /* we create a new CPU instance. */
6029 new_env = cpu_copy(env);
6030 /* Init regs that differ from the parent. */
6031 cpu_clone_regs(new_env, newsp);
6032 new_cpu = ENV_GET_CPU(new_env);
6033 new_cpu->opaque = ts;
6034 ts->bprm = parent_ts->bprm;
6035 ts->info = parent_ts->info;
6036 ts->signal_mask = parent_ts->signal_mask;
/* Keep the NPTL-specific bits for ourselves; don't pass them on. */
6037 nptl_flags = flags;
6038 flags &= ~CLONE_NPTL_FLAGS2;
6039
6040 if (nptl_flags & CLONE_CHILD_CLEARTID) {
6041 ts->child_tidptr = child_tidptr;
6042 }
6043
6044 if (nptl_flags & CLONE_SETTLS)
6045 cpu_set_tls (new_env, newtls);
6046
6047 /* Grab a mutex so that thread setup appears atomic. */
6048 pthread_mutex_lock(&clone_lock);
6049
6050 memset(&info, 0, sizeof(info));
6051 pthread_mutex_init(&info.mutex, NULL);
6052 pthread_mutex_lock(&info.mutex);
6053 pthread_cond_init(&info.cond, NULL);
6054 info.env = new_env;
6055 if (nptl_flags & CLONE_CHILD_SETTID)
6056 info.child_tidptr = child_tidptr;
6057 if (nptl_flags & CLONE_PARENT_SETTID)
6058 info.parent_tidptr = parent_tidptr;
6059
6060 ret = pthread_attr_init(&attr);
6061 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6062 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6063 /* It is not safe to deliver signals until the child has finished
6064 initializing, so temporarily block all signals. */
6065 sigfillset(&sigmask);
6066 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6067
6068 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6069 /* TODO: Free new CPU state if thread creation failed. */
6070
/* Restore this thread's mask; the child restores its own copy. */
6071 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6072 pthread_attr_destroy(&attr);
6073 if (ret == 0) {
6074 /* Wait for the child to initialize. */
6075 pthread_cond_wait(&info.cond, &info.mutex);
6076 ret = info.tid;
6077 if (flags & CLONE_PARENT_SETTID)
6078 put_user_u32(ret, parent_tidptr);
6079 } else {
6080 ret = -1;
6081 }
6082 pthread_mutex_unlock(&info.mutex);
6083 pthread_cond_destroy(&info.cond);
6084 pthread_mutex_destroy(&info.mutex);
6085 pthread_mutex_unlock(&clone_lock);
6086 } else {
6087 /* if no CLONE_VM, we consider it is a fork */
6088 if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0) {
6089 return -TARGET_EINVAL;
6090 }
6091
/* Avoid forking while a signal handler could run: block signals
 * for the duration of the fork. */
6092 if (block_signals()) {
6093 return -TARGET_ERESTARTSYS;
6094 }
6095
6096 fork_start();
6097 ret = fork();
6098 if (ret == 0) {
6099 /* Child Process. */
6100 rcu_after_fork();
6101 cpu_clone_regs(env, newsp);
6102 fork_end(1);
6103 /* There is a race condition here. The parent process could
6104 theoretically read the TID in the child process before the child
6105 tid is set. This would require using either ptrace
6106 (not implemented) or having *_tidptr to point at a shared memory
6107 mapping. We can't repeat the spinlock hack used above because
6108 the child process gets its own copy of the lock. */
6109 if (flags & CLONE_CHILD_SETTID)
6110 put_user_u32(gettid(), child_tidptr)6111 if (flags & CLONE_PARENT_SETTID)
6112 put_user_u32(gettid(), parent_tidptr);
6113 ts = (TaskState *)cpu->opaque;
6114 if (flags & CLONE_SETTLS)
6115 cpu_set_tls (env, newtls);
6116 if (flags & CLONE_CHILD_CLEARTID)
6117 ts->child_tidptr = child_tidptr;
6118 } else {
6119 fork_end(0);
6120 }
6121 }
6122 return ret;
6123 }
6124
6125 /* warning : doesn't handle linux specific flags... */
6125 /* warning : doesn't handle linux specific flags... */
/* Map a target fcntl command number to the host one.  Lock commands are
 * mapped to their *64 host variants so a struct flock64 can be used
 * uniformly by do_fcntl().  Returns -TARGET_EINVAL for commands with no
 * host equivalent. */
6126 static int target_to_host_fcntl_cmd(int cmd)
6127 {
6128 switch(cmd) {
6129 case TARGET_F_DUPFD:
6130 case TARGET_F_GETFD:
6131 case TARGET_F_SETFD:
6132 case TARGET_F_GETFL:
6133 case TARGET_F_SETFL:
6134 return cmd;
/* Always use the 64-bit lock variants on the host. */
6135 case TARGET_F_GETLK:
6136 return F_GETLK64;
6137 case TARGET_F_SETLK:
6138 return F_SETLK64;
6139 case TARGET_F_SETLKW:
6140 return F_SETLKW64;
6141 case TARGET_F_GETOWN:
6142 return F_GETOWN;
6143 case TARGET_F_SETOWN:
6144 return F_SETOWN;
6145 case TARGET_F_GETSIG:
6146 return F_GETSIG;
6147 case TARGET_F_SETSIG:
6148 return F_SETSIG;
6149 #if TARGET_ABI_BITS == 32
6150 case TARGET_F_GETLK64:
6151 return F_GETLK64;
6152 case TARGET_F_SETLK64:
6153 return F_SETLK64;
6154 case TARGET_F_SETLKW64:
6155 return F_SETLKW64;
6156 #endif
6157 case TARGET_F_SETLEASE:
6158 return F_SETLEASE;
6159 case TARGET_F_GETLEASE:
6160 return F_GETLEASE;
6161 #ifdef F_DUPFD_CLOEXEC
6162 case TARGET_F_DUPFD_CLOEXEC:
6163 return F_DUPFD_CLOEXEC;
6164 #endif
6165 case TARGET_F_NOTIFY:
6166 return F_NOTIFY;
6167 #ifdef F_GETOWN_EX
6168 case TARGET_F_GETOWN_EX:
6169 return F_GETOWN_EX;
6170 #endif
6171 #ifdef F_SETOWN_EX
6172 case TARGET_F_SETOWN_EX:
6173 return F_SETOWN_EX;
6174 #endif
6175 #ifdef F_SETPIPE_SZ
6176 case TARGET_F_SETPIPE_SZ:
6177 return F_SETPIPE_SZ;
6178 case TARGET_F_GETPIPE_SZ:
6179 return F_GETPIPE_SZ;
6180 #endif
6181 default:
6182 return -TARGET_EINVAL;
6183 }
/* Not reached; keeps older compilers happy. */
6184 return -TARGET_EINVAL;
6185 }
6186
/* Translation table for flock l_type values; the -1 masks mean "match
 * the whole value" rather than individual bits. */
6187 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6188 static const bitmask_transtbl flock_tbl[] = {
6189 TRANSTBL_CONVERT(F_RDLCK),
6190 TRANSTBL_CONVERT(F_WRLCK),
6191 TRANSTBL_CONVERT(F_UNLCK),
6192 TRANSTBL_CONVERT(F_EXLCK),
6193 TRANSTBL_CONVERT(F_SHLCK),
6194 { 0, 0, 0, 0 }
6195 };
6196
/* Read a target struct flock and convert it into a host struct flock64,
 * translating l_type through flock_tbl.  Returns 0 or -TARGET_EFAULT. */
6197 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6198 abi_ulong target_flock_addr)
6199 {
6200 struct target_flock *target_fl;
6201 short l_type;
6202
6203 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6204 return -TARGET_EFAULT;
6205 }
6206
6207 __get_user(l_type, &target_fl->l_type);
6208 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6209 __get_user(fl->l_whence, &target_fl->l_whence);
6210 __get_user(fl->l_start, &target_fl->l_start);
6211 __get_user(fl->l_len, &target_fl->l_len);
6212 __get_user(fl->l_pid, &target_fl->l_pid);
6213 unlock_user_struct(target_fl, target_flock_addr, 0);
6214 return 0;
6215 }
6216
/* Convert a host struct flock64 back into a target struct flock at
 * target_flock_addr.  Returns 0 or -TARGET_EFAULT. */
6217 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6218 const struct flock64 *fl)
6219 {
6220 struct target_flock *target_fl;
6221 short l_type;
6222
6223 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6224 return -TARGET_EFAULT;
6225 }
6226
6227 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6228 __put_user(l_type, &target_fl->l_type);
6229 __put_user(fl->l_whence, &target_fl->l_whence);
6230 __put_user(fl->l_start, &target_fl->l_start);
6231 __put_user(fl->l_len, &target_fl->l_len);
6232 __put_user(fl->l_pid, &target_fl->l_pid);
6233 unlock_user_struct(target_fl, target_flock_addr, 1);
6234 return 0;
6235 }
6236
/* Function-pointer types so do_fcntl64() can select the right flock64
 * converters (standard vs ARM EABI layout) at runtime. */
6237 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6238 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6239
/* ARM EABI aligns 64-bit members differently, so struct flock64 has its
 * own target layout there and needs dedicated converters. */
6240 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6241 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6242 abi_ulong target_flock_addr)
6243 {
6244 struct target_eabi_flock64 *target_fl;
6245 short l_type;
6246
6247 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6248 return -TARGET_EFAULT;
6249 }
6250
6251 __get_user(l_type, &target_fl->l_type);
6252 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6253 __get_user(fl->l_whence, &target_fl->l_whence);
6254 __get_user(fl->l_start, &target_fl->l_start);
6255 __get_user(fl->l_len, &target_fl->l_len);
6256 __get_user(fl->l_pid, &target_fl->l_pid);
6257 unlock_user_struct(target_fl, target_flock_addr, 0);
6258 return 0;
6259 }
6260
/* Inverse direction: host struct flock64 to ARM EABI target layout. */
6261 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6262 const struct flock64 *fl)
6263 {
6264 struct target_eabi_flock64 *target_fl;
6265 short l_type;
6266
6267 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6268 return -TARGET_EFAULT;
6269 }
6270
6271 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6272 __put_user(l_type, &target_fl->l_type);
6273 __put_user(fl->l_whence, &target_fl->l_whence);
6274 __put_user(fl->l_start, &target_fl->l_start);
6275 __put_user(fl->l_len, &target_fl->l_len);
6276 __put_user(fl->l_pid, &target_fl->l_pid);
6277 unlock_user_struct(target_fl, target_flock_addr, 1);
6278 return 0;
6279 }
6280 #endif
6281
/* Read a target struct flock64 and convert it into the host struct
 * flock64, translating l_type through flock_tbl.  Returns 0 or
 * -TARGET_EFAULT. */
6282 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6283 abi_ulong target_flock_addr)
6284 {
6285 struct target_flock64 *target_fl;
6286 short l_type;
6287
6288 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6289 return -TARGET_EFAULT;
6290 }
6291
6292 __get_user(l_type, &target_fl->l_type);
6293 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6294 __get_user(fl->l_whence, &target_fl->l_whence);
6295 __get_user(fl->l_start, &target_fl->l_start);
6296 __get_user(fl->l_len, &target_fl->l_len);
6297 __get_user(fl->l_pid, &target_fl->l_pid);
6298 unlock_user_struct(target_fl, target_flock_addr, 0);
6299 return 0;
6300 }
6301
/* Copy host *fl out to the guest struct flock64 at target_flock_addr.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6321
/* Emulate the guest fcntl() family.  cmd is the guest command number,
 * mapped to the host numbering by target_to_host_fcntl_cmd(); arg is
 * either an immediate value or a guest address depending on cmd.
 * Returns the (target-errno encoded) host result.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* GETLK fills in the conflicting lock; copy it back out. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* O_* flag values differ per ABI; translate the result. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        /* Plain integer argument, passed through unchanged. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: pass the guest number through untranslated. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6429
6430 #ifdef USE_UID16
6431
/* Clamp a 32-bit uid to the legacy 16-bit ABI: values above 65535 are
 * reported as the overflow uid 65534, matching kernel behaviour. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
6439
/* Clamp a 32-bit gid to the legacy 16-bit ABI: values above 65535 are
 * reported as the overflow gid 65534, matching kernel behaviour. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
6447
/* Widen a 16-bit ABI uid: the 16-bit "no change" sentinel 0xffff must
 * map to the 32-bit sentinel -1; everything else passes through. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
6455
/* Widen a 16-bit ABI gid: the 16-bit "no change" sentinel 0xffff must
 * map to the 32-bit sentinel -1; everything else passes through. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a uid/gid for the guest; 16 bits wide under USE_UID16. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* Store a uid/gid into guest memory at the 16-bit ABI width. */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6469
#else /* !USE_UID16 */
/* 32-bit uid/gid ABI: no narrowing needed, so these are identity maps
 * kept only so callers can be written once for both ABI widths. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a uid/gid for the guest; full 32 bits wide here. */
static inline int tswapid(int id)
{
    return tswap32(id);
}

/* Store a uid/gid into guest memory at the 32-bit ABI width. */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6495
6496 /* We must do direct syscalls for setting UID/GID, because we want to
6497 * implement the Linux system call semantics of "change only for this thread",
6498 * not the libc/POSIX semantics of "change for all threads in process".
6499 * (See http://ewontfix.com/17/ for more details.)
6500 * We use the 32-bit version of the syscalls if present; if it is not
6501 * then either the host architecture supports 32-bit UIDs natively with
6502 * the standard syscall, or the 16-bit UID is the best we can do.
6503 */
6504 #ifdef __NR_setuid32
6505 #define __NR_sys_setuid __NR_setuid32
6506 #else
6507 #define __NR_sys_setuid __NR_setuid
6508 #endif
6509 #ifdef __NR_setgid32
6510 #define __NR_sys_setgid __NR_setgid32
6511 #else
6512 #define __NR_sys_setgid __NR_setgid
6513 #endif
6514 #ifdef __NR_setresuid32
6515 #define __NR_sys_setresuid __NR_setresuid32
6516 #else
6517 #define __NR_sys_setresuid __NR_setresuid
6518 #endif
6519 #ifdef __NR_setresgid32
6520 #define __NR_sys_setresgid __NR_setresgid32
6521 #else
6522 #define __NR_sys_setresgid __NR_setresgid
6523 #endif
6524
6525 _syscall1(int, sys_setuid, uid_t, uid)
6526 _syscall1(int, sys_setgid, gid_t, gid)
6527 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6528 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6529
/* One-time initialisation of the syscall layer: registers the struct
 * layouts used by the ioctl thunk machinery, builds the reverse errno
 * table, and patches computed sizes into ioctl numbers declared with an
 * all-ones size field.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                /* Only pointer arguments have a thunk-computable size. */
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
6581
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from the two 32-bit halves a 32-bit
 * guest passes in a register pair; which half is the high word depends
 * on the guest's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    uint64_t hi = word0, lo = word1;
#else
    uint64_t hi = word1, lo = word0;
#endif
    return (hi << 32) | lo;
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit guests pass the whole offset in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
6597
#ifdef TARGET_NR_truncate64
/* truncate64: 32-bit guests pass the 64-bit length as a register pair
 * (arg2/arg3).  Some ABIs require the pair to start on an even register,
 * inserting an alignment slot that shifts the arguments up by one. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6611
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair handling as target_truncate64 above;
 * arg1 is the file descriptor, arg2/arg3 (possibly shifted) the length. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6625
/* Read a guest struct timespec at target_addr into *host_ts.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
6638
/* Write *host_ts out to the guest struct timespec at target_addr.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
6651
6652 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6653 abi_ulong target_addr)
6654 {
6655 struct target_itimerspec *target_itspec;
6656
6657 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6658 return -TARGET_EFAULT;
6659 }
6660
6661 host_itspec->it_interval.tv_sec =
6662 tswapal(target_itspec->it_interval.tv_sec);
6663 host_itspec->it_interval.tv_nsec =
6664 tswapal(target_itspec->it_interval.tv_nsec);
6665 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6666 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6667
6668 unlock_user_struct(target_itspec, target_addr, 1);
6669 return 0;
6670 }
6671
6672 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6673 struct itimerspec *host_its)
6674 {
6675 struct target_itimerspec *target_itspec;
6676
6677 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6678 return -TARGET_EFAULT;
6679 }
6680
6681 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6682 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6683
6684 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6685 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6686
6687 unlock_user_struct(target_itspec, target_addr, 0);
6688 return 0;
6689 }
6690
6691 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6692 abi_ulong target_addr)
6693 {
6694 struct target_sigevent *target_sevp;
6695
6696 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6697 return -TARGET_EFAULT;
6698 }
6699
6700 /* This union is awkward on 64 bit systems because it has a 32 bit
6701 * integer and a pointer in it; we follow the conversion approach
6702 * used for handling sigval types in signal.c so the guest should get
6703 * the correct value back even if we did a 64 bit byteswap and it's
6704 * using the 32 bit integer.
6705 */
6706 host_sevp->sigev_value.sival_ptr =
6707 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6708 host_sevp->sigev_signo =
6709 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6710 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6711 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6712
6713 unlock_user_struct(target_sevp, target_addr, 1);
6714 return 0;
6715 }
6716
#if defined(TARGET_NR_mlockall)
/* Translate the guest's MCL_* flag bits into their host values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = (arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0;

    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }
    return host_flags;
}
#endif
6731
/* Write a host struct stat out to the guest's stat64 layout at
 * target_addr.  ARM EABI guests use a differently padded structure and
 * take the first branch; all other targets use target_stat64 (or plain
 * target_stat where no 64-bit variant exists).
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unset fields don't leak host data. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some ABIs carry the inode in a second, differently-sized field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6794
6795 /* ??? Using host futex calls even when target atomic operations
6796 are not really atomic probably breaks things. However implementing
6797 futexes locally would make futexes shared between multiple processes
6798 tricky. However they're probably useless because guest atomic
6799 operations won't work either. */
6800 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6801 target_ulong uaddr2, int val3)
6802 {
6803 struct timespec ts, *pts;
6804 int base_op;
6805
6806 /* ??? We assume FUTEX_* constants are the same on both host
6807 and target. */
6808 #ifdef FUTEX_CMD_MASK
6809 base_op = op & FUTEX_CMD_MASK;
6810 #else
6811 base_op = op;
6812 #endif
6813 switch (base_op) {
6814 case FUTEX_WAIT:
6815 case FUTEX_WAIT_BITSET:
6816 if (timeout) {
6817 pts = &ts;
6818 target_to_host_timespec(pts, timeout);
6819 } else {
6820 pts = NULL;
6821 }
6822 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6823 pts, NULL, val3));
6824 case FUTEX_WAKE:
6825 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6826 case FUTEX_FD:
6827 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6828 case FUTEX_REQUEUE:
6829 case FUTEX_CMP_REQUEUE:
6830 case FUTEX_WAKE_OP:
6831 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6832 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6833 But the prototype takes a `struct timespec *'; insert casts
6834 to satisfy the compiler. We do not need to tswap TIMEOUT
6835 since it's not compared to guest memory. */
6836 pts = (struct timespec *)(uintptr_t) timeout;
6837 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6838 g2h(uaddr2),
6839 (base_op == FUTEX_CMP_REQUEUE
6840 ? tswap32(val3)
6841 : val3)));
6842 default:
6843 return -TARGET_ENOSYS;
6844 }
6845 }
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2).  The guest's struct file_handle begins
 * with handle_bytes, which says how much opaque handle space it
 * reserved; we mirror that into a host-side copy, make the host call,
 * then copy the byteswapped header plus opaque payload back out.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first 32-bit field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size comes from the guest unchecked, so a huge value
     * can wrap total_size.  The kernel rejects handles larger than
     * MAX_HANDLE_SZ, but bounding size before the sum would be safer --
     * TODO confirm. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
#endif
6899
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest's file_handle into
 * host byte order (payload stays opaque) and open it on the host.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first 32-bit field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    /* O_* flags differ per ABI; translate before calling the host. */
    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
6933
6934 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
6935
6936 /* signalfd siginfo conversion */
6937
/* Convert one host signalfd_siginfo record (*info) into the guest
 * representation (*tinfo).  The two pointers may alias -- the only
 * caller converts a read buffer in place -- so all host values are read
 * from info before the corresponding tinfo field is written.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Test the *host* values from info: the old code read tinfo here,
     * which was only correct because the caller passes aliased buffers. */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Was tswap32(tinfo->ssi_errno): swap the source value, not the
     * destination (identical only when the buffers alias). */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6975
6976 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
6977 {
6978 int i;
6979
6980 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
6981 host_to_target_signalfd_siginfo(buf + i, buf + i);
6982 }
6983
6984 return len;
6985 }
6986
/* fd translator: byteswaps siginfo records on every read() of a signalfd. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
6990
/* Emulate signalfd4(2): translate the guest sigset and flags, create the
 * host signalfd, and register the data translator so reads are converted.
 * Returns the new fd or a target errno.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* The kernel only accepts these two flags; reject anything else. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Ensure subsequent read()s return guest-format siginfo. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7018 #endif
7019
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    return status;
}
7033
/* Synthesise /proc/self/cmdline for the guest: stream the host's own
 * cmdline into fd, but drop the first NUL-terminated string (the path
 * of the qemu binary) so argv[0] is the guest program.
 * Returns 0 on success, -1 with errno set on failure.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            /* Preserve the read() errno across the close(). */
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7083
7084 static int open_self_maps(void *cpu_env, int fd)
7085 {
7086 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7087 TaskState *ts = cpu->opaque;
7088 FILE *fp;
7089 char *line = NULL;
7090 size_t len = 0;
7091 ssize_t read;
7092
7093 fp = fopen("/proc/self/maps", "r");
7094 if (fp == NULL) {
7095 return -1;
7096 }
7097
7098 while ((read = getline(&line, &len, fp)) != -1) {
7099 int fields, dev_maj, dev_min, inode;
7100 uint64_t min, max, offset;
7101 char flag_r, flag_w, flag_x, flag_p;
7102 char path[512] = "";
7103 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7104 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7105 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7106
7107 if ((fields < 10) || (fields > 11)) {
7108 continue;
7109 }
7110 if (h2g_valid(min)) {
7111 int flags = page_get_flags(h2g(min));
7112 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7113 if (page_check_range(h2g(min), max - min, flags) == -1) {
7114 continue;
7115 }
7116 if (h2g(min) == ts->info->stack_limit) {
7117 pstrcpy(path, sizeof(path), " [stack]");
7118 }
7119 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7120 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7121 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7122 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7123 path[0] ? " " : "", path);
7124 }
7125 }
7126
7127 free(line);
7128 fclose(fp);
7129
7130 return 0;
7131 }
7132
/* Synthesise /proc/self/stat for the guest.  Only the fields guest code
 * commonly reads are real (pid, comm, start of stack); the remaining
 * columns are emitted as zeros to keep the 44-field layout parseable.
 * Returns 0 on success, -1 on a short write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
7169
/* Synthesise /proc/self/auxv for the guest: the auxiliary vector was
 * saved on the guest stack at exec time, so copy it from guest memory
 * into fd and rewind the fd for the reader.  Always returns 0; an
 * unreadable auxv region simply yields an empty file.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len were advanced by the loop, so this
         * unlock does not describe the same (pointer, length) pair that
         * was locked.  Harmless for a VERIFY_READ lock on direct-mapped
         * builds, but worth confirming against the lock_user contract. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
7199
/* Return 1 when filename names /proc/self/<entry> or /proc/<ourpid>/<entry>,
 * 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;

    if (strncmp(p, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    p += strlen("/proc/");

    if (strncmp(p, "self/", strlen("self/")) == 0) {
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        /* Numeric pid directory: only our own pid counts as "self". */
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(p, myself, strlen(myself)) != 0) {
            return 0;
        }
        p += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
7223
7224 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path match, used for /proc entries faked at an absolute path. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7229
7230 static int open_net_route(void *cpu_env, int fd)
7231 {
7232 FILE *fp;
7233 char *line = NULL;
7234 size_t len = 0;
7235 ssize_t read;
7236
7237 fp = fopen("/proc/net/route", "r");
7238 if (fp == NULL) {
7239 return -1;
7240 }
7241
7242 /* read header */
7243
7244 read = getline(&line, &len, fp);
7245 dprintf(fd, "%s", line);
7246
7247 /* read routes */
7248
7249 while ((read = getline(&line, &len, fp)) != -1) {
7250 char iface[16];
7251 uint32_t dest, gw, mask;
7252 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7253 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7254 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7255 &mask, &mtu, &window, &irtt);
7256 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7257 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7258 metric, tswap32(mask), mtu, window, irtt);
7259 }
7260
7261 free(line);
7262 fclose(fp);
7263
7264 return 0;
7265 }
7266 #endif
7267
/* Emulate openat(2), intercepting the /proc entries whose contents must
 * be faked for the guest (maps, stat, auxv, cmdline, and on cross-endian
 * hosts /proc/net/route).  Faked entries are served from an unlinked
 * temporary file filled by the matching generator.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                      /* entry (or path) to match */
        int (*fill)(void *cpu_env, int fd);        /* writes fake contents to fd */
        int (*cmp)(const char *s1, const char *s2); /* how to match pathname */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* /proc/self/exe must name the guest binary, not qemu itself. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7327
7328 #define TIMER_MAGIC 0x0caf0000
7329 #define TIMER_MAGIC_MASK 0xffff0000
7330
7331 /* Convert QEMU provided timer ID back to internal 16bit index format */
7332 static target_timer_t get_timer_id(abi_long arg)
7333 {
7334 target_timer_t timerid = arg;
7335
7336 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7337 return -TARGET_EINVAL;
7338 }
7339
7340 timerid &= 0xffff;
7341
7342 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7343 return -TARGET_EINVAL;
7344 }
7345
7346 return timerid;
7347 }
7348
7349 /* do_syscall() should always have a single exit point at the end so
7350 that actions, such as logging of syscall results, can be performed.
7351 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7352 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7353 abi_long arg2, abi_long arg3, abi_long arg4,
7354 abi_long arg5, abi_long arg6, abi_long arg7,
7355 abi_long arg8)
7356 {
7357 CPUState *cpu = ENV_GET_CPU(cpu_env);
7358 abi_long ret;
7359 struct stat st;
7360 struct statfs stfs;
7361 void *p;
7362
7363 #if defined(DEBUG_ERESTARTSYS)
7364 /* Debug-only code for exercising the syscall-restart code paths
7365 * in the per-architecture cpu main loops: restart every syscall
7366 * the guest makes once before letting it through.
7367 */
7368 {
7369 static int flag;
7370
7371 flag = !flag;
7372 if (flag) {
7373 return -TARGET_ERESTARTSYS;
7374 }
7375 }
7376 #endif
7377
7378 #ifdef DEBUG
7379 gemu_log("syscall %d", num);
7380 #endif
7381 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7382 if(do_strace)
7383 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7384
7385 switch(num) {
7386 case TARGET_NR_exit:
7387 /* In old applications this may be used to implement _exit(2).
7388 However in threaded applictions it is used for thread termination,
7389 and _exit_group is used for application termination.
7390 Do thread termination if we have more then one thread. */
7391
7392 if (block_signals()) {
7393 ret = -TARGET_ERESTARTSYS;
7394 break;
7395 }
7396
7397 if (CPU_NEXT(first_cpu)) {
7398 TaskState *ts;
7399
7400 cpu_list_lock();
7401 /* Remove the CPU from the list. */
7402 QTAILQ_REMOVE(&cpus, cpu, node);
7403 cpu_list_unlock();
7404 ts = cpu->opaque;
7405 if (ts->child_tidptr) {
7406 put_user_u32(0, ts->child_tidptr);
7407 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7408 NULL, NULL, 0);
7409 }
7410 thread_cpu = NULL;
7411 object_unref(OBJECT(cpu));
7412 g_free(ts);
7413 rcu_unregister_thread();
7414 pthread_exit(NULL);
7415 }
7416 #ifdef TARGET_GPROF
7417 _mcleanup();
7418 #endif
7419 gdb_exit(cpu_env, arg1);
7420 _exit(arg1);
7421 ret = 0; /* avoid warning */
7422 break;
7423 case TARGET_NR_read:
7424 if (arg3 == 0)
7425 ret = 0;
7426 else {
7427 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7428 goto efault;
7429 ret = get_errno(safe_read(arg1, p, arg3));
7430 if (ret >= 0 &&
7431 fd_trans_host_to_target_data(arg1)) {
7432 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7433 }
7434 unlock_user(p, arg2, ret);
7435 }
7436 break;
7437 case TARGET_NR_write:
7438 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7439 goto efault;
7440 ret = get_errno(safe_write(arg1, p, arg3));
7441 unlock_user(p, arg2, 0);
7442 break;
7443 #ifdef TARGET_NR_open
7444 case TARGET_NR_open:
7445 if (!(p = lock_user_string(arg1)))
7446 goto efault;
7447 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7448 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7449 arg3));
7450 fd_trans_unregister(ret);
7451 unlock_user(p, arg1, 0);
7452 break;
7453 #endif
7454 case TARGET_NR_openat:
7455 if (!(p = lock_user_string(arg2)))
7456 goto efault;
7457 ret = get_errno(do_openat(cpu_env, arg1, p,
7458 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7459 arg4));
7460 fd_trans_unregister(ret);
7461 unlock_user(p, arg2, 0);
7462 break;
7463 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7464 case TARGET_NR_name_to_handle_at:
7465 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7466 break;
7467 #endif
7468 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7469 case TARGET_NR_open_by_handle_at:
7470 ret = do_open_by_handle_at(arg1, arg2, arg3);
7471 fd_trans_unregister(ret);
7472 break;
7473 #endif
7474 case TARGET_NR_close:
7475 fd_trans_unregister(arg1);
7476 ret = get_errno(close(arg1));
7477 break;
7478 case TARGET_NR_brk:
7479 ret = do_brk(arg1);
7480 break;
7481 #ifdef TARGET_NR_fork
7482 case TARGET_NR_fork:
7483 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7484 break;
7485 #endif
7486 #ifdef TARGET_NR_waitpid
7487 case TARGET_NR_waitpid:
7488 {
7489 int status;
7490 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7491 if (!is_error(ret) && arg2 && ret
7492 && put_user_s32(host_to_target_waitstatus(status), arg2))
7493 goto efault;
7494 }
7495 break;
7496 #endif
7497 #ifdef TARGET_NR_waitid
7498 case TARGET_NR_waitid:
7499 {
7500 siginfo_t info;
7501 info.si_pid = 0;
7502 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7503 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7504 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7505 goto efault;
7506 host_to_target_siginfo(p, &info);
7507 unlock_user(p, arg3, sizeof(target_siginfo_t));
7508 }
7509 }
7510 break;
7511 #endif
7512 #ifdef TARGET_NR_creat /* not on alpha */
7513 case TARGET_NR_creat:
7514 if (!(p = lock_user_string(arg1)))
7515 goto efault;
7516 ret = get_errno(creat(p, arg2));
7517 fd_trans_unregister(ret);
7518 unlock_user(p, arg1, 0);
7519 break;
7520 #endif
7521 #ifdef TARGET_NR_link
7522 case TARGET_NR_link:
7523 {
7524 void * p2;
7525 p = lock_user_string(arg1);
7526 p2 = lock_user_string(arg2);
7527 if (!p || !p2)
7528 ret = -TARGET_EFAULT;
7529 else
7530 ret = get_errno(link(p, p2));
7531 unlock_user(p2, arg2, 0);
7532 unlock_user(p, arg1, 0);
7533 }
7534 break;
7535 #endif
7536 #if defined(TARGET_NR_linkat)
7537 case TARGET_NR_linkat:
7538 {
7539 void * p2 = NULL;
7540 if (!arg2 || !arg4)
7541 goto efault;
7542 p = lock_user_string(arg2);
7543 p2 = lock_user_string(arg4);
7544 if (!p || !p2)
7545 ret = -TARGET_EFAULT;
7546 else
7547 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7548 unlock_user(p, arg2, 0);
7549 unlock_user(p2, arg4, 0);
7550 }
7551 break;
7552 #endif
7553 #ifdef TARGET_NR_unlink
7554 case TARGET_NR_unlink:
7555 if (!(p = lock_user_string(arg1)))
7556 goto efault;
7557 ret = get_errno(unlink(p));
7558 unlock_user(p, arg1, 0);
7559 break;
7560 #endif
7561 #if defined(TARGET_NR_unlinkat)
7562 case TARGET_NR_unlinkat:
7563 if (!(p = lock_user_string(arg2)))
7564 goto efault;
7565 ret = get_errno(unlinkat(arg1, p, arg3));
7566 unlock_user(p, arg2, 0);
7567 break;
7568 #endif
7569 case TARGET_NR_execve:
7570 {
7571 char **argp, **envp;
7572 int argc, envc;
7573 abi_ulong gp;
7574 abi_ulong guest_argp;
7575 abi_ulong guest_envp;
7576 abi_ulong addr;
7577 char **q;
7578 int total_size = 0;
7579
7580 argc = 0;
7581 guest_argp = arg2;
7582 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7583 if (get_user_ual(addr, gp))
7584 goto efault;
7585 if (!addr)
7586 break;
7587 argc++;
7588 }
7589 envc = 0;
7590 guest_envp = arg3;
7591 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7592 if (get_user_ual(addr, gp))
7593 goto efault;
7594 if (!addr)
7595 break;
7596 envc++;
7597 }
7598
7599 argp = alloca((argc + 1) * sizeof(void *));
7600 envp = alloca((envc + 1) * sizeof(void *));
7601
7602 for (gp = guest_argp, q = argp; gp;
7603 gp += sizeof(abi_ulong), q++) {
7604 if (get_user_ual(addr, gp))
7605 goto execve_efault;
7606 if (!addr)
7607 break;
7608 if (!(*q = lock_user_string(addr)))
7609 goto execve_efault;
7610 total_size += strlen(*q) + 1;
7611 }
7612 *q = NULL;
7613
7614 for (gp = guest_envp, q = envp; gp;
7615 gp += sizeof(abi_ulong), q++) {
7616 if (get_user_ual(addr, gp))
7617 goto execve_efault;
7618 if (!addr)
7619 break;
7620 if (!(*q = lock_user_string(addr)))
7621 goto execve_efault;
7622 total_size += strlen(*q) + 1;
7623 }
7624 *q = NULL;
7625
7626 if (!(p = lock_user_string(arg1)))
7627 goto execve_efault;
7628 /* Although execve() is not an interruptible syscall it is
7629 * a special case where we must use the safe_syscall wrapper:
7630 * if we allow a signal to happen before we make the host
7631 * syscall then we will 'lose' it, because at the point of
7632 * execve the process leaves QEMU's control. So we use the
7633 * safe syscall wrapper to ensure that we either take the
7634 * signal as a guest signal, or else it does not happen
7635 * before the execve completes and makes it the other
7636 * program's problem.
7637 */
7638 ret = get_errno(safe_execve(p, argp, envp));
7639 unlock_user(p, arg1, 0);
7640
7641 goto execve_end;
7642
7643 execve_efault:
7644 ret = -TARGET_EFAULT;
7645
7646 execve_end:
7647 for (gp = guest_argp, q = argp; *q;
7648 gp += sizeof(abi_ulong), q++) {
7649 if (get_user_ual(addr, gp)
7650 || !addr)
7651 break;
7652 unlock_user(*q, addr, 0);
7653 }
7654 for (gp = guest_envp, q = envp; *q;
7655 gp += sizeof(abi_ulong), q++) {
7656 if (get_user_ual(addr, gp)
7657 || !addr)
7658 break;
7659 unlock_user(*q, addr, 0);
7660 }
7661 }
7662 break;
7663 case TARGET_NR_chdir:
7664 if (!(p = lock_user_string(arg1)))
7665 goto efault;
7666 ret = get_errno(chdir(p));
7667 unlock_user(p, arg1, 0);
7668 break;
7669 #ifdef TARGET_NR_time
7670 case TARGET_NR_time:
7671 {
7672 time_t host_time;
7673 ret = get_errno(time(&host_time));
7674 if (!is_error(ret)
7675 && arg1
7676 && put_user_sal(host_time, arg1))
7677 goto efault;
7678 }
7679 break;
7680 #endif
7681 #ifdef TARGET_NR_mknod
7682 case TARGET_NR_mknod:
7683 if (!(p = lock_user_string(arg1)))
7684 goto efault;
7685 ret = get_errno(mknod(p, arg2, arg3));
7686 unlock_user(p, arg1, 0);
7687 break;
7688 #endif
7689 #if defined(TARGET_NR_mknodat)
7690 case TARGET_NR_mknodat:
7691 if (!(p = lock_user_string(arg2)))
7692 goto efault;
7693 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7694 unlock_user(p, arg2, 0);
7695 break;
7696 #endif
7697 #ifdef TARGET_NR_chmod
7698 case TARGET_NR_chmod:
7699 if (!(p = lock_user_string(arg1)))
7700 goto efault;
7701 ret = get_errno(chmod(p, arg2));
7702 unlock_user(p, arg1, 0);
7703 break;
7704 #endif
7705 #ifdef TARGET_NR_break
7706 case TARGET_NR_break:
7707 goto unimplemented;
7708 #endif
7709 #ifdef TARGET_NR_oldstat
7710 case TARGET_NR_oldstat:
7711 goto unimplemented;
7712 #endif
7713 case TARGET_NR_lseek:
7714 ret = get_errno(lseek(arg1, arg2, arg3));
7715 break;
7716 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7717 /* Alpha specific */
7718 case TARGET_NR_getxpid:
7719 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7720 ret = get_errno(getpid());
7721 break;
7722 #endif
7723 #ifdef TARGET_NR_getpid
7724 case TARGET_NR_getpid:
7725 ret = get_errno(getpid());
7726 break;
7727 #endif
7728 case TARGET_NR_mount:
7729 {
7730 /* need to look at the data field */
7731 void *p2, *p3;
7732
7733 if (arg1) {
7734 p = lock_user_string(arg1);
7735 if (!p) {
7736 goto efault;
7737 }
7738 } else {
7739 p = NULL;
7740 }
7741
7742 p2 = lock_user_string(arg2);
7743 if (!p2) {
7744 if (arg1) {
7745 unlock_user(p, arg1, 0);
7746 }
7747 goto efault;
7748 }
7749
7750 if (arg3) {
7751 p3 = lock_user_string(arg3);
7752 if (!p3) {
7753 if (arg1) {
7754 unlock_user(p, arg1, 0);
7755 }
7756 unlock_user(p2, arg2, 0);
7757 goto efault;
7758 }
7759 } else {
7760 p3 = NULL;
7761 }
7762
7763 /* FIXME - arg5 should be locked, but it isn't clear how to
7764 * do that since it's not guaranteed to be a NULL-terminated
7765 * string.
7766 */
7767 if (!arg5) {
7768 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7769 } else {
7770 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7771 }
7772 ret = get_errno(ret);
7773
7774 if (arg1) {
7775 unlock_user(p, arg1, 0);
7776 }
7777 unlock_user(p2, arg2, 0);
7778 if (arg3) {
7779 unlock_user(p3, arg3, 0);
7780 }
7781 }
7782 break;
7783 #ifdef TARGET_NR_umount
7784 case TARGET_NR_umount:
7785 if (!(p = lock_user_string(arg1)))
7786 goto efault;
7787 ret = get_errno(umount(p));
7788 unlock_user(p, arg1, 0);
7789 break;
7790 #endif
7791 #ifdef TARGET_NR_stime /* not on alpha */
7792 case TARGET_NR_stime:
7793 {
7794 time_t host_time;
7795 if (get_user_sal(host_time, arg1))
7796 goto efault;
7797 ret = get_errno(stime(&host_time));
7798 }
7799 break;
7800 #endif
7801 case TARGET_NR_ptrace:
7802 goto unimplemented;
7803 #ifdef TARGET_NR_alarm /* not on alpha */
7804 case TARGET_NR_alarm:
7805 ret = alarm(arg1);
7806 break;
7807 #endif
7808 #ifdef TARGET_NR_oldfstat
7809 case TARGET_NR_oldfstat:
7810 goto unimplemented;
7811 #endif
7812 #ifdef TARGET_NR_pause /* not on alpha */
7813 case TARGET_NR_pause:
7814 if (!block_signals()) {
7815 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7816 }
7817 ret = -TARGET_EINTR;
7818 break;
7819 #endif
7820 #ifdef TARGET_NR_utime
7821 case TARGET_NR_utime:
7822 {
7823 struct utimbuf tbuf, *host_tbuf;
7824 struct target_utimbuf *target_tbuf;
7825 if (arg2) {
7826 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7827 goto efault;
7828 tbuf.actime = tswapal(target_tbuf->actime);
7829 tbuf.modtime = tswapal(target_tbuf->modtime);
7830 unlock_user_struct(target_tbuf, arg2, 0);
7831 host_tbuf = &tbuf;
7832 } else {
7833 host_tbuf = NULL;
7834 }
7835 if (!(p = lock_user_string(arg1)))
7836 goto efault;
7837 ret = get_errno(utime(p, host_tbuf));
7838 unlock_user(p, arg1, 0);
7839 }
7840 break;
7841 #endif
7842 #ifdef TARGET_NR_utimes
7843 case TARGET_NR_utimes:
7844 {
7845 struct timeval *tvp, tv[2];
7846 if (arg2) {
7847 if (copy_from_user_timeval(&tv[0], arg2)
7848 || copy_from_user_timeval(&tv[1],
7849 arg2 + sizeof(struct target_timeval)))
7850 goto efault;
7851 tvp = tv;
7852 } else {
7853 tvp = NULL;
7854 }
7855 if (!(p = lock_user_string(arg1)))
7856 goto efault;
7857 ret = get_errno(utimes(p, tvp));
7858 unlock_user(p, arg1, 0);
7859 }
7860 break;
7861 #endif
7862 #if defined(TARGET_NR_futimesat)
7863 case TARGET_NR_futimesat:
7864 {
7865 struct timeval *tvp, tv[2];
7866 if (arg3) {
7867 if (copy_from_user_timeval(&tv[0], arg3)
7868 || copy_from_user_timeval(&tv[1],
7869 arg3 + sizeof(struct target_timeval)))
7870 goto efault;
7871 tvp = tv;
7872 } else {
7873 tvp = NULL;
7874 }
7875 if (!(p = lock_user_string(arg2)))
7876 goto efault;
7877 ret = get_errno(futimesat(arg1, path(p), tvp));
7878 unlock_user(p, arg2, 0);
7879 }
7880 break;
7881 #endif
7882 #ifdef TARGET_NR_stty
7883 case TARGET_NR_stty:
7884 goto unimplemented;
7885 #endif
7886 #ifdef TARGET_NR_gtty
7887 case TARGET_NR_gtty:
7888 goto unimplemented;
7889 #endif
7890 #ifdef TARGET_NR_access
7891 case TARGET_NR_access:
7892 if (!(p = lock_user_string(arg1)))
7893 goto efault;
7894 ret = get_errno(access(path(p), arg2));
7895 unlock_user(p, arg1, 0);
7896 break;
7897 #endif
7898 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7899 case TARGET_NR_faccessat:
7900 if (!(p = lock_user_string(arg2)))
7901 goto efault;
7902 ret = get_errno(faccessat(arg1, p, arg3, 0));
7903 unlock_user(p, arg2, 0);
7904 break;
7905 #endif
7906 #ifdef TARGET_NR_nice /* not on alpha */
7907 case TARGET_NR_nice:
7908 ret = get_errno(nice(arg1));
7909 break;
7910 #endif
7911 #ifdef TARGET_NR_ftime
7912 case TARGET_NR_ftime:
7913 goto unimplemented;
7914 #endif
7915 case TARGET_NR_sync:
7916 sync();
7917 ret = 0;
7918 break;
7919 case TARGET_NR_kill:
7920 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
7921 break;
7922 #ifdef TARGET_NR_rename
7923 case TARGET_NR_rename:
7924 {
7925 void *p2;
7926 p = lock_user_string(arg1);
7927 p2 = lock_user_string(arg2);
7928 if (!p || !p2)
7929 ret = -TARGET_EFAULT;
7930 else
7931 ret = get_errno(rename(p, p2));
7932 unlock_user(p2, arg2, 0);
7933 unlock_user(p, arg1, 0);
7934 }
7935 break;
7936 #endif
7937 #if defined(TARGET_NR_renameat)
7938 case TARGET_NR_renameat:
7939 {
7940 void *p2;
7941 p = lock_user_string(arg2);
7942 p2 = lock_user_string(arg4);
7943 if (!p || !p2)
7944 ret = -TARGET_EFAULT;
7945 else
7946 ret = get_errno(renameat(arg1, p, arg3, p2));
7947 unlock_user(p2, arg4, 0);
7948 unlock_user(p, arg2, 0);
7949 }
7950 break;
7951 #endif
7952 #ifdef TARGET_NR_mkdir
7953 case TARGET_NR_mkdir:
7954 if (!(p = lock_user_string(arg1)))
7955 goto efault;
7956 ret = get_errno(mkdir(p, arg2));
7957 unlock_user(p, arg1, 0);
7958 break;
7959 #endif
7960 #if defined(TARGET_NR_mkdirat)
7961 case TARGET_NR_mkdirat:
7962 if (!(p = lock_user_string(arg2)))
7963 goto efault;
7964 ret = get_errno(mkdirat(arg1, p, arg3));
7965 unlock_user(p, arg2, 0);
7966 break;
7967 #endif
7968 #ifdef TARGET_NR_rmdir
7969 case TARGET_NR_rmdir:
7970 if (!(p = lock_user_string(arg1)))
7971 goto efault;
7972 ret = get_errno(rmdir(p));
7973 unlock_user(p, arg1, 0);
7974 break;
7975 #endif
7976 case TARGET_NR_dup:
7977 ret = get_errno(dup(arg1));
7978 if (ret >= 0) {
7979 fd_trans_dup(arg1, ret);
7980 }
7981 break;
7982 #ifdef TARGET_NR_pipe
7983 case TARGET_NR_pipe:
7984 ret = do_pipe(cpu_env, arg1, 0, 0);
7985 break;
7986 #endif
7987 #ifdef TARGET_NR_pipe2
7988 case TARGET_NR_pipe2:
7989 ret = do_pipe(cpu_env, arg1,
7990 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
7991 break;
7992 #endif
7993 case TARGET_NR_times:
7994 {
7995 struct target_tms *tmsp;
7996 struct tms tms;
7997 ret = get_errno(times(&tms));
7998 if (arg1) {
7999 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8000 if (!tmsp)
8001 goto efault;
8002 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8003 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8004 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8005 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8006 }
8007 if (!is_error(ret))
8008 ret = host_to_target_clock_t(ret);
8009 }
8010 break;
8011 #ifdef TARGET_NR_prof
8012 case TARGET_NR_prof:
8013 goto unimplemented;
8014 #endif
8015 #ifdef TARGET_NR_signal
8016 case TARGET_NR_signal:
8017 goto unimplemented;
8018 #endif
8019 case TARGET_NR_acct:
8020 if (arg1 == 0) {
8021 ret = get_errno(acct(NULL));
8022 } else {
8023 if (!(p = lock_user_string(arg1)))
8024 goto efault;
8025 ret = get_errno(acct(path(p)));
8026 unlock_user(p, arg1, 0);
8027 }
8028 break;
8029 #ifdef TARGET_NR_umount2
8030 case TARGET_NR_umount2:
8031 if (!(p = lock_user_string(arg1)))
8032 goto efault;
8033 ret = get_errno(umount2(p, arg2));
8034 unlock_user(p, arg1, 0);
8035 break;
8036 #endif
8037 #ifdef TARGET_NR_lock
8038 case TARGET_NR_lock:
8039 goto unimplemented;
8040 #endif
8041 case TARGET_NR_ioctl:
8042 ret = do_ioctl(arg1, arg2, arg3);
8043 break;
8044 case TARGET_NR_fcntl:
8045 ret = do_fcntl(arg1, arg2, arg3);
8046 break;
8047 #ifdef TARGET_NR_mpx
8048 case TARGET_NR_mpx:
8049 goto unimplemented;
8050 #endif
8051 case TARGET_NR_setpgid:
8052 ret = get_errno(setpgid(arg1, arg2));
8053 break;
8054 #ifdef TARGET_NR_ulimit
8055 case TARGET_NR_ulimit:
8056 goto unimplemented;
8057 #endif
8058 #ifdef TARGET_NR_oldolduname
8059 case TARGET_NR_oldolduname:
8060 goto unimplemented;
8061 #endif
8062 case TARGET_NR_umask:
8063 ret = get_errno(umask(arg1));
8064 break;
8065 case TARGET_NR_chroot:
8066 if (!(p = lock_user_string(arg1)))
8067 goto efault;
8068 ret = get_errno(chroot(p));
8069 unlock_user(p, arg1, 0);
8070 break;
8071 #ifdef TARGET_NR_ustat
8072 case TARGET_NR_ustat:
8073 goto unimplemented;
8074 #endif
8075 #ifdef TARGET_NR_dup2
8076 case TARGET_NR_dup2:
8077 ret = get_errno(dup2(arg1, arg2));
8078 if (ret >= 0) {
8079 fd_trans_dup(arg1, arg2);
8080 }
8081 break;
8082 #endif
8083 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8084 case TARGET_NR_dup3:
8085 ret = get_errno(dup3(arg1, arg2, arg3));
8086 if (ret >= 0) {
8087 fd_trans_dup(arg1, arg2);
8088 }
8089 break;
8090 #endif
8091 #ifdef TARGET_NR_getppid /* not on alpha */
8092 case TARGET_NR_getppid:
8093 ret = get_errno(getppid());
8094 break;
8095 #endif
8096 #ifdef TARGET_NR_getpgrp
8097 case TARGET_NR_getpgrp:
8098 ret = get_errno(getpgrp());
8099 break;
8100 #endif
8101 case TARGET_NR_setsid:
8102 ret = get_errno(setsid());
8103 break;
8104 #ifdef TARGET_NR_sigaction
8105 case TARGET_NR_sigaction:
8106 {
8107 #if defined(TARGET_ALPHA)
8108 struct target_sigaction act, oact, *pact = 0;
8109 struct target_old_sigaction *old_act;
8110 if (arg2) {
8111 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8112 goto efault;
8113 act._sa_handler = old_act->_sa_handler;
8114 target_siginitset(&act.sa_mask, old_act->sa_mask);
8115 act.sa_flags = old_act->sa_flags;
8116 act.sa_restorer = 0;
8117 unlock_user_struct(old_act, arg2, 0);
8118 pact = &act;
8119 }
8120 ret = get_errno(do_sigaction(arg1, pact, &oact));
8121 if (!is_error(ret) && arg3) {
8122 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8123 goto efault;
8124 old_act->_sa_handler = oact._sa_handler;
8125 old_act->sa_mask = oact.sa_mask.sig[0];
8126 old_act->sa_flags = oact.sa_flags;
8127 unlock_user_struct(old_act, arg3, 1);
8128 }
8129 #elif defined(TARGET_MIPS)
8130 struct target_sigaction act, oact, *pact, *old_act;
8131
8132 if (arg2) {
8133 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8134 goto efault;
8135 act._sa_handler = old_act->_sa_handler;
8136 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8137 act.sa_flags = old_act->sa_flags;
8138 unlock_user_struct(old_act, arg2, 0);
8139 pact = &act;
8140 } else {
8141 pact = NULL;
8142 }
8143
8144 ret = get_errno(do_sigaction(arg1, pact, &oact));
8145
8146 if (!is_error(ret) && arg3) {
8147 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8148 goto efault;
8149 old_act->_sa_handler = oact._sa_handler;
8150 old_act->sa_flags = oact.sa_flags;
8151 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8152 old_act->sa_mask.sig[1] = 0;
8153 old_act->sa_mask.sig[2] = 0;
8154 old_act->sa_mask.sig[3] = 0;
8155 unlock_user_struct(old_act, arg3, 1);
8156 }
8157 #else
8158 struct target_old_sigaction *old_act;
8159 struct target_sigaction act, oact, *pact;
8160 if (arg2) {
8161 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8162 goto efault;
8163 act._sa_handler = old_act->_sa_handler;
8164 target_siginitset(&act.sa_mask, old_act->sa_mask);
8165 act.sa_flags = old_act->sa_flags;
8166 act.sa_restorer = old_act->sa_restorer;
8167 unlock_user_struct(old_act, arg2, 0);
8168 pact = &act;
8169 } else {
8170 pact = NULL;
8171 }
8172 ret = get_errno(do_sigaction(arg1, pact, &oact));
8173 if (!is_error(ret) && arg3) {
8174 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8175 goto efault;
8176 old_act->_sa_handler = oact._sa_handler;
8177 old_act->sa_mask = oact.sa_mask.sig[0];
8178 old_act->sa_flags = oact.sa_flags;
8179 old_act->sa_restorer = oact.sa_restorer;
8180 unlock_user_struct(old_act, arg3, 1);
8181 }
8182 #endif
8183 }
8184 break;
8185 #endif
8186 case TARGET_NR_rt_sigaction:
8187 {
8188 #if defined(TARGET_ALPHA)
8189 struct target_sigaction act, oact, *pact = 0;
8190 struct target_rt_sigaction *rt_act;
8191
8192 if (arg4 != sizeof(target_sigset_t)) {
8193 ret = -TARGET_EINVAL;
8194 break;
8195 }
8196 if (arg2) {
8197 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8198 goto efault;
8199 act._sa_handler = rt_act->_sa_handler;
8200 act.sa_mask = rt_act->sa_mask;
8201 act.sa_flags = rt_act->sa_flags;
8202 act.sa_restorer = arg5;
8203 unlock_user_struct(rt_act, arg2, 0);
8204 pact = &act;
8205 }
8206 ret = get_errno(do_sigaction(arg1, pact, &oact));
8207 if (!is_error(ret) && arg3) {
8208 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8209 goto efault;
8210 rt_act->_sa_handler = oact._sa_handler;
8211 rt_act->sa_mask = oact.sa_mask;
8212 rt_act->sa_flags = oact.sa_flags;
8213 unlock_user_struct(rt_act, arg3, 1);
8214 }
8215 #else
8216 struct target_sigaction *act;
8217 struct target_sigaction *oact;
8218
8219 if (arg4 != sizeof(target_sigset_t)) {
8220 ret = -TARGET_EINVAL;
8221 break;
8222 }
8223 if (arg2) {
8224 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8225 goto efault;
8226 } else
8227 act = NULL;
8228 if (arg3) {
8229 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8230 ret = -TARGET_EFAULT;
8231 goto rt_sigaction_fail;
8232 }
8233 } else
8234 oact = NULL;
8235 ret = get_errno(do_sigaction(arg1, act, oact));
8236 rt_sigaction_fail:
8237 if (act)
8238 unlock_user_struct(act, arg2, 0);
8239 if (oact)
8240 unlock_user_struct(oact, arg3, 1);
8241 #endif
8242 }
8243 break;
8244 #ifdef TARGET_NR_sgetmask /* not on alpha */
8245 case TARGET_NR_sgetmask:
8246 {
8247 sigset_t cur_set;
8248 abi_ulong target_set;
8249 ret = do_sigprocmask(0, NULL, &cur_set);
8250 if (!ret) {
8251 host_to_target_old_sigset(&target_set, &cur_set);
8252 ret = target_set;
8253 }
8254 }
8255 break;
8256 #endif
8257 #ifdef TARGET_NR_ssetmask /* not on alpha */
8258 case TARGET_NR_ssetmask:
8259 {
8260 sigset_t set, oset, cur_set;
8261 abi_ulong target_set = arg1;
8262 /* We only have one word of the new mask so we must read
8263 * the rest of it with do_sigprocmask() and OR in this word.
8264 * We are guaranteed that a do_sigprocmask() that only queries
8265 * the signal mask will not fail.
8266 */
8267 ret = do_sigprocmask(0, NULL, &cur_set);
8268 assert(!ret);
8269 target_to_host_old_sigset(&set, &target_set);
8270 sigorset(&set, &set, &cur_set);
8271 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8272 if (!ret) {
8273 host_to_target_old_sigset(&target_set, &oset);
8274 ret = target_set;
8275 }
8276 }
8277 break;
8278 #endif
8279 #ifdef TARGET_NR_sigprocmask
8280 case TARGET_NR_sigprocmask:
8281 {
8282 #if defined(TARGET_ALPHA)
8283 sigset_t set, oldset;
8284 abi_ulong mask;
8285 int how;
8286
8287 switch (arg1) {
8288 case TARGET_SIG_BLOCK:
8289 how = SIG_BLOCK;
8290 break;
8291 case TARGET_SIG_UNBLOCK:
8292 how = SIG_UNBLOCK;
8293 break;
8294 case TARGET_SIG_SETMASK:
8295 how = SIG_SETMASK;
8296 break;
8297 default:
8298 ret = -TARGET_EINVAL;
8299 goto fail;
8300 }
8301 mask = arg2;
8302 target_to_host_old_sigset(&set, &mask);
8303
8304 ret = do_sigprocmask(how, &set, &oldset);
8305 if (!is_error(ret)) {
8306 host_to_target_old_sigset(&mask, &oldset);
8307 ret = mask;
8308 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8309 }
8310 #else
8311 sigset_t set, oldset, *set_ptr;
8312 int how;
8313
8314 if (arg2) {
8315 switch (arg1) {
8316 case TARGET_SIG_BLOCK:
8317 how = SIG_BLOCK;
8318 break;
8319 case TARGET_SIG_UNBLOCK:
8320 how = SIG_UNBLOCK;
8321 break;
8322 case TARGET_SIG_SETMASK:
8323 how = SIG_SETMASK;
8324 break;
8325 default:
8326 ret = -TARGET_EINVAL;
8327 goto fail;
8328 }
8329 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8330 goto efault;
8331 target_to_host_old_sigset(&set, p);
8332 unlock_user(p, arg2, 0);
8333 set_ptr = &set;
8334 } else {
8335 how = 0;
8336 set_ptr = NULL;
8337 }
8338 ret = do_sigprocmask(how, set_ptr, &oldset);
8339 if (!is_error(ret) && arg3) {
8340 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8341 goto efault;
8342 host_to_target_old_sigset(p, &oldset);
8343 unlock_user(p, arg3, sizeof(target_sigset_t));
8344 }
8345 #endif
8346 }
8347 break;
8348 #endif
8349 case TARGET_NR_rt_sigprocmask:
8350 {
8351 int how = arg1;
8352 sigset_t set, oldset, *set_ptr;
8353
8354 if (arg4 != sizeof(target_sigset_t)) {
8355 ret = -TARGET_EINVAL;
8356 break;
8357 }
8358
8359 if (arg2) {
8360 switch(how) {
8361 case TARGET_SIG_BLOCK:
8362 how = SIG_BLOCK;
8363 break;
8364 case TARGET_SIG_UNBLOCK:
8365 how = SIG_UNBLOCK;
8366 break;
8367 case TARGET_SIG_SETMASK:
8368 how = SIG_SETMASK;
8369 break;
8370 default:
8371 ret = -TARGET_EINVAL;
8372 goto fail;
8373 }
8374 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8375 goto efault;
8376 target_to_host_sigset(&set, p);
8377 unlock_user(p, arg2, 0);
8378 set_ptr = &set;
8379 } else {
8380 how = 0;
8381 set_ptr = NULL;
8382 }
8383 ret = do_sigprocmask(how, set_ptr, &oldset);
8384 if (!is_error(ret) && arg3) {
8385 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8386 goto efault;
8387 host_to_target_sigset(p, &oldset);
8388 unlock_user(p, arg3, sizeof(target_sigset_t));
8389 }
8390 }
8391 break;
8392 #ifdef TARGET_NR_sigpending
8393 case TARGET_NR_sigpending:
8394 {
8395 sigset_t set;
8396 ret = get_errno(sigpending(&set));
8397 if (!is_error(ret)) {
8398 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8399 goto efault;
8400 host_to_target_old_sigset(p, &set);
8401 unlock_user(p, arg1, sizeof(target_sigset_t));
8402 }
8403 }
8404 break;
8405 #endif
8406 case TARGET_NR_rt_sigpending:
8407 {
8408 sigset_t set;
8409
8410 /* Yes, this check is >, not != like most. We follow the kernel's
8411 * logic and it does it like this because it implements
8412 * NR_sigpending through the same code path, and in that case
8413 * the old_sigset_t is smaller in size.
8414 */
8415 if (arg2 > sizeof(target_sigset_t)) {
8416 ret = -TARGET_EINVAL;
8417 break;
8418 }
8419
8420 ret = get_errno(sigpending(&set));
8421 if (!is_error(ret)) {
8422 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8423 goto efault;
8424 host_to_target_sigset(p, &set);
8425 unlock_user(p, arg1, sizeof(target_sigset_t));
8426 }
8427 }
8428 break;
8429 #ifdef TARGET_NR_sigsuspend
8430 case TARGET_NR_sigsuspend:
8431 {
8432 TaskState *ts = cpu->opaque;
8433 #if defined(TARGET_ALPHA)
8434 abi_ulong mask = arg1;
8435 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8436 #else
8437 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8438 goto efault;
8439 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8440 unlock_user(p, arg1, 0);
8441 #endif
8442 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8443 SIGSET_T_SIZE));
8444 if (ret != -TARGET_ERESTARTSYS) {
8445 ts->in_sigsuspend = 1;
8446 }
8447 }
8448 break;
8449 #endif
8450 case TARGET_NR_rt_sigsuspend:
8451 {
8452 TaskState *ts = cpu->opaque;
8453
8454 if (arg2 != sizeof(target_sigset_t)) {
8455 ret = -TARGET_EINVAL;
8456 break;
8457 }
8458 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8459 goto efault;
8460 target_to_host_sigset(&ts->sigsuspend_mask, p);
8461 unlock_user(p, arg1, 0);
8462 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8463 SIGSET_T_SIZE));
8464 if (ret != -TARGET_ERESTARTSYS) {
8465 ts->in_sigsuspend = 1;
8466 }
8467 }
8468 break;
8469 case TARGET_NR_rt_sigtimedwait:
8470 {
8471 sigset_t set;
8472 struct timespec uts, *puts;
8473 siginfo_t uinfo;
8474
8475 if (arg4 != sizeof(target_sigset_t)) {
8476 ret = -TARGET_EINVAL;
8477 break;
8478 }
8479
8480 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8481 goto efault;
8482 target_to_host_sigset(&set, p);
8483 unlock_user(p, arg1, 0);
8484 if (arg3) {
8485 puts = &uts;
8486 target_to_host_timespec(puts, arg3);
8487 } else {
8488 puts = NULL;
8489 }
8490 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8491 SIGSET_T_SIZE));
8492 if (!is_error(ret)) {
8493 if (arg2) {
8494 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8495 0);
8496 if (!p) {
8497 goto efault;
8498 }
8499 host_to_target_siginfo(p, &uinfo);
8500 unlock_user(p, arg2, sizeof(target_siginfo_t));
8501 }
8502 ret = host_to_target_signal(ret);
8503 }
8504 }
8505 break;
8506 case TARGET_NR_rt_sigqueueinfo:
8507 {
8508 siginfo_t uinfo;
8509
8510 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8511 if (!p) {
8512 goto efault;
8513 }
8514 target_to_host_siginfo(&uinfo, p);
8515 unlock_user(p, arg1, 0);
8516 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8517 }
8518 break;
8519 #ifdef TARGET_NR_sigreturn
8520 case TARGET_NR_sigreturn:
8521 if (block_signals()) {
8522 ret = -TARGET_ERESTARTSYS;
8523 } else {
8524 ret = do_sigreturn(cpu_env);
8525 }
8526 break;
8527 #endif
8528 case TARGET_NR_rt_sigreturn:
8529 if (block_signals()) {
8530 ret = -TARGET_ERESTARTSYS;
8531 } else {
8532 ret = do_rt_sigreturn(cpu_env);
8533 }
8534 break;
8535 case TARGET_NR_sethostname:
8536 if (!(p = lock_user_string(arg1)))
8537 goto efault;
8538 ret = get_errno(sethostname(p, arg2));
8539 unlock_user(p, arg1, 0);
8540 break;
8541 case TARGET_NR_setrlimit:
8542 {
8543 int resource = target_to_host_resource(arg1);
8544 struct target_rlimit *target_rlim;
8545 struct rlimit rlim;
8546 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8547 goto efault;
8548 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8549 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8550 unlock_user_struct(target_rlim, arg2, 0);
8551 ret = get_errno(setrlimit(resource, &rlim));
8552 }
8553 break;
8554 case TARGET_NR_getrlimit:
8555 {
8556 int resource = target_to_host_resource(arg1);
8557 struct target_rlimit *target_rlim;
8558 struct rlimit rlim;
8559
8560 ret = get_errno(getrlimit(resource, &rlim));
8561 if (!is_error(ret)) {
8562 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8563 goto efault;
8564 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8565 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8566 unlock_user_struct(target_rlim, arg2, 1);
8567 }
8568 }
8569 break;
8570 case TARGET_NR_getrusage:
8571 {
8572 struct rusage rusage;
8573 ret = get_errno(getrusage(arg1, &rusage));
8574 if (!is_error(ret)) {
8575 ret = host_to_target_rusage(arg2, &rusage);
8576 }
8577 }
8578 break;
8579 case TARGET_NR_gettimeofday:
8580 {
8581 struct timeval tv;
8582 ret = get_errno(gettimeofday(&tv, NULL));
8583 if (!is_error(ret)) {
8584 if (copy_to_user_timeval(arg1, &tv))
8585 goto efault;
8586 }
8587 }
8588 break;
8589 case TARGET_NR_settimeofday:
8590 {
8591 struct timeval tv, *ptv = NULL;
8592 struct timezone tz, *ptz = NULL;
8593
8594 if (arg1) {
8595 if (copy_from_user_timeval(&tv, arg1)) {
8596 goto efault;
8597 }
8598 ptv = &tv;
8599 }
8600
8601 if (arg2) {
8602 if (copy_from_user_timezone(&tz, arg2)) {
8603 goto efault;
8604 }
8605 ptz = &tz;
8606 }
8607
8608 ret = get_errno(settimeofday(ptv, ptz));
8609 }
8610 break;
8611 #if defined(TARGET_NR_select)
8612 case TARGET_NR_select:
8613 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8614 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8615 #else
8616 {
8617 struct target_sel_arg_struct *sel;
8618 abi_ulong inp, outp, exp, tvp;
8619 long nsel;
8620
8621 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
8622 goto efault;
8623 nsel = tswapal(sel->n);
8624 inp = tswapal(sel->inp);
8625 outp = tswapal(sel->outp);
8626 exp = tswapal(sel->exp);
8627 tvp = tswapal(sel->tvp);
8628 unlock_user_struct(sel, arg1, 0);
8629 ret = do_select(nsel, inp, outp, exp, tvp);
8630 }
8631 #endif
8632 break;
8633 #endif
8634 #ifdef TARGET_NR_pselect6
8635 case TARGET_NR_pselect6:
8636 {
8637 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8638 fd_set rfds, wfds, efds;
8639 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8640 struct timespec ts, *ts_ptr;
8641
8642 /*
8643 * The 6th arg is actually two args smashed together,
8644 * so we cannot use the C library.
8645 */
8646 sigset_t set;
8647 struct {
8648 sigset_t *set;
8649 size_t size;
8650 } sig, *sig_ptr;
8651
8652 abi_ulong arg_sigset, arg_sigsize, *arg7;
8653 target_sigset_t *target_sigset;
8654
8655 n = arg1;
8656 rfd_addr = arg2;
8657 wfd_addr = arg3;
8658 efd_addr = arg4;
8659 ts_addr = arg5;
8660
8661 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8662 if (ret) {
8663 goto fail;
8664 }
8665 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8666 if (ret) {
8667 goto fail;
8668 }
8669 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8670 if (ret) {
8671 goto fail;
8672 }
8673
8674 /*
8675 * This takes a timespec, and not a timeval, so we cannot
8676 * use the do_select() helper ...
8677 */
8678 if (ts_addr) {
8679 if (target_to_host_timespec(&ts, ts_addr)) {
8680 goto efault;
8681 }
8682 ts_ptr = &ts;
8683 } else {
8684 ts_ptr = NULL;
8685 }
8686
8687 /* Extract the two packed args for the sigset */
8688 if (arg6) {
8689 sig_ptr = &sig;
8690 sig.size = SIGSET_T_SIZE;
8691
8692 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8693 if (!arg7) {
8694 goto efault;
8695 }
8696 arg_sigset = tswapal(arg7[0]);
8697 arg_sigsize = tswapal(arg7[1]);
8698 unlock_user(arg7, arg6, 0);
8699
8700 if (arg_sigset) {
8701 sig.set = &set;
8702 if (arg_sigsize != sizeof(*target_sigset)) {
8703 /* Like the kernel, we enforce correct size sigsets */
8704 ret = -TARGET_EINVAL;
8705 goto fail;
8706 }
8707 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8708 sizeof(*target_sigset), 1);
8709 if (!target_sigset) {
8710 goto efault;
8711 }
8712 target_to_host_sigset(&set, target_sigset);
8713 unlock_user(target_sigset, arg_sigset, 0);
8714 } else {
8715 sig.set = NULL;
8716 }
8717 } else {
8718 sig_ptr = NULL;
8719 }
8720
8721 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8722 ts_ptr, sig_ptr));
8723
8724 if (!is_error(ret)) {
8725 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8726 goto efault;
8727 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8728 goto efault;
8729 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8730 goto efault;
8731
8732 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8733 goto efault;
8734 }
8735 }
8736 break;
8737 #endif
8738 #ifdef TARGET_NR_symlink
8739 case TARGET_NR_symlink:
8740 {
8741 void *p2;
8742 p = lock_user_string(arg1);
8743 p2 = lock_user_string(arg2);
8744 if (!p || !p2)
8745 ret = -TARGET_EFAULT;
8746 else
8747 ret = get_errno(symlink(p, p2));
8748 unlock_user(p2, arg2, 0);
8749 unlock_user(p, arg1, 0);
8750 }
8751 break;
8752 #endif
8753 #if defined(TARGET_NR_symlinkat)
8754 case TARGET_NR_symlinkat:
8755 {
8756 void *p2;
8757 p = lock_user_string(arg1);
8758 p2 = lock_user_string(arg3);
8759 if (!p || !p2)
8760 ret = -TARGET_EFAULT;
8761 else
8762 ret = get_errno(symlinkat(p, arg2, p2));
8763 unlock_user(p2, arg3, 0);
8764 unlock_user(p, arg1, 0);
8765 }
8766 break;
8767 #endif
8768 #ifdef TARGET_NR_oldlstat
8769 case TARGET_NR_oldlstat:
8770 goto unimplemented;
8771 #endif
8772 #ifdef TARGET_NR_readlink
8773 case TARGET_NR_readlink:
8774 {
8775 void *p2;
8776 p = lock_user_string(arg1);
8777 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8778 if (!p || !p2) {
8779 ret = -TARGET_EFAULT;
8780 } else if (!arg3) {
8781 /* Short circuit this for the magic exe check. */
8782 ret = -TARGET_EINVAL;
8783 } else if (is_proc_myself((const char *)p, "exe")) {
8784 char real[PATH_MAX], *temp;
8785 temp = realpath(exec_path, real);
8786 /* Return value is # of bytes that we wrote to the buffer. */
8787 if (temp == NULL) {
8788 ret = get_errno(-1);
8789 } else {
8790 /* Don't worry about sign mismatch as earlier mapping
8791 * logic would have thrown a bad address error. */
8792 ret = MIN(strlen(real), arg3);
8793 /* We cannot NUL terminate the string. */
8794 memcpy(p2, real, ret);
8795 }
8796 } else {
8797 ret = get_errno(readlink(path(p), p2, arg3));
8798 }
8799 unlock_user(p2, arg2, ret);
8800 unlock_user(p, arg1, 0);
8801 }
8802 break;
8803 #endif
8804 #if defined(TARGET_NR_readlinkat)
8805 case TARGET_NR_readlinkat:
8806 {
8807 void *p2;
8808 p = lock_user_string(arg2);
8809 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8810 if (!p || !p2) {
8811 ret = -TARGET_EFAULT;
8812 } else if (is_proc_myself((const char *)p, "exe")) {
8813 char real[PATH_MAX], *temp;
8814 temp = realpath(exec_path, real);
8815 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8816 snprintf((char *)p2, arg4, "%s", real);
8817 } else {
8818 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8819 }
8820 unlock_user(p2, arg3, ret);
8821 unlock_user(p, arg2, 0);
8822 }
8823 break;
8824 #endif
8825 #ifdef TARGET_NR_uselib
8826 case TARGET_NR_uselib:
8827 goto unimplemented;
8828 #endif
8829 #ifdef TARGET_NR_swapon
8830 case TARGET_NR_swapon:
8831 if (!(p = lock_user_string(arg1)))
8832 goto efault;
8833 ret = get_errno(swapon(p, arg2));
8834 unlock_user(p, arg1, 0);
8835 break;
8836 #endif
8837 case TARGET_NR_reboot:
8838 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8839 /* arg4 must be ignored in all other cases */
8840 p = lock_user_string(arg4);
8841 if (!p) {
8842 goto efault;
8843 }
8844 ret = get_errno(reboot(arg1, arg2, arg3, p));
8845 unlock_user(p, arg4, 0);
8846 } else {
8847 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8848 }
8849 break;
8850 #ifdef TARGET_NR_readdir
8851 case TARGET_NR_readdir:
8852 goto unimplemented;
8853 #endif
8854 #ifdef TARGET_NR_mmap
8855 case TARGET_NR_mmap:
8856 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8857 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8858 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8859 || defined(TARGET_S390X)
8860 {
8861 abi_ulong *v;
8862 abi_ulong v1, v2, v3, v4, v5, v6;
8863 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8864 goto efault;
8865 v1 = tswapal(v[0]);
8866 v2 = tswapal(v[1]);
8867 v3 = tswapal(v[2]);
8868 v4 = tswapal(v[3]);
8869 v5 = tswapal(v[4]);
8870 v6 = tswapal(v[5]);
8871 unlock_user(v, arg1, 0);
8872 ret = get_errno(target_mmap(v1, v2, v3,
8873 target_to_host_bitmask(v4, mmap_flags_tbl),
8874 v5, v6));
8875 }
8876 #else
8877 ret = get_errno(target_mmap(arg1, arg2, arg3,
8878 target_to_host_bitmask(arg4, mmap_flags_tbl),
8879 arg5,
8880 arg6));
8881 #endif
8882 break;
8883 #endif
8884 #ifdef TARGET_NR_mmap2
8885 case TARGET_NR_mmap2:
8886 #ifndef MMAP_SHIFT
8887 #define MMAP_SHIFT 12
8888 #endif
8889 ret = get_errno(target_mmap(arg1, arg2, arg3,
8890 target_to_host_bitmask(arg4, mmap_flags_tbl),
8891 arg5,
8892 arg6 << MMAP_SHIFT));
8893 break;
8894 #endif
8895 case TARGET_NR_munmap:
8896 ret = get_errno(target_munmap(arg1, arg2));
8897 break;
8898 case TARGET_NR_mprotect:
8899 {
8900 TaskState *ts = cpu->opaque;
8901 /* Special hack to detect libc making the stack executable. */
8902 if ((arg3 & PROT_GROWSDOWN)
8903 && arg1 >= ts->info->stack_limit
8904 && arg1 <= ts->info->start_stack) {
8905 arg3 &= ~PROT_GROWSDOWN;
8906 arg2 = arg2 + arg1 - ts->info->stack_limit;
8907 arg1 = ts->info->stack_limit;
8908 }
8909 }
8910 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8911 break;
8912 #ifdef TARGET_NR_mremap
8913 case TARGET_NR_mremap:
8914 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8915 break;
8916 #endif
8917 /* ??? msync/mlock/munlock are broken for softmmu. */
8918 #ifdef TARGET_NR_msync
8919 case TARGET_NR_msync:
8920 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8921 break;
8922 #endif
8923 #ifdef TARGET_NR_mlock
8924 case TARGET_NR_mlock:
8925 ret = get_errno(mlock(g2h(arg1), arg2));
8926 break;
8927 #endif
8928 #ifdef TARGET_NR_munlock
8929 case TARGET_NR_munlock:
8930 ret = get_errno(munlock(g2h(arg1), arg2));
8931 break;
8932 #endif
8933 #ifdef TARGET_NR_mlockall
8934 case TARGET_NR_mlockall:
8935 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
8936 break;
8937 #endif
8938 #ifdef TARGET_NR_munlockall
8939 case TARGET_NR_munlockall:
8940 ret = get_errno(munlockall());
8941 break;
8942 #endif
8943 case TARGET_NR_truncate:
8944 if (!(p = lock_user_string(arg1)))
8945 goto efault;
8946 ret = get_errno(truncate(p, arg2));
8947 unlock_user(p, arg1, 0);
8948 break;
8949 case TARGET_NR_ftruncate:
8950 ret = get_errno(ftruncate(arg1, arg2));
8951 break;
8952 case TARGET_NR_fchmod:
8953 ret = get_errno(fchmod(arg1, arg2));
8954 break;
8955 #if defined(TARGET_NR_fchmodat)
8956 case TARGET_NR_fchmodat:
8957 if (!(p = lock_user_string(arg2)))
8958 goto efault;
8959 ret = get_errno(fchmodat(arg1, p, arg3, 0));
8960 unlock_user(p, arg2, 0);
8961 break;
8962 #endif
8963 case TARGET_NR_getpriority:
8964 /* Note that negative values are valid for getpriority, so we must
8965 differentiate based on errno settings. */
8966 errno = 0;
8967 ret = getpriority(arg1, arg2);
8968 if (ret == -1 && errno != 0) {
8969 ret = -host_to_target_errno(errno);
8970 break;
8971 }
8972 #ifdef TARGET_ALPHA
8973 /* Return value is the unbiased priority. Signal no error. */
8974 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
8975 #else
8976 /* Return value is a biased priority to avoid negative numbers. */
8977 ret = 20 - ret;
8978 #endif
8979 break;
8980 case TARGET_NR_setpriority:
8981 ret = get_errno(setpriority(arg1, arg2, arg3));
8982 break;
8983 #ifdef TARGET_NR_profil
8984 case TARGET_NR_profil:
8985 goto unimplemented;
8986 #endif
8987 case TARGET_NR_statfs:
8988 if (!(p = lock_user_string(arg1)))
8989 goto efault;
8990 ret = get_errno(statfs(path(p), &stfs));
8991 unlock_user(p, arg1, 0);
8992 convert_statfs:
8993 if (!is_error(ret)) {
8994 struct target_statfs *target_stfs;
8995
8996 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
8997 goto efault;
8998 __put_user(stfs.f_type, &target_stfs->f_type);
8999 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9000 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9001 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9002 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9003 __put_user(stfs.f_files, &target_stfs->f_files);
9004 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9005 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9006 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9007 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9008 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9009 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9010 unlock_user_struct(target_stfs, arg2, 1);
9011 }
9012 break;
9013 case TARGET_NR_fstatfs:
9014 ret = get_errno(fstatfs(arg1, &stfs));
9015 goto convert_statfs;
9016 #ifdef TARGET_NR_statfs64
9017 case TARGET_NR_statfs64:
9018 if (!(p = lock_user_string(arg1)))
9019 goto efault;
9020 ret = get_errno(statfs(path(p), &stfs));
9021 unlock_user(p, arg1, 0);
9022 convert_statfs64:
9023 if (!is_error(ret)) {
9024 struct target_statfs64 *target_stfs;
9025
9026 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9027 goto efault;
9028 __put_user(stfs.f_type, &target_stfs->f_type);
9029 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9030 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9031 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9032 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9033 __put_user(stfs.f_files, &target_stfs->f_files);
9034 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9035 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9036 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9037 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9038 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9039 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9040 unlock_user_struct(target_stfs, arg3, 1);
9041 }
9042 break;
9043 case TARGET_NR_fstatfs64:
9044 ret = get_errno(fstatfs(arg1, &stfs));
9045 goto convert_statfs64;
9046 #endif
9047 #ifdef TARGET_NR_ioperm
9048 case TARGET_NR_ioperm:
9049 goto unimplemented;
9050 #endif
9051 #ifdef TARGET_NR_socketcall
9052 case TARGET_NR_socketcall:
9053 ret = do_socketcall(arg1, arg2);
9054 break;
9055 #endif
9056 #ifdef TARGET_NR_accept
9057 case TARGET_NR_accept:
9058 ret = do_accept4(arg1, arg2, arg3, 0);
9059 break;
9060 #endif
9061 #ifdef TARGET_NR_accept4
9062 case TARGET_NR_accept4:
9063 ret = do_accept4(arg1, arg2, arg3, arg4);
9064 break;
9065 #endif
9066 #ifdef TARGET_NR_bind
9067 case TARGET_NR_bind:
9068 ret = do_bind(arg1, arg2, arg3);
9069 break;
9070 #endif
9071 #ifdef TARGET_NR_connect
9072 case TARGET_NR_connect:
9073 ret = do_connect(arg1, arg2, arg3);
9074 break;
9075 #endif
9076 #ifdef TARGET_NR_getpeername
9077 case TARGET_NR_getpeername:
9078 ret = do_getpeername(arg1, arg2, arg3);
9079 break;
9080 #endif
9081 #ifdef TARGET_NR_getsockname
9082 case TARGET_NR_getsockname:
9083 ret = do_getsockname(arg1, arg2, arg3);
9084 break;
9085 #endif
9086 #ifdef TARGET_NR_getsockopt
9087 case TARGET_NR_getsockopt:
9088 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9089 break;
9090 #endif
9091 #ifdef TARGET_NR_listen
9092 case TARGET_NR_listen:
9093 ret = get_errno(listen(arg1, arg2));
9094 break;
9095 #endif
9096 #ifdef TARGET_NR_recv
9097 case TARGET_NR_recv:
9098 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9099 break;
9100 #endif
9101 #ifdef TARGET_NR_recvfrom
9102 case TARGET_NR_recvfrom:
9103 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9104 break;
9105 #endif
9106 #ifdef TARGET_NR_recvmsg
9107 case TARGET_NR_recvmsg:
9108 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9109 break;
9110 #endif
9111 #ifdef TARGET_NR_send
9112 case TARGET_NR_send:
9113 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9114 break;
9115 #endif
9116 #ifdef TARGET_NR_sendmsg
9117 case TARGET_NR_sendmsg:
9118 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9119 break;
9120 #endif
9121 #ifdef TARGET_NR_sendmmsg
9122 case TARGET_NR_sendmmsg:
9123 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9124 break;
9125 case TARGET_NR_recvmmsg:
9126 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9127 break;
9128 #endif
9129 #ifdef TARGET_NR_sendto
9130 case TARGET_NR_sendto:
9131 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9132 break;
9133 #endif
9134 #ifdef TARGET_NR_shutdown
9135 case TARGET_NR_shutdown:
9136 ret = get_errno(shutdown(arg1, arg2));
9137 break;
9138 #endif
9139 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9140 case TARGET_NR_getrandom:
9141 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9142 if (!p) {
9143 goto efault;
9144 }
9145 ret = get_errno(getrandom(p, arg2, arg3));
9146 unlock_user(p, arg1, ret);
9147 break;
9148 #endif
9149 #ifdef TARGET_NR_socket
9150 case TARGET_NR_socket:
9151 ret = do_socket(arg1, arg2, arg3);
9152 fd_trans_unregister(ret);
9153 break;
9154 #endif
9155 #ifdef TARGET_NR_socketpair
9156 case TARGET_NR_socketpair:
9157 ret = do_socketpair(arg1, arg2, arg3, arg4);
9158 break;
9159 #endif
9160 #ifdef TARGET_NR_setsockopt
9161 case TARGET_NR_setsockopt:
9162 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9163 break;
9164 #endif
9165
9166 case TARGET_NR_syslog:
9167 if (!(p = lock_user_string(arg2)))
9168 goto efault;
9169 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9170 unlock_user(p, arg2, 0);
9171 break;
9172
9173 case TARGET_NR_setitimer:
9174 {
9175 struct itimerval value, ovalue, *pvalue;
9176
9177 if (arg2) {
9178 pvalue = &value;
9179 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9180 || copy_from_user_timeval(&pvalue->it_value,
9181 arg2 + sizeof(struct target_timeval)))
9182 goto efault;
9183 } else {
9184 pvalue = NULL;
9185 }
9186 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9187 if (!is_error(ret) && arg3) {
9188 if (copy_to_user_timeval(arg3,
9189 &ovalue.it_interval)
9190 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9191 &ovalue.it_value))
9192 goto efault;
9193 }
9194 }
9195 break;
9196 case TARGET_NR_getitimer:
9197 {
9198 struct itimerval value;
9199
9200 ret = get_errno(getitimer(arg1, &value));
9201 if (!is_error(ret) && arg2) {
9202 if (copy_to_user_timeval(arg2,
9203 &value.it_interval)
9204 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9205 &value.it_value))
9206 goto efault;
9207 }
9208 }
9209 break;
9210 #ifdef TARGET_NR_stat
9211 case TARGET_NR_stat:
9212 if (!(p = lock_user_string(arg1)))
9213 goto efault;
9214 ret = get_errno(stat(path(p), &st));
9215 unlock_user(p, arg1, 0);
9216 goto do_stat;
9217 #endif
9218 #ifdef TARGET_NR_lstat
9219 case TARGET_NR_lstat:
9220 if (!(p = lock_user_string(arg1)))
9221 goto efault;
9222 ret = get_errno(lstat(path(p), &st));
9223 unlock_user(p, arg1, 0);
9224 goto do_stat;
9225 #endif
9226 case TARGET_NR_fstat:
9227 {
9228 ret = get_errno(fstat(arg1, &st));
9229 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9230 do_stat:
9231 #endif
9232 if (!is_error(ret)) {
9233 struct target_stat *target_st;
9234
9235 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9236 goto efault;
9237 memset(target_st, 0, sizeof(*target_st));
9238 __put_user(st.st_dev, &target_st->st_dev);
9239 __put_user(st.st_ino, &target_st->st_ino);
9240 __put_user(st.st_mode, &target_st->st_mode);
9241 __put_user(st.st_uid, &target_st->st_uid);
9242 __put_user(st.st_gid, &target_st->st_gid);
9243 __put_user(st.st_nlink, &target_st->st_nlink);
9244 __put_user(st.st_rdev, &target_st->st_rdev);
9245 __put_user(st.st_size, &target_st->st_size);
9246 __put_user(st.st_blksize, &target_st->st_blksize);
9247 __put_user(st.st_blocks, &target_st->st_blocks);
9248 __put_user(st.st_atime, &target_st->target_st_atime);
9249 __put_user(st.st_mtime, &target_st->target_st_mtime);
9250 __put_user(st.st_ctime, &target_st->target_st_ctime);
9251 unlock_user_struct(target_st, arg2, 1);
9252 }
9253 }
9254 break;
9255 #ifdef TARGET_NR_olduname
9256 case TARGET_NR_olduname:
9257 goto unimplemented;
9258 #endif
9259 #ifdef TARGET_NR_iopl
9260 case TARGET_NR_iopl:
9261 goto unimplemented;
9262 #endif
9263 case TARGET_NR_vhangup:
9264 ret = get_errno(vhangup());
9265 break;
9266 #ifdef TARGET_NR_idle
9267 case TARGET_NR_idle:
9268 goto unimplemented;
9269 #endif
9270 #ifdef TARGET_NR_syscall
9271 case TARGET_NR_syscall:
9272 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9273 arg6, arg7, arg8, 0);
9274 break;
9275 #endif
9276 case TARGET_NR_wait4:
9277 {
9278 int status;
9279 abi_long status_ptr = arg2;
9280 struct rusage rusage, *rusage_ptr;
9281 abi_ulong target_rusage = arg4;
9282 abi_long rusage_err;
9283 if (target_rusage)
9284 rusage_ptr = &rusage;
9285 else
9286 rusage_ptr = NULL;
9287 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9288 if (!is_error(ret)) {
9289 if (status_ptr && ret) {
9290 status = host_to_target_waitstatus(status);
9291 if (put_user_s32(status, status_ptr))
9292 goto efault;
9293 }
9294 if (target_rusage) {
9295 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9296 if (rusage_err) {
9297 ret = rusage_err;
9298 }
9299 }
9300 }
9301 }
9302 break;
9303 #ifdef TARGET_NR_swapoff
9304 case TARGET_NR_swapoff:
9305 if (!(p = lock_user_string(arg1)))
9306 goto efault;
9307 ret = get_errno(swapoff(p));
9308 unlock_user(p, arg1, 0);
9309 break;
9310 #endif
9311 case TARGET_NR_sysinfo:
9312 {
9313 struct target_sysinfo *target_value;
9314 struct sysinfo value;
9315 ret = get_errno(sysinfo(&value));
9316 if (!is_error(ret) && arg1)
9317 {
9318 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9319 goto efault;
9320 __put_user(value.uptime, &target_value->uptime);
9321 __put_user(value.loads[0], &target_value->loads[0]);
9322 __put_user(value.loads[1], &target_value->loads[1]);
9323 __put_user(value.loads[2], &target_value->loads[2]);
9324 __put_user(value.totalram, &target_value->totalram);
9325 __put_user(value.freeram, &target_value->freeram);
9326 __put_user(value.sharedram, &target_value->sharedram);
9327 __put_user(value.bufferram, &target_value->bufferram);
9328 __put_user(value.totalswap, &target_value->totalswap);
9329 __put_user(value.freeswap, &target_value->freeswap);
9330 __put_user(value.procs, &target_value->procs);
9331 __put_user(value.totalhigh, &target_value->totalhigh);
9332 __put_user(value.freehigh, &target_value->freehigh);
9333 __put_user(value.mem_unit, &target_value->mem_unit);
9334 unlock_user_struct(target_value, arg1, 1);
9335 }
9336 }
9337 break;
9338 #ifdef TARGET_NR_ipc
9339 case TARGET_NR_ipc:
9340 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9341 break;
9342 #endif
9343 #ifdef TARGET_NR_semget
9344 case TARGET_NR_semget:
9345 ret = get_errno(semget(arg1, arg2, arg3));
9346 break;
9347 #endif
9348 #ifdef TARGET_NR_semop
9349 case TARGET_NR_semop:
9350 ret = do_semop(arg1, arg2, arg3);
9351 break;
9352 #endif
9353 #ifdef TARGET_NR_semctl
9354 case TARGET_NR_semctl:
9355 ret = do_semctl(arg1, arg2, arg3, arg4);
9356 break;
9357 #endif
9358 #ifdef TARGET_NR_msgctl
9359 case TARGET_NR_msgctl:
9360 ret = do_msgctl(arg1, arg2, arg3);
9361 break;
9362 #endif
9363 #ifdef TARGET_NR_msgget
9364 case TARGET_NR_msgget:
9365 ret = get_errno(msgget(arg1, arg2));
9366 break;
9367 #endif
9368 #ifdef TARGET_NR_msgrcv
9369 case TARGET_NR_msgrcv:
9370 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9371 break;
9372 #endif
9373 #ifdef TARGET_NR_msgsnd
9374 case TARGET_NR_msgsnd:
9375 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9376 break;
9377 #endif
9378 #ifdef TARGET_NR_shmget
9379 case TARGET_NR_shmget:
9380 ret = get_errno(shmget(arg1, arg2, arg3));
9381 break;
9382 #endif
9383 #ifdef TARGET_NR_shmctl
9384 case TARGET_NR_shmctl:
9385 ret = do_shmctl(arg1, arg2, arg3);
9386 break;
9387 #endif
9388 #ifdef TARGET_NR_shmat
9389 case TARGET_NR_shmat:
9390 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9391 break;
9392 #endif
9393 #ifdef TARGET_NR_shmdt
9394 case TARGET_NR_shmdt:
9395 ret = do_shmdt(arg1);
9396 break;
9397 #endif
9398 case TARGET_NR_fsync:
9399 ret = get_errno(fsync(arg1));
9400 break;
9401 case TARGET_NR_clone:
9402 /* Linux manages to have three different orderings for its
9403 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9404 * match the kernel's CONFIG_CLONE_* settings.
9405 * Microblaze is further special in that it uses a sixth
9406 * implicit argument to clone for the TLS pointer.
9407 */
9408 #if defined(TARGET_MICROBLAZE)
9409 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9410 #elif defined(TARGET_CLONE_BACKWARDS)
9411 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9412 #elif defined(TARGET_CLONE_BACKWARDS2)
9413 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9414 #else
9415 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9416 #endif
9417 break;
9418 #ifdef __NR_exit_group
9419 /* new thread calls */
9420 case TARGET_NR_exit_group:
9421 #ifdef TARGET_GPROF
9422 _mcleanup();
9423 #endif
9424 gdb_exit(cpu_env, arg1);
9425 ret = get_errno(exit_group(arg1));
9426 break;
9427 #endif
9428 case TARGET_NR_setdomainname:
9429 if (!(p = lock_user_string(arg1)))
9430 goto efault;
9431 ret = get_errno(setdomainname(p, arg2));
9432 unlock_user(p, arg1, 0);
9433 break;
9434 case TARGET_NR_uname:
9435 /* no need to transcode because we use the linux syscall */
9436 {
9437 struct new_utsname * buf;
9438
9439 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9440 goto efault;
9441 ret = get_errno(sys_uname(buf));
9442 if (!is_error(ret)) {
9443 /* Overwrite the native machine name with whatever is being
9444 emulated. */
9445 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9446 /* Allow the user to override the reported release. */
9447 if (qemu_uname_release && *qemu_uname_release) {
9448 g_strlcpy(buf->release, qemu_uname_release,
9449 sizeof(buf->release));
9450 }
9451 }
9452 unlock_user_struct(buf, arg1, 1);
9453 }
9454 break;
9455 #ifdef TARGET_I386
9456 case TARGET_NR_modify_ldt:
9457 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9458 break;
9459 #if !defined(TARGET_X86_64)
9460 case TARGET_NR_vm86old:
9461 goto unimplemented;
9462 case TARGET_NR_vm86:
9463 ret = do_vm86(cpu_env, arg1, arg2);
9464 break;
9465 #endif
9466 #endif
9467 case TARGET_NR_adjtimex:
9468 goto unimplemented;
9469 #ifdef TARGET_NR_create_module
9470 case TARGET_NR_create_module:
9471 #endif
9472 case TARGET_NR_init_module:
9473 case TARGET_NR_delete_module:
9474 #ifdef TARGET_NR_get_kernel_syms
9475 case TARGET_NR_get_kernel_syms:
9476 #endif
9477 goto unimplemented;
9478 case TARGET_NR_quotactl:
9479 goto unimplemented;
9480 case TARGET_NR_getpgid:
9481 ret = get_errno(getpgid(arg1));
9482 break;
9483 case TARGET_NR_fchdir:
9484 ret = get_errno(fchdir(arg1));
9485 break;
9486 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9487 case TARGET_NR_bdflush:
9488 goto unimplemented;
9489 #endif
9490 #ifdef TARGET_NR_sysfs
9491 case TARGET_NR_sysfs:
9492 goto unimplemented;
9493 #endif
9494 case TARGET_NR_personality:
9495 ret = get_errno(personality(arg1));
9496 break;
9497 #ifdef TARGET_NR_afs_syscall
9498 case TARGET_NR_afs_syscall:
9499 goto unimplemented;
9500 #endif
9501 #ifdef TARGET_NR__llseek /* Not on alpha */
9502 case TARGET_NR__llseek:
9503 {
9504 int64_t res;
9505 #if !defined(__NR_llseek)
9506 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9507 if (res == -1) {
9508 ret = get_errno(res);
9509 } else {
9510 ret = 0;
9511 }
9512 #else
9513 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9514 #endif
9515 if ((ret == 0) && put_user_s64(res, arg4)) {
9516 goto efault;
9517 }
9518 }
9519 break;
9520 #endif
9521 #ifdef TARGET_NR_getdents
9522 case TARGET_NR_getdents:
9523 #ifdef __NR_getdents
9524 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9525 {
9526 struct target_dirent *target_dirp;
9527 struct linux_dirent *dirp;
9528 abi_long count = arg3;
9529
9530 dirp = g_try_malloc(count);
9531 if (!dirp) {
9532 ret = -TARGET_ENOMEM;
9533 goto fail;
9534 }
9535
9536 ret = get_errno(sys_getdents(arg1, dirp, count));
9537 if (!is_error(ret)) {
9538 struct linux_dirent *de;
9539 struct target_dirent *tde;
9540 int len = ret;
9541 int reclen, treclen;
9542 int count1, tnamelen;
9543
9544 count1 = 0;
9545 de = dirp;
9546 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9547 goto efault;
9548 tde = target_dirp;
9549 while (len > 0) {
9550 reclen = de->d_reclen;
9551 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9552 assert(tnamelen >= 0);
9553 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9554 assert(count1 + treclen <= count);
9555 tde->d_reclen = tswap16(treclen);
9556 tde->d_ino = tswapal(de->d_ino);
9557 tde->d_off = tswapal(de->d_off);
9558 memcpy(tde->d_name, de->d_name, tnamelen);
9559 de = (struct linux_dirent *)((char *)de + reclen);
9560 len -= reclen;
9561 tde = (struct target_dirent *)((char *)tde + treclen);
9562 count1 += treclen;
9563 }
9564 ret = count1;
9565 unlock_user(target_dirp, arg2, ret);
9566 }
9567 g_free(dirp);
9568 }
9569 #else
9570 {
9571 struct linux_dirent *dirp;
9572 abi_long count = arg3;
9573
9574 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9575 goto efault;
9576 ret = get_errno(sys_getdents(arg1, dirp, count));
9577 if (!is_error(ret)) {
9578 struct linux_dirent *de;
9579 int len = ret;
9580 int reclen;
9581 de = dirp;
9582 while (len > 0) {
9583 reclen = de->d_reclen;
9584 if (reclen > len)
9585 break;
9586 de->d_reclen = tswap16(reclen);
9587 tswapls(&de->d_ino);
9588 tswapls(&de->d_off);
9589 de = (struct linux_dirent *)((char *)de + reclen);
9590 len -= reclen;
9591 }
9592 }
9593 unlock_user(dirp, arg2, ret);
9594 }
9595 #endif
9596 #else
9597 /* Implement getdents in terms of getdents64 */
9598 {
9599 struct linux_dirent64 *dirp;
9600 abi_long count = arg3;
9601
9602 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9603 if (!dirp) {
9604 goto efault;
9605 }
9606 ret = get_errno(sys_getdents64(arg1, dirp, count));
9607 if (!is_error(ret)) {
9608 /* Convert the dirent64 structs to target dirent. We do this
9609 * in-place, since we can guarantee that a target_dirent is no
9610 * larger than a dirent64; however this means we have to be
9611 * careful to read everything before writing in the new format.
9612 */
9613 struct linux_dirent64 *de;
9614 struct target_dirent *tde;
9615 int len = ret;
9616 int tlen = 0;
9617
9618 de = dirp;
9619 tde = (struct target_dirent *)dirp;
9620 while (len > 0) {
9621 int namelen, treclen;
9622 int reclen = de->d_reclen;
9623 uint64_t ino = de->d_ino;
9624 int64_t off = de->d_off;
9625 uint8_t type = de->d_type;
9626
9627 namelen = strlen(de->d_name);
9628 treclen = offsetof(struct target_dirent, d_name)
9629 + namelen + 2;
9630 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9631
9632 memmove(tde->d_name, de->d_name, namelen + 1);
9633 tde->d_ino = tswapal(ino);
9634 tde->d_off = tswapal(off);
9635 tde->d_reclen = tswap16(treclen);
9636 /* The target_dirent type is in what was formerly a padding
9637 * byte at the end of the structure:
9638 */
9639 *(((char *)tde) + treclen - 1) = type;
9640
9641 de = (struct linux_dirent64 *)((char *)de + reclen);
9642 tde = (struct target_dirent *)((char *)tde + treclen);
9643 len -= reclen;
9644 tlen += treclen;
9645 }
9646 ret = tlen;
9647 }
9648 unlock_user(dirp, arg2, ret);
9649 }
9650 #endif
9651 break;
9652 #endif /* TARGET_NR_getdents */
9653 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9654 case TARGET_NR_getdents64:
9655 {
9656 struct linux_dirent64 *dirp;
9657 abi_long count = arg3;
9658 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9659 goto efault;
9660 ret = get_errno(sys_getdents64(arg1, dirp, count));
9661 if (!is_error(ret)) {
9662 struct linux_dirent64 *de;
9663 int len = ret;
9664 int reclen;
9665 de = dirp;
9666 while (len > 0) {
9667 reclen = de->d_reclen;
9668 if (reclen > len)
9669 break;
9670 de->d_reclen = tswap16(reclen);
9671 tswap64s((uint64_t *)&de->d_ino);
9672 tswap64s((uint64_t *)&de->d_off);
9673 de = (struct linux_dirent64 *)((char *)de + reclen);
9674 len -= reclen;
9675 }
9676 }
9677 unlock_user(dirp, arg2, ret);
9678 }
9679 break;
9680 #endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        /* select() with the classic 5-argument calling convention;
         * all fd_set/timeval conversion happens inside do_select(). */
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
# ifdef TARGET_NR_poll
    case TARGET_NR_poll:
# endif
# ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
# endif
        /* poll/ppoll share the pollfd array conversion; the inner
         * switch on `num` handles the parts that differ (timeout
         * representation and the sigmask argument of ppoll). Both are
         * implemented on top of the host ppoll via safe_ppoll(). */
        {
            struct target_pollfd *target_pfd;
            unsigned int nfds = arg2;
            struct pollfd *pfd;
            unsigned int i;

            pfd = NULL;
            target_pfd = NULL;
            if (nfds) {
                /* Reject counts that would overflow the size computation
                 * below (and hence the lock_user length). */
                if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_pfd = lock_user(VERIFY_WRITE, arg1,
                                       sizeof(struct target_pollfd) * nfds, 1);
                if (!target_pfd) {
                    goto efault;
                }

                pfd = alloca(sizeof(struct pollfd) * nfds);
                for (i = 0; i < nfds; i++) {
                    pfd[i].fd = tswap32(target_pfd[i].fd);
                    pfd[i].events = tswap16(target_pfd[i].events);
                }
            }

            switch (num) {
# ifdef TARGET_NR_ppoll
            case TARGET_NR_ppoll:
            {
                struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
                target_sigset_t *target_set;
                sigset_t _set, *set = &_set;

                /* arg3: optional timeout; NULL means block indefinitely */
                if (arg3) {
                    if (target_to_host_timespec(timeout_ts, arg3)) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                } else {
                    timeout_ts = NULL;
                }

                /* arg4/arg5: optional sigmask and its size; the kernel
                 * insists the size matches exactly. */
                if (arg4) {
                    if (arg5 != sizeof(target_sigset_t)) {
                        unlock_user(target_pfd, arg1, 0);
                        ret = -TARGET_EINVAL;
                        break;
                    }

                    target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
                    if (!target_set) {
                        unlock_user(target_pfd, arg1, 0);
                        goto efault;
                    }
                    target_to_host_sigset(set, target_set);
                } else {
                    set = NULL;
                }

                ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                           set, SIGSET_T_SIZE));

                /* ppoll updates the timeout with the remaining time */
                if (!is_error(ret) && arg3) {
                    host_to_target_timespec(arg3, timeout_ts);
                }
                if (arg4) {
                    unlock_user(target_set, arg4, 0);
                }
                break;
            }
# endif
# ifdef TARGET_NR_poll
            case TARGET_NR_poll:
            {
                struct timespec ts, *pts;

                if (arg3 >= 0) {
                    /* Convert ms to secs, ns */
                    ts.tv_sec = arg3 / 1000;
                    ts.tv_nsec = (arg3 % 1000) * 1000000LL;
                    pts = &ts;
                } else {
                    /* -ve poll() timeout means "infinite" */
                    pts = NULL;
                }
                ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
                break;
            }
# endif
            default:
                g_assert_not_reached();
            }

            /* Copy back revents regardless of which variant ran */
            if (!is_error(ret)) {
                for(i = 0; i < nfds; i++) {
                    target_pfd[i].revents = tswap16(pfd[i].revents);
                }
            }
            unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
        }
        break;
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        ret = get_errno(safe_flock(arg1, arg2));
        break;
    case TARGET_NR_readv:
        {
            /* Convert the guest iovec array, read into it, then copy the
             * data back to the guest (unlock with copy=1). */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec failed and left the reason in errno */
                ret = -host_to_target_errno(errno);
            }
        }
        break;
    case TARGET_NR_writev:
        {
            /* Mirror of readv: copy guest data in, write, discard on unlock */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        break;
    case TARGET_NR_getsid:
        ret = get_errno(getsid(arg1));
        break;
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        ret = get_errno(fdatasync(arg1));
        break;
#endif
#ifdef TARGET_NR__sysctl
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        ret = -TARGET_ENOTDIR;
        break;
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                ret = -TARGET_EINVAL;
                break;
            }
            /* Round the guest's buffer size up to a whole number of host
             * ulongs for the host syscall. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        ret = -TARGET_EINVAL;
                        break;
                    }
                    ret = arg2;
                }

                /* ret is the number of mask bytes the caller gets back */
                if (copy_to_user(arg3, mask, ret)) {
                    goto efault;
                }
            }
        }
        break;
9880 case TARGET_NR_sched_setaffinity:
9881 {
9882 unsigned int mask_size;
9883 unsigned long *mask;
9884
9885 /*
9886 * sched_setaffinity needs multiples of ulong, so need to take
9887 * care of mismatches between target ulong and host ulong sizes.
9888 */
9889 if (arg2 & (sizeof(abi_ulong) - 1)) {
9890 ret = -TARGET_EINVAL;
9891 break;
9892 }
9893 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9894
9895 mask = alloca(mask_size);
9896 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9897 goto efault;
9898 }
9899 memcpy(mask, p, arg2);
9900 unlock_user_struct(p, arg2, 0);
9901
9902 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9903 }
9904 break;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            /* NULL param pointer is EINVAL, matching the kernel */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            ret = get_errno(sched_setparam(arg1, &schp));
        }
        break;
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    goto efault;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        break;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            /* arg3 is the sched_param pointer here (arg2 is the policy) */
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
        break;
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
        break;
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
        break;
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
        break;
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
        break;
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        break;
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(safe_nanosleep(&req, &rem));
            /* On interruption (EINTR) the kernel reports the remaining
             * time; only then is *rem meaningful to copy back. */
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        break;
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
        goto unimplemented;
#endif
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
        goto unimplemented;
#endif
    case TARGET_NR_prctl:
        /* Only the options taking a pointer argument need translation;
         * everything else is passed straight through to the host. */
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            /* arg2 is a guest pointer to receive the signal number */
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2)) {
                goto efault;
            }
            break;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            /* Thread names are at most 16 bytes including the NUL */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                goto efault;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            break;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                goto efault;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            break;
        }
#endif
        default:
            /* Most prctl options have no pointer arguments */
            ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
            break;
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        ret = do_arch_prctl(cpu_env, arg1, arg2);
        break;
#else
        goto unimplemented;
#endif
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* On 32-bit ABIs that require 64-bit register pairs to be
         * aligned, the offset pair is shifted up by one argument slot. */
        if (regpairs_aligned(cpu_env)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
            goto efault;
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        break;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            goto efault;
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            goto efault;
        ret = get_errno(sys_getcwd1(p, arg2));
        /* ret counts the bytes written including the NUL, so copy that many */
        unlock_user(p, arg1, ret);
        break;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
        /* capget/capset share all the header/data marshalling; `num`
         * selects the direction of the data copy and the host call. */
        {
            struct target_user_cap_header *target_header;
            struct target_user_cap_data *target_data = NULL;
            struct __user_cap_header_struct header;
            struct __user_cap_data_struct data[2];
            struct __user_cap_data_struct *dataptr = NULL;
            int i, target_datalen;
            int data_items = 1;

            /* The header is locked for write because the kernel writes
             * back its preferred version on mismatch (see below). */
            if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
                goto efault;
            }
            header.version = tswap32(target_header->version);
            header.pid = tswap32(target_header->pid);

            if (header.version != _LINUX_CAPABILITY_VERSION) {
                /* Version 2 and up takes pointer to two user_data structs */
                data_items = 2;
            }

            target_datalen = sizeof(*target_data) * data_items;

            /* arg2 may legitimately be NULL (version probe) */
            if (arg2) {
                if (num == TARGET_NR_capget) {
                    target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
                } else {
                    target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
                }
                if (!target_data) {
                    unlock_user_struct(target_header, arg1, 0);
                    goto efault;
                }

                if (num == TARGET_NR_capset) {
                    for (i = 0; i < data_items; i++) {
                        data[i].effective = tswap32(target_data[i].effective);
                        data[i].permitted = tswap32(target_data[i].permitted);
                        data[i].inheritable = tswap32(target_data[i].inheritable);
                    }
                }

                dataptr = data;
            }

            if (num == TARGET_NR_capget) {
                ret = get_errno(capget(&header, dataptr));
            } else {
                ret = get_errno(capset(&header, dataptr));
            }

            /* The kernel always updates version for both capget and capset */
            target_header->version = tswap32(header.version);
            unlock_user_struct(target_header, arg1, 1);

            if (arg2) {
                if (num == TARGET_NR_capget) {
                    for (i = 0; i < data_items; i++) {
                        target_data[i].effective = tswap32(data[i].effective);
                        target_data[i].permitted = tswap32(data[i].permitted);
                        target_data[i].inheritable = tswap32(data[i].inheritable);
                    }
                    unlock_user(target_data, arg2, target_datalen);
                } else {
                    unlock_user(target_data, arg2, 0);
                }
            }
            break;
        }
    case TARGET_NR_sigaltstack:
        ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
        break;
10142
#ifdef CONFIG_SENDFILE
    case TARGET_NR_sendfile:
        {
            off_t *offp = NULL;
            off_t off;
            /* arg3 is an optional in/out offset pointer; sendfile itself
             * takes an abi_long-sized offset for this variant. */
            if (arg3) {
                ret = get_user_sal(off, arg3);
                if (is_error(ret)) {
                    break;
                }
                offp = &off;
            }
            ret = get_errno(sendfile(arg1, arg2, offp, arg4));
            if (!is_error(ret) && arg3) {
                /* Write back the updated offset; if that fails, the
                 * copy error replaces the sendfile result. */
                abi_long ret2 = put_user_sal(off, arg3);
                if (is_error(ret2)) {
                    ret = ret2;
                }
            }
            break;
        }
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
        {
            off_t *offp = NULL;
            off_t off;
            /* Same as sendfile but the guest offset is always 64-bit */
            if (arg3) {
                ret = get_user_s64(off, arg3);
                if (is_error(ret)) {
                    break;
                }
                offp = &off;
            }
            ret = get_errno(sendfile(arg1, arg2, offp, arg4));
            if (!is_error(ret) && arg3) {
                abi_long ret2 = put_user_s64(off, arg3);
                if (is_error(ret2)) {
                    ret = ret2;
                }
            }
            break;
        }
#endif
#else
    case TARGET_NR_sendfile:
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
#endif
        goto unimplemented;
#endif

#ifdef TARGET_NR_getpmsg
    case TARGET_NR_getpmsg:
        goto unimplemented;
#endif
#ifdef TARGET_NR_putpmsg
    case TARGET_NR_putpmsg:
        goto unimplemented;
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* vfork is emulated as a fork variant with the vfork clone flags */
        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
                        0, 0, 0, 0));
        break;
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        /* Resource numbers differ between ABIs; map before calling */
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                goto efault;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        /* The helper handles the ABI-specific 64-bit length register split */
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        /* path() redirects through the QEMU sysroot prefix if configured */
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        break;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        /* Legacy 16-bit uid/gid syscall: low2high* map the 16-bit
         * "overflow" ids (65534) to the host's wider id types. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        ret = get_errno(high2lowuid(getuid()));
        break;
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        ret = get_errno(high2lowgid(getgid()));
        break;
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        ret = get_errno(high2lowuid(geteuid()));
        break;
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        ret = get_errno(high2lowgid(getegid()));
        break;
#endif
    case TARGET_NR_setreuid:
        ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
        break;
    case TARGET_NR_setregid:
        ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
        break;
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            /* NOTE(review): gidsetsize comes straight from the guest and is
             * not range-checked before this alloca — a huge or negative
             * value could overflow the stack. Verify against callers. */
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            /* gidsetsize == 0 queries the count only; nothing to copy back */
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist)
                    goto efault;
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
            }
        }
        break;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist = NULL;
            int i;
            if (gidsetsize) {
                /* NOTE(review): same unchecked alloca size as getgroups */
                grouplist = alloca(gidsetsize * sizeof(gid_t));
                target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
    case TARGET_NR_fchown:
        ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
        break;
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        ret = get_errno(sys_setresuid(low2highuid(arg1),
                                      low2highuid(arg2),
                                      low2highuid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_getresgid
    /* NOTE(review): this guard tests TARGET_NR_getresgid for the
     * *setresgid* case; presumably both are always defined together,
     * but confirm against the target syscall tables. */
    case TARGET_NR_setresgid:
        ret = get_errno(sys_setresgid(low2highgid(arg1),
                                      low2highgid(arg2),
                                      low2highgid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
#endif
    case TARGET_NR_setuid:
        ret = get_errno(sys_setuid(low2highuid(arg1)));
        break;
    case TARGET_NR_setgid:
        ret = get_errno(sys_setgid(low2highgid(arg1)));
        break;
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
        break;
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
        break;
10429
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit id variant: ids pass through without 16-bit mapping */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
        break;
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid=geteuid();
            /* Alpha returns the effective id in a4 alongside the real id */
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
        }
        ret = get_errno(getuid());
        break;
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid=getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
        }
        ret = get_errno(getgid());
        break;
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        /* Unhandled sub-ops fall through with EOPNOTSUPP */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                /* Report the software completion control word (SWCR)
                 * derived from the CPU's FPCR. */
                uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);

                /* Copied from linux ieee_fpcr_to_swcr. */
                swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
                swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
                swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                        | SWCR_TRAP_ENABLE_DZE
                                        | SWCR_TRAP_ENABLE_OVF);
                swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                        | SWCR_TRAP_ENABLE_INE);
                swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
                swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;

                if (put_user_u64 (swcr, arg2))
                    goto efault;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        break;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                /* Inverse of GSI_IEEE_FP_CONTROL: rebuild the FPCR from
                 * the guest-supplied SWCR, keeping the rounding mode bits. */
                uint64_t swcr, fpcr, orig_fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    goto efault;
                }
                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr = orig_fpcr & FPCR_DYN_MASK;

                /* Copied from linux ieee_swcr_to_fpcr. */
                fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
                fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                                  | SWCR_TRAP_ENABLE_DZE
                                  | SWCR_TRAP_ENABLE_OVF)) << 48;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                                  | SWCR_TRAP_ENABLE_INE)) << 57;
                fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
                fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, orig_fpcr;
                int si_code;

                if (get_user_u64(exc, arg2)) {
                    goto efault;
                }

                orig_fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* We only add to the exception status here.  */
                fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);

                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;

                /* Old exceptions are not signaled.  */
                fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

                /* If any exceptions set by this call,
                   and are unmasked, send a signal.  */
                /* Later checks deliberately override earlier ones so the
                 * highest-priority (last-checked) exception wins. */
                si_code = 0;
                if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
                    si_code = TARGET_FPE_FLTRES;
                }
                if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
                    si_code = TARGET_FPE_FLTUND;
                }
                if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
                    si_code = TARGET_FPE_FLTOVF;
                }
                if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
                    si_code = TARGET_FPE_FLTDIV;
                }
                if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
                    si_code = TARGET_FPE_FLTINV;
                }
                if (si_code != 0) {
                    target_siginfo_t info;
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
                }
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        break;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            /* OSF/1 passes the mask by value and returns the old mask as
             * the syscall result, unlike POSIX sigprocmask. */
            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        break;
#endif
10626
#ifdef TARGET_NR_getgid32
    /* 32-bit uid/gid syscall variants: no 16-bit overflow mapping, the
     * ids pass through unchanged. */
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
        break;
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
        break;
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
        break;
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* NOTE(review): gidsetsize is unchecked before this alloca,
             * same pattern as the 16-bit getgroups case. */
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        break;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(sys_setresuid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(sys_setresgid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(sys_setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(sys_setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif
10769
    case TARGET_NR_pivot_root:
        goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
                goto efault;
            /* NOTE(review): the output vector is locked with
             * lock_user_string(), i.e. as a NUL-terminated string, rather
             * than as a writable buffer sized from arg2 — confirm this is
             * adequate for the page-status bytes mincore writes back. */
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
            mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        /* posix_fadvise returns the error directly, not via errno */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        ret = -host_to_target_errno(ret);
        break;
#endif
10801
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
        ret = -host_to_target_errno(posix_fadvise(arg1,
                                                  target_offset64(arg2, arg3),
                                                  target_offset64(arg4, arg5),
                                                  arg6));
        break;
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = -host_to_target_errno(posix_fadvise(arg1,
                                                  target_offset64(arg2, arg3),
                                                  arg4, arg5));
        break;
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x renumbers the POSIX_FADV_* advice constants */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
        break;
#endif
#endif /* end of 64-bit ABI fadvise handling */
10859
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
        ret = get_errno(0);
        break;
#endif
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        /* Converters between the guest and host flock64 layouts; the ARM
         * EABI variant has different alignment/padding, selected below. */
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_eabi_flock64;
            copyto = copy_to_user_eabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            ret = cmd;
            break;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                /* Write the (possibly updated) lock info back to the guest. */
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            /* F_SETLKW may block, so go through the signal-safe wrapper. */
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            /* All other commands share the generic fcntl path. */
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the guest's page size, not the host's. */
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        /* On 32-bit ABIs the 64-bit offset is passed as a register pair,
         * possibly shifted up one slot for even/odd pair alignment. */
        if (regpairs_aligned(cpu_env)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            /* arg2 == 0 means "just report the required buffer size". */
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            /* num distinguishes the link-following and non-following forms. */
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            /* p = pathname, n = attribute name, v = value buffer (optional). */
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            /* unlock_user() tolerates NULL, so no separate checks needed. */
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        break;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            /* p = pathname, n = attribute name, v = output buffer (optional). */
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            /* Copy back up to arg4 bytes of the retrieved value. */
            unlock_user(v, arg3, arg4);
        }
        break;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    ret = -TARGET_EFAULT;
                    break;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        break;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            /* p = pathname, n = attribute name. */
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        break;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        break;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        /* MIPS keeps the TLS pointer in the CP0 UserLocal register. */
        ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        /* CRIS requires the low byte of the TLS value to be zero. */
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#elif defined(TARGET_M68K)
        {
            /* m68k has no TLS register; the value lives in the TaskState. */
            TaskState *ts = cpu->opaque;
            ts->tp_value = arg1;
            ret = 0;
            break;
        }
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        break;
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            ret = ts->tp_value;
            break;
        }
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif
11164
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Convert the host timespec into the guest's layout at arg2. */
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        /* ts doubles as the remaining-time output when arg4 is non-NULL. */
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);

#if defined(TARGET_PPC)
        /* clock_nanosleep is odd in that it returns positive errno values.
         * On PPC, CR0 bit 3 should be set in such a situation. */
        if (ret && ret != -TARGET_ERESTARTSYS) {
            ((CPUPPCState *)cpu_env)->crf[0] |= 1;
        }
#endif
        break;
    }
#endif
11207
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* Pass the guest address through g2h(): the host kernel writes the
         * clear-child-tid value directly into guest memory at thread exit. */
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

    case TARGET_NR_tkill:
        ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
        break;

    case TARGET_NR_tgkill:
        ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        goto unimplemented_nowarn;
#endif
11240
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                /* NULL times means "set both timestamps to now". */
                tsp = NULL;
            } else {
                /* ts[0] = atime, ts[1] = mtime, converted from guest layout. */
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                /* NULL path: operate on the fd itself (futimens semantics). */
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* Watch the guest-supplied pathname.  Fail with EFAULT on a bad
         * guest pointer instead of passing NULL into path()/the syscall. */
        p = lock_user_string(arg2);
        if (!p) {
            goto efault;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
11292
11293 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11294 case TARGET_NR_mq_open:
11295 {
11296 struct mq_attr posix_mq_attr, *attrp;
11297
11298 p = lock_user_string(arg1 - 1);
11299 if (arg4 != 0) {
11300 copy_from_user_mq_attr (&posix_mq_attr, arg4);
11301 attrp = &posix_mq_attr;
11302 } else {
11303 attrp = 0;
11304 }
11305 ret = get_errno(mq_open(p, arg2, arg3, attrp));
11306 unlock_user (p, arg1, 0);
11307 }
11308 break;
11309
    case TARGET_NR_mq_unlink:
        /* arg1 - 1 skips the guest's leading '/' (see mq_open). */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            ret = -TARGET_EFAULT;
            break;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            /* NOTE(review): lock_user() result is not NULL-checked here;
             * a bad guest buffer would pass NULL to mq_timedsend — verify. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            /* NOTE(review): same unchecked lock_user() as mq_timedsend. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /* break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                /* Return the current attributes to the guest. */
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }

        }
        break;
#endif
11377
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            /* NULL offset pointers mean "use the fd's file position". */
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    goto efault;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    goto efault;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            /* Write the (host-updated) offsets back to guest memory. */
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    goto efault;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    goto efault;
                }
            }
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec() reported the failure through host errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        /* A plain eventfd needs no read/write translation handler. */
        fd_trans_unregister(ret);
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Translate the guest O_NONBLOCK/O_CLOEXEC bits to host values. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        fd_trans_unregister(ret);
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        /* 32-bit ABI: offset and len each arrive as a register pair. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 inserts a padding slot, shifting the pairs up by one. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        ret = do_signalfd4(arg1, arg2, arg4);
        break;
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Old signalfd is signalfd4 with no flags. */
        ret = do_signalfd4(arg1, arg2, 0);
        break;
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            /* Convert the guest epoll_event to the host layout. */
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif

#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents both for sanity and to keep the alloca() below
         * within a safe stack size. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            ret = -TARGET_EINVAL;
            break;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* epoll_wait is epoll_pwait with no signal mask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; convert each back. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            /* Copy the previous limits back to the guest. */
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Bad guest address: deliver SIGSEGV to the guest. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
            ret = 0xdeadbeef;

            /* NOTE(review): execution falls through here and compares
             * mem_value, which get_user_u32() did not assign on the fault
             * path — looks like a missing break; confirm intent. */
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif
11686
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Claim a slot in the g_posix_timers table for the new timer. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the guest an encoded id (magic | table index). */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif
11722
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            /* Fail on an unreadable new_value instead of using zeros. */
            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                goto efault;
            }
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* The previous setting goes back to arg4 (old_value) — not
             * arg2, which is the flags word — and only if requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
11746
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returned a target errno. */
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
11769
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* No file descriptor is created here: ret is an overrun count,
         * so it must not be passed to fd_trans_unregister(), which would
         * clobber the translation entry of an unrelated descriptor. */
        break;
    }
#endif
11786
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Release the slot for reuse by next_free_host_timer(). */
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
11803
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Translate the guest TFD_* flag bits to host values. */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                goto efault;
            }
        }
        break;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    goto efault;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            /* Old setting is written back only when the guest asked. */
            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                goto efault;
            }
        }
        break;
#endif
11847
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        ret = get_errno(unshare(arg1));
        break;
#endif
11870
    default:
    unimplemented:
        /* Log once per unknown syscall so guests' failures are traceable. */
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
 fail:
    /* Common exit path for all cases above: trace and return the result. */
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    trace_guest_user_syscall_ret(cpu, num, ret);
    return ret;
 efault:
    /* Shared bad-guest-pointer exit used by the cases above. */
    ret = -TARGET_EFAULT;
    goto fail;
}