/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/shmem_fs.h>
#include <linux/compat.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <linux/uaccess.h>
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
static int setfl(int fd, struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!inode_owner_or_capable(inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	/* Pipe packetized mode is controlled by O_DIRECT flag */
	if (!S_ISFIFO(inode->i_mode) && (arg & O_DIRECT)) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
		    !filp->f_mapping->a_ops->direct_IO)
			return -EINVAL;
	}

	if (filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	/*
	 * ->fasync() is responsible for setting the FASYNC bit.
	 */
	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
		if (error < 0)
			goto out;
		if (error > 0)
			error = 0;
	}
	spin_lock(&filp->f_lock);
	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
	spin_unlock(&filp->f_lock);

 out:
	return error;
}
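/*
 * Illustration only (not kernel code): the userspace path that ends up in
 * setfl() above is fcntl(F_SETFL).  A minimal sketch, assuming an already
 * open descriptor "fd", that turns on O_NONBLOCK while preserving the other
 * status flags:
 *
 *	#include <fcntl.h>
 *
 *	int set_nonblock(int fd)
 *	{
 *		int flags = fcntl(fd, F_GETFL);		// read current status flags
 *		if (flags < 0)
 *			return -1;
 *		return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *	}
 *
 * Only the bits in SETFL_MASK are honoured; access mode and creation flags
 * such as O_CREAT are ignored by F_SETFL.
 */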
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
		     int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;

		if (pid) {
			const struct cred *cred = current_cred();
			filp->f_owner.uid = cred->uid;
			filp->f_owner.euid = cred->euid;
		}
	}
	write_unlock_irq(&filp->f_owner.lock);
}
void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	security_file_set_fowner(filp);

	f_modown(filp, pid, type, force);
}
EXPORT_SYMBOL(__f_setown);
int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid;
	int who = arg;

	type = PIDTYPE_PID;
	if (who < 0) {
		/* avoid overflow below */
		if (who == INT_MIN)
			return -EINVAL;

		type = PIDTYPE_PGID;
		who = -who;
	}
	rcu_read_lock();
	pid = find_vpid(who);
	__f_setown(filp, pid, type, force);
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(f_setown);
void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_PID, 1);
}
pid_t f_getown(struct file *filp)
{
	pid_t pid;

	read_lock(&filp->f_owner.lock);
	pid = pid_vnr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}
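/*
 * Illustration only (not kernel code): the f_setown()/f_getown() pair above
 * backs plain F_SETOWN/F_GETOWN.  A hedged userspace sketch that asks for
 * SIGIO delivery to the calling process when "fd" becomes ready:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int enable_sigio(int fd)
 *	{
 *		if (fcntl(fd, F_SETOWN, getpid()) < 0)
 *			return -1;
 *		int flags = fcntl(fd, F_GETFL);
 *		if (flags < 0)
 *			return -1;
 *		return fcntl(fd, F_SETFL, flags | O_ASYNC);
 *	}
 *
 * Passing a negative value to F_SETOWN targets the process group instead,
 * which is why f_getown() returns a negated pid for PIDTYPE_PGID.
 */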
static int f_setown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	struct pid *pid;
	int type;
	int ret;

	ret = copy_from_user(&owner, owner_p, sizeof(owner));
	if (ret)
		return -EFAULT;

	switch (owner.type) {
	case F_OWNER_TID:
		type = PIDTYPE_MAX;
		break;

	case F_OWNER_PID:
		type = PIDTYPE_PID;
		break;

	case F_OWNER_PGRP:
		type = PIDTYPE_PGID;
		break;

	default:
		return -EINVAL;
	}

	rcu_read_lock();
	pid = find_vpid(owner.pid);
	if (owner.pid && !pid)
		ret = -ESRCH;
	else
		__f_setown(filp, pid, type, 1);
	rcu_read_unlock();

	return ret;
}
static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	int ret = 0;

	read_lock(&filp->f_owner.lock);
	owner.pid = pid_vnr(filp->f_owner.pid);
	switch (filp->f_owner.pid_type) {
	case PIDTYPE_MAX:
		owner.type = F_OWNER_TID;
		break;

	case PIDTYPE_PID:
		owner.type = F_OWNER_PID;
		break;

	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	read_unlock(&filp->f_owner.lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}
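/*
 * Illustration only (not kernel code): a hedged userspace sketch of the
 * F_SETOWN_EX interface handled by f_setown_ex()/f_getown_ex() above.  It
 * directs SIGIO/SIGURG for "fd" at the calling thread rather than the whole
 * process; "own_fd_by_this_thread" is a hypothetical helper name:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	int own_fd_by_this_thread(int fd)
 *	{
 *		struct f_owner_ex owner = {
 *			.type = F_OWNER_TID,
 *			.pid  = syscall(SYS_gettid),	// kernel thread id
 *		};
 *		return fcntl(fd, F_SETOWN_EX, &owner);
 *	}
 */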
#ifdef CONFIG_CHECKPOINT_RESTORE
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	struct user_namespace *user_ns = current_user_ns();
	uid_t __user *dst = (void __user *)arg;
	uid_t src[2];
	int err;

	read_lock(&filp->f_owner.lock);
	src[0] = from_kuid(user_ns, filp->f_owner.uid);
	src[1] = from_kuid(user_ns, filp->f_owner.euid);
	read_unlock(&filp->f_owner.lock);

	err  = put_user(src[0], &dst[0]);
	err |= put_user(src[1], &dst[1]);

	return err;
}
#else
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	return -EINVAL;
}
#endif
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	void __user *argp = (void __user *)arg;
	struct flock flock;
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		err = f_dupfd(arg, filp, 0);
		break;
	case F_DUPFD_CLOEXEC:
		err = f_dupfd(arg, filp, O_CLOEXEC);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_GETLK:
#endif
	case F_GETLK:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_getlk(filp, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			return -EFAULT;
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
#endif
	/* Fallthrough */
	case F_SETLK:
	case F_SETLKW:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_setlk(fd, filp, cmd, &flock);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETOWN_EX:
		err = f_getown_ex(filp, arg);
		break;
	case F_SETOWN_EX:
		err = f_setown_ex(filp, arg);
		break;
	case F_GETOWNER_UIDS:
		err = f_getowner_uids(filp, arg);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	case F_SETPIPE_SZ:
	case F_GETPIPE_SZ:
		err = pipe_fcntl(filp, cmd, arg);
		break;
	case F_ADD_SEALS:
	case F_GET_SEALS:
		err = shmem_fcntl(filp, cmd, arg);
		break;
	default:
		break;
	}
	return err;
}
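/*
 * Illustration only (not kernel code): a hedged userspace sketch of the
 * F_GETLK path dispatched by do_fcntl() above.  It asks whether a write
 * lock covering the whole of "fd" would conflict with an existing POSIX
 * lock; "whole_file_is_write_lockable" is a hypothetical helper name:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int whole_file_is_write_lockable(int fd)
 *	{
 *		struct flock fl = {
 *			.l_type   = F_WRLCK,
 *			.l_whence = SEEK_SET,
 *			.l_start  = 0,
 *			.l_len    = 0,		// 0 means "to end of file"
 *		};
 *		if (fcntl(fd, F_GETLK, &fl) < 0)
 *			return -1;
 *		return fl.l_type == F_UNLCK;	// F_UNLCK: no conflicting lock
 *	}
 */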
static int check_fcntl_cmd(unsigned cmd)
{
	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
	case F_GETFD:
	case F_SETFD:
	case F_GETFL:
		return 1;
	}
	return 0;
}
SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct fd f = fdget_raw(fd);
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (!err)
		err = do_fcntl(fd, cmd, arg, f.file);

out1:
	fdput(f);
out:
	return err;
}
#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		unsigned long, arg)
{
	void __user *argp = (void __user *)arg;
	struct fd f = fdget_raw(fd);
	struct flock64 flock;
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out1;

	switch (cmd) {
	case F_GETLK64:
	case F_OFD_GETLK:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_getlk64(f.file, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			err = -EFAULT;
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_setlk64(fd, f.file, cmd, &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out1:
	fdput(f);
out:
	return err;
}
#endif
#ifdef CONFIG_COMPAT
static int get_compat_flock(struct flock *kfl, struct compat_flock __user *ufl)
{
	if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) ||
	    __get_user(kfl->l_type, &ufl->l_type) ||
	    __get_user(kfl->l_whence, &ufl->l_whence) ||
	    __get_user(kfl->l_start, &ufl->l_start) ||
	    __get_user(kfl->l_len, &ufl->l_len) ||
	    __get_user(kfl->l_pid, &ufl->l_pid))
		return -EFAULT;
	return 0;
}
static int put_compat_flock(struct flock *kfl, struct compat_flock __user *ufl)
{
	if (!access_ok(VERIFY_WRITE, ufl, sizeof(*ufl)) ||
	    __put_user(kfl->l_type, &ufl->l_type) ||
	    __put_user(kfl->l_whence, &ufl->l_whence) ||
	    __put_user(kfl->l_start, &ufl->l_start) ||
	    __put_user(kfl->l_len, &ufl->l_len) ||
	    __put_user(kfl->l_pid, &ufl->l_pid))
		return -EFAULT;
	return 0;
}
#ifndef HAVE_ARCH_GET_COMPAT_FLOCK64
static int get_compat_flock64(struct flock *kfl, struct compat_flock64 __user *ufl)
{
	if (!access_ok(VERIFY_READ, ufl, sizeof(*ufl)) ||
	    __get_user(kfl->l_type, &ufl->l_type) ||
	    __get_user(kfl->l_whence, &ufl->l_whence) ||
	    __get_user(kfl->l_start, &ufl->l_start) ||
	    __get_user(kfl->l_len, &ufl->l_len) ||
	    __get_user(kfl->l_pid, &ufl->l_pid))
		return -EFAULT;
	return 0;
}
#endif
#ifndef HAVE_ARCH_PUT_COMPAT_FLOCK64
static int put_compat_flock64(struct flock *kfl, struct compat_flock64 __user *ufl)
{
	if (!access_ok(VERIFY_WRITE, ufl, sizeof(*ufl)) ||
	    __put_user(kfl->l_type, &ufl->l_type) ||
	    __put_user(kfl->l_whence, &ufl->l_whence) ||
	    __put_user(kfl->l_start, &ufl->l_start) ||
	    __put_user(kfl->l_len, &ufl->l_len) ||
	    __put_user(kfl->l_pid, &ufl->l_pid))
		return -EFAULT;
	return 0;
}
#endif
static unsigned int
convert_fcntl_cmd(unsigned int cmd)
{
	switch (cmd) {
	case F_GETLK64:
		return F_GETLK;
	case F_SETLK64:
		return F_SETLK;
	case F_SETLKW64:
		return F_SETLKW;
	}

	return cmd;
}
/*
 * GETLK was successful and we need to return the data, but it needs to fit in
 * the compat structure.
 * l_start shouldn't be too big, unless the original start + end is greater than
 * COMPAT_OFF_T_MAX, in which case the app was asking for trouble, so we return
 * -EOVERFLOW in that case.  l_len could be too big, in which case we just
 * truncate it, and only allow the app to see that part of the conflicting lock
 * that might make sense to it anyway
 */
static int fixup_compat_flock(struct flock *flock)
{
	if (flock->l_start > COMPAT_OFF_T_MAX)
		return -EOVERFLOW;
	if (flock->l_len > COMPAT_OFF_T_MAX)
		flock->l_len = COMPAT_OFF_T_MAX;
	return 0;
}
COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	struct fd f = fdget_raw(fd);
	struct flock flock;
	long err = -EBADF;

	if (!f.file)
		return err;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out_put;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out_put;

	switch (cmd) {
	case F_GETLK:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (err)
			break;
		err = fixup_compat_flock(&flock);
		if (err)
			break;
		err = put_compat_flock(&flock, compat_ptr(arg));
		break;
	case F_GETLK64:
	case F_OFD_GETLK:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (err)
			break;
		err = fixup_compat_flock(&flock);
		if (err)
			break;
		err = put_compat_flock64(&flock, compat_ptr(arg));
		break;
	case F_SETLK:
	case F_SETLKW:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out_put:
	fdput(f);
	return err;
}
COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	switch (cmd) {
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_GETLK:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		return -EINVAL;
	}
	return compat_sys_fcntl64(fd, cmd, arg);
}
#endif
/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
		uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
		uid_eq(fown->uid,  cred->suid) || uid_eq(fown->uid,  cred->uid)) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}
static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, int group)
{
	/*
	 * F_SETSIG can change ->signum lockless in parallel, make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = ACCESS_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
		siginfo_t si;
		default:
			/* Queue a rt signal with the appropriate fd as its
			   value.  We use SI_SIGIO as the source, not
			   SI_KERNEL, since kernel signals always get
			   delivered even if we can't queue.  Failure to
			   queue in this case _should_ be reported; we fall
			   back to SIGIO in that case. --sct */
			si.si_signo = signum;
			si.si_errno = 0;
			si.si_code  = reason;
			/* Make sure we are called with one of the POLL_*
			   reasons, otherwise we could leak kernel stack into
			   userspace.  */
			BUG_ON((reason & __SI_MASK) != __SI_POLL);
			if (reason - POLL_IN >= NSIGPOLL)
				si.si_band  = ~0L;
			else
				si.si_band = band_table[reason - POLL_IN];
			si.si_fd    = fd;
			if (!do_send_sig_info(signum, &si, p, group))
				break;
		/* fall-through: fall back on the old plain SIGIO signal */
		case 0:
			do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
	}
}
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigio_to_task(p, fown, fd, band, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
}
static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown, int group)
{
	if (sigio_perm(p, fown, SIGURG))
		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
}
int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;
	int ret = 0;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigurg_to_task(p, fown, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}
static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

static void fasync_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(fasync_cache,
			container_of(head, struct fasync_struct, fa_rcu));
}
/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		spin_lock_irq(&fa->fa_lock);
		fa->fa_file = NULL;
		spin_unlock_irq(&fa->fa_lock);

		*fp = fa->fa_next;
		call_rcu(&fa->fa_rcu, fasync_free_rcu);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}
struct fasync_struct *fasync_alloc(void)
{
	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}
/*
 * NOTE! This can be used only for unused fasync entries:
 * entries that actually got inserted on the fasync list
 * need to be released by rcu - see fasync_remove_entry.
 */
void fasync_free(struct fasync_struct *new)
{
	kmem_cache_free(fasync_cache, new);
}
/*
 * Insert a new entry into the fasync list.  Return the pointer to the
 * old one if we didn't use the new one.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
{
	struct fasync_struct *fa, **fp;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		spin_lock_irq(&fa->fa_lock);
		fa->fa_fd = fd;
		spin_unlock_irq(&fa->fa_lock);
		goto out;
	}

	spin_lock_init(&new->fa_lock);
	new->magic = FASYNC_MAGIC;
	new->fa_file = filp;
	new->fa_fd = fd;
	new->fa_next = *fapp;
	rcu_assign_pointer(*fapp, new);
	filp->f_flags |= FASYNC;

out:
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return fa;
}
/*
 * Add a fasync entry. Return negative on error, positive if
 * added, and zero if did nothing but change an existing one.
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *new;

	new = fasync_alloc();
	if (!new)
		return -ENOMEM;

	/*
	 * fasync_insert_entry() returns the old (update) entry if
	 * it existed.
	 *
	 * So free the (unused) new entry and return 0 to let the
	 * caller know that we didn't add any new fasync entries.
	 */
	if (fasync_insert_entry(fd, filp, fapp, new)) {
		fasync_free(new);
		return 0;
	}

	return 1;
}
/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code. It returns negative on error, 0 if it did no changes
 * and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	if (!on)
		return fasync_remove_entry(filp, fapp);
	return fasync_add_entry(fd, filp, fapp);
}

EXPORT_SYMBOL(fasync_helper);
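/*
 * Illustration only (not code in this file): a minimal sketch of how a
 * character device driver typically wires fasync_helper() into its
 * ->fasync() file operation.  "scull_dev", "scull_fasync" and the
 * "async_queue" field are hypothetical names used for the example:
 *
 *	struct scull_dev {
 *		struct fasync_struct *async_queue;
 *		// ... other per-device state ...
 *	};
 *
 *	static int scull_fasync(int fd, struct file *filp, int mode)
 *	{
 *		struct scull_dev *dev = filp->private_data;
 *
 *		return fasync_helper(fd, filp, mode, &dev->async_queue);
 *	}
 *
 * Drivers typically also call it with on == 0 from their ->release()
 * method so the entry is removed when the file is closed.
 */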
/*
 * rcu_read_lock() is held
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		unsigned long flags;

		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		spin_lock_irqsave(&fa->fa_lock, flags);
		if (fa->fa_file) {
			fown = &fa->fa_file->f_owner;
			/* Don't send SIGURG to processes which have not set a
			   queued signum: SIGURG has its own default signalling
			   mechanism. */
			if (!(sig == SIGURG && fown->signum == 0))
				send_sigio(fown, fa->fa_fd, band);
		}
		spin_unlock_irqrestore(&fa->fa_lock, flags);
		fa = rcu_dereference(fa->fa_next);
	}
}
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		rcu_read_lock();
		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(kill_fasync);
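/*
 * Illustration only (not code in this file): the producer side of the fasync
 * machinery.  A driver that queued readers via fasync_helper() (see the
 * sketch above) typically notifies them when new data arrives; "async_queue"
 * is the same hypothetical per-device pointer:
 *
 *	if (dev->async_queue)
 *		kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
 *
 * POLL_OUT would be used instead when the device becomes writable again.
 */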
static int __init fcntl_init(void)
{
	/*
	 * Please add new bits here to ensure allocation uniqueness.
	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
	 * is defined as O_NONBLOCK on some platforms and not on others.
	 */
	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
		HWEIGHT32(
			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
			__FMODE_EXEC | __FMODE_NONOTIFY));

	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
	return 0;
}

module_init(fcntl_init)