// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/memfd.h>
#include <linux/compat.h>
#include <linux/mount.h>

#include <linux/poll.h>
#include <asm/siginfo.h>
#include <linux/uaccess.h>

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
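/*
 * Illustrative userspace sketch (not part of this file): F_SETFL only
 * updates the SETFL_MASK bits above, so the usual pattern is a
 * read-modify-write of the current flags:
 *
 *	int flags = fcntl(fd, F_GETFL, 0);
 *	if (flags >= 0)
 *		fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 */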
static int setfl(int fd, struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!inode_owner_or_capable(file_mnt_user_ns(filp), inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	/* Pipe packetized mode is controlled by O_DIRECT flag */
	if (!S_ISFIFO(inode->i_mode) && (arg & O_DIRECT)) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
		    !filp->f_mapping->a_ops->direct_IO)
			return -EINVAL;
	}

	if (filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	/*
	 * ->fasync() is responsible for setting the FASYNC bit.
	 */
	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
		if (error < 0)
			goto out;
		if (error > 0)
			error = 0;
	}
	spin_lock(&filp->f_lock);
	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
	spin_unlock(&filp->f_lock);

out:
	return error;
}
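/*
 * Illustrative note: the FIFO check above deliberately skips the
 * ->direct_IO requirement for pipes, which is what allows e.g.
 *
 *	fcntl(pipefd[1], F_SETFL, O_DIRECT);
 *
 * to switch a pipe into packetized mode even though pipes have no
 * direct-I/O address-space operations.
 */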
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
		     int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;

		if (pid) {
			const struct cred *cred = current_cred();
			filp->f_owner.uid = cred->uid;
			filp->f_owner.euid = cred->euid;
		}
	}
	write_unlock_irq(&filp->f_owner.lock);
}
void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	security_file_set_fowner(filp);
	f_modown(filp, pid, type, force);
}
EXPORT_SYMBOL(__f_setown);
int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid = NULL;
	int who = arg, ret = 0;

	type = PIDTYPE_TGID;
	if (who < 0) {
		/* avoid overflow below */
		if (who == INT_MIN)
			return -EINVAL;

		type = PIDTYPE_PGID;
		who = -who;
	}

	rcu_read_lock();
	if (who) {
		pid = find_vpid(who);
		if (!pid)
			ret = -ESRCH;
	}

	if (!ret)
		__f_setown(filp, pid, type, force);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(f_setown);
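/*
 * Illustrative userspace sketch: F_SETOWN uses the sign of the argument
 * to pick the delivery target, matching the conversion above:
 *
 *	fcntl(fd, F_SETOWN, getpid());	 // this process (thread group)
 *	fcntl(fd, F_SETOWN, -getpgrp()); // the whole process group
 */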
void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_TGID, 1);
}
pid_t f_getown(struct file *filp)
{
	pid_t pid = 0;

	read_lock_irq(&filp->f_owner.lock);
	rcu_read_lock();
	if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
		pid = pid_vnr(filp->f_owner.pid);
		if (filp->f_owner.pid_type == PIDTYPE_PGID)
			pid = -pid;
	}
	rcu_read_unlock();
	read_unlock_irq(&filp->f_owner.lock);
	return pid;
}
static int f_setown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	struct pid *pid;
	enum pid_type type;
	int ret;

	ret = copy_from_user(&owner, owner_p, sizeof(owner));
	if (ret)
		return -EFAULT;

	switch (owner.type) {
	case F_OWNER_TID:
		type = PIDTYPE_PID;
		break;

	case F_OWNER_PID:
		type = PIDTYPE_TGID;
		break;

	case F_OWNER_PGRP:
		type = PIDTYPE_PGID;
		break;

	default:
		return -EINVAL;
	}

	rcu_read_lock();
	pid = find_vpid(owner.pid);
	if (owner.pid && !pid)
		ret = -ESRCH;
	else
		__f_setown(filp, pid, type, 1);
	rcu_read_unlock();

	return ret;
}
static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner = {};
	int ret = 0;

	read_lock_irq(&filp->f_owner.lock);
	rcu_read_lock();
	if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
		owner.pid = pid_vnr(filp->f_owner.pid);
	rcu_read_unlock();
	switch (filp->f_owner.pid_type) {
	case PIDTYPE_PID:
		owner.type = F_OWNER_TID;
		break;

	case PIDTYPE_TGID:
		owner.type = F_OWNER_PID;
		break;

	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	read_unlock_irq(&filp->f_owner.lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}
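/*
 * Illustrative userspace sketch: F_SETOWN_EX avoids the F_SETOWN sign
 * encoding by carrying an explicit owner type, e.g. for a single thread:
 *
 *	struct f_owner_ex owner = { .type = F_OWNER_TID, .pid = gettid() };
 *	fcntl(fd, F_SETOWN_EX, &owner);
 */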
#ifdef CONFIG_CHECKPOINT_RESTORE
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	struct user_namespace *user_ns = current_user_ns();
	uid_t __user *dst = (void __user *)arg;
	uid_t src[2];
	int err;

	read_lock_irq(&filp->f_owner.lock);
	src[0] = from_kuid(user_ns, filp->f_owner.uid);
	src[1] = from_kuid(user_ns, filp->f_owner.euid);
	read_unlock_irq(&filp->f_owner.lock);

	err = put_user(src[0], &dst[0]);
	err |= put_user(src[1], &dst[1]);

	return err;
}
#else
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	return -EINVAL;
}
#endif
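/*
 * Illustrative note: F_GETOWNER_UIDS is only built for checkpoint/restore
 * users; the caller receives the saved owner credentials as two uid_t
 * values:
 *
 *	uid_t uids[2];
 *	fcntl(fd, F_GETOWNER_UIDS, uids);	// uids[0] = uid, uids[1] = euid
 */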
static bool rw_hint_valid(enum rw_hint hint)
{
	switch (hint) {
	case RWH_WRITE_LIFE_NOT_SET:
	case RWH_WRITE_LIFE_NONE:
	case RWH_WRITE_LIFE_SHORT:
	case RWH_WRITE_LIFE_MEDIUM:
	case RWH_WRITE_LIFE_LONG:
	case RWH_WRITE_LIFE_EXTREME:
		return true;
	default:
		return false;
	}
}
static long fcntl_rw_hint(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inode *inode = file_inode(file);
	u64 __user *argp = (u64 __user *)arg;
	enum rw_hint hint;
	u64 h;

	switch (cmd) {
	case F_GET_FILE_RW_HINT:
		h = file_write_hint(file);
		if (copy_to_user(argp, &h, sizeof(*argp)))
			return -EFAULT;
		return 0;
	case F_SET_FILE_RW_HINT:
		if (copy_from_user(&h, argp, sizeof(h)))
			return -EFAULT;
		hint = (enum rw_hint) h;
		if (!rw_hint_valid(hint))
			return -EINVAL;

		spin_lock(&file->f_lock);
		file->f_write_hint = hint;
		spin_unlock(&file->f_lock);
		return 0;
	case F_GET_RW_HINT:
		h = inode->i_write_hint;
		if (copy_to_user(argp, &h, sizeof(*argp)))
			return -EFAULT;
		return 0;
	case F_SET_RW_HINT:
		if (copy_from_user(&h, argp, sizeof(h)))
			return -EFAULT;
		hint = (enum rw_hint) h;
		if (!rw_hint_valid(hint))
			return -EINVAL;

		inode_lock(inode);
		inode->i_write_hint = hint;
		inode_unlock(inode);
		return 0;
	default:
		return -EINVAL;
	}
}
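/*
 * Illustrative userspace sketch: write-life hints are passed as a u64 by
 * pointer. F_SET_RW_HINT applies to the inode, F_SET_FILE_RW_HINT only to
 * this open file description:
 *
 *	uint64_t hint = RWH_WRITE_LIFE_SHORT;
 *	fcntl(fd, F_SET_RW_HINT, &hint);
 */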
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	void __user *argp = (void __user *)arg;
	struct flock flock;
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		err = f_dupfd(arg, filp, 0);
		break;
	case F_DUPFD_CLOEXEC:
		err = f_dupfd(arg, filp, O_CLOEXEC);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_GETLK:
#endif
	case F_GETLK:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_getlk(filp, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			return -EFAULT;
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
#endif
	case F_SETLK:
	case F_SETLKW:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_setlk(fd, filp, cmd, &flock);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETOWN_EX:
		err = f_getown_ex(filp, arg);
		break;
	case F_SETOWN_EX:
		err = f_setown_ex(filp, arg);
		break;
	case F_GETOWNER_UIDS:
		err = f_getowner_uids(filp, arg);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	case F_SETPIPE_SZ:
	case F_GETPIPE_SZ:
		err = pipe_fcntl(filp, cmd, arg);
		break;
	case F_ADD_SEALS:
	case F_GET_SEALS:
		err = memfd_fcntl(filp, cmd, arg);
		break;
	case F_GET_RW_HINT:
	case F_SET_RW_HINT:
	case F_GET_FILE_RW_HINT:
	case F_SET_FILE_RW_HINT:
		err = fcntl_rw_hint(filp, cmd, arg);
		break;
	default:
		break;
	}
	return err;
}
static int check_fcntl_cmd(unsigned cmd)
{
	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
	case F_GETFD:
	case F_SETFD:
	case F_GETFL:
		return 1;
	}
	return 0;
}
SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct fd f = fdget_raw(fd);
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (!err)
		err = do_fcntl(fd, cmd, arg, f.file);

out1:
	fdput(f);
out:
	return err;
}
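/*
 * Illustrative userspace sketch: F_DUPFD_CLOEXEC (handled in do_fcntl()
 * above) duplicates to the lowest free descriptor >= arg with
 * close-on-exec already set, leaving no window for a concurrent exec:
 *
 *	int newfd = fcntl(oldfd, F_DUPFD_CLOEXEC, 10);
 */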
#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		unsigned long, arg)
{
	void __user *argp = (void __user *)arg;
	struct fd f = fdget_raw(fd);
	struct flock64 flock;
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out1;

	switch (cmd) {
	case F_GETLK64:
	case F_OFD_GETLK:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_getlk64(f.file, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			err = -EFAULT;
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_setlk64(fd, f.file, cmd, &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out1:
	fdput(f);
out:
	return err;
}
#endif
#ifdef CONFIG_COMPAT
/* careful - don't use anywhere else */
#define copy_flock_fields(dst, src)		\
	(dst)->l_type = (src)->l_type;		\
	(dst)->l_whence = (src)->l_whence;	\
	(dst)->l_start = (src)->l_start;	\
	(dst)->l_len = (src)->l_len;		\
	(dst)->l_pid = (src)->l_pid;

static int get_compat_flock(struct flock *kfl, const struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

static int get_compat_flock64(struct flock *kfl, const struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

static int put_compat_flock(const struct flock *kfl, struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	memset(&fl, 0, sizeof(struct compat_flock));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock)))
		return -EFAULT;
	return 0;
}

static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	BUILD_BUG_ON(sizeof(kfl->l_start) > sizeof(ufl->l_start));
	BUILD_BUG_ON(sizeof(kfl->l_len) > sizeof(ufl->l_len));

	memset(&fl, 0, sizeof(struct compat_flock64));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64)))
		return -EFAULT;
	return 0;
}
#undef copy_flock_fields
static unsigned int
convert_fcntl_cmd(unsigned int cmd)
{
	switch (cmd) {
	case F_GETLK64:
		return F_GETLK;
	case F_SETLK64:
		return F_SETLK;
	case F_SETLKW64:
		return F_SETLKW;
	}

	return cmd;
}

/*
 * GETLK was successful and we need to return the data, but it needs to fit in
 * the compat structure.
 * l_start shouldn't be too big, unless the original start + end is greater than
 * COMPAT_OFF_T_MAX, in which case the app was asking for trouble, so we return
 * -EOVERFLOW in that case. l_len could be too big, in which case we just
 * truncate it, and only allow the app to see that part of the conflicting lock
 * that might make sense to it anyway.
 */
static int fixup_compat_flock(struct flock *flock)
{
	if (flock->l_start > COMPAT_OFF_T_MAX)
		return -EOVERFLOW;
	if (flock->l_len > COMPAT_OFF_T_MAX)
		flock->l_len = COMPAT_OFF_T_MAX;
	return 0;
}
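/*
 * Illustrative note: a conflicting lock that merely extends past
 * COMPAT_OFF_T_MAX is still reported (l_len is clamped above); only a
 * lock whose start lies beyond COMPAT_OFF_T_MAX makes the compat F_GETLK
 * fail with -EOVERFLOW.
 */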
static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
			      compat_ulong_t arg)
{
	struct fd f = fdget_raw(fd);
	struct flock flock;
	long err = -EBADF;

	if (!f.file)
		return err;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out_put;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out_put;

	switch (cmd) {
	case F_GETLK:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (err)
			break;
		err = fixup_compat_flock(&flock);
		if (!err)
			err = put_compat_flock(&flock, compat_ptr(arg));
		break;
	case F_GETLK64:
	case F_OFD_GETLK:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (!err)
			err = put_compat_flock64(&flock, compat_ptr(arg));
		break;
	case F_SETLK:
	case F_SETLKW:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out_put:
	fdput(f);
	return err;
}
COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	return do_compat_fcntl64(fd, cmd, arg);
}

COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	switch (cmd) {
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_GETLK:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		return -EINVAL;
	}
	return do_compat_fcntl64(fd, cmd, arg);
}
#endif
/* Table to convert sigio signal codes into poll band bitmaps */

static const __poll_t band_table[NSIGPOLL] = {
	EPOLLIN | EPOLLRDNORM,			/* POLL_IN */
	EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND,	/* POLL_OUT */
	EPOLLIN | EPOLLRDNORM | EPOLLMSG,	/* POLL_MSG */
	EPOLLERR,				/* POLL_ERR */
	EPOLLPRI | EPOLLRDBAND,			/* POLL_PRI */
	EPOLLHUP | EPOLLERR			/* POLL_HUP */
};
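/*
 * Illustrative note: the table is indexed by (reason - POLL_IN), so a
 * POLL_OUT wakeup, for example, surfaces in si_band as
 * EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND after mangle_poll().
 */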
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
		uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
		uid_eq(fown->uid,  cred->suid) || uid_eq(fown->uid,  cred->uid)) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}
static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, enum pid_type type)
{
	/*
	 * F_SETSIG can change ->signum lockless in parallel, make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = READ_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
	default: {
		kernel_siginfo_t si;

		/* Queue a rt signal with the appropriate fd as its
		   value.  We use SI_SIGIO as the source, not
		   SI_KERNEL, since kernel signals always get
		   delivered even if we can't queue.  Failure to
		   queue in this case _should_ be reported; we fall
		   back to SIGIO in that case. --sct */
		clear_siginfo(&si);
		si.si_signo = signum;
		si.si_errno = 0;
		si.si_code  = reason;
		/*
		 * POSIX defines POLL_IN and friends to be signal
		 * specific si_codes for SIG_POLL.  Linux extended
		 * these si_codes to other signals in a way that is
		 * ambiguous if other signals also have signal
		 * specific si_codes.  In that case use SI_SIGIO instead
		 * to remove the ambiguity.
		 */
		if ((signum != SIGPOLL) && sig_specific_sicodes(signum))
			si.si_code = SI_SIGIO;

		/* Make sure we are called with one of the POLL_*
		   reasons, otherwise we could leak kernel stack into
		   userspace.  */
		BUG_ON((reason < POLL_IN) || ((reason - POLL_IN) >= NSIGPOLL));
		if (reason - POLL_IN >= NSIGPOLL)
			si.si_band = ~0L;
		else
			si.si_band = mangle_poll(band_table[reason - POLL_IN]);
		si.si_fd = fd;
		if (!do_send_sig_info(signum, &si, p, type))
			break;
	}
		fallthrough;	/* fall back on the old plain SIGIO signal */
	case 0:
		do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
	}
}
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	unsigned long flags;
	struct pid *pid;

	read_lock_irqsave(&fown->lock, flags);

	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	if (type <= PIDTYPE_TGID) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			send_sigio_to_task(p, fown, fd, band, type);
		rcu_read_unlock();
	} else {
		read_lock(&tasklist_lock);
		do_each_pid_task(pid, type, p) {
			send_sigio_to_task(p, fown, fd, band, type);
		} while_each_pid_task(pid, type, p);
		read_unlock(&tasklist_lock);
	}
 out_unlock_fown:
	read_unlock_irqrestore(&fown->lock, flags);
}
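/*
 * Illustrative userspace sketch: with F_SETSIG set to a realtime signal,
 * the queued siginfo built above lets the handler recover the descriptor
 * and the poll band:
 *
 *	static void on_io(int sig, siginfo_t *si, void *ctx)
 *	{
 *		int fd = si->si_fd;		// descriptor that became ready
 *		long band = si->si_band;	// POLLIN/POLLOUT/... bits
 *	}
 */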
static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown, enum pid_type type)
{
	if (sigio_perm(p, fown, SIGURG))
		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type);
}
int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&fown->lock, flags);

	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	if (type <= PIDTYPE_TGID) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			send_sigurg_to_task(p, fown, type);
		rcu_read_unlock();
	} else {
		read_lock(&tasklist_lock);
		do_each_pid_task(pid, type, p) {
			send_sigurg_to_task(p, fown, type);
		} while_each_pid_task(pid, type, p);
		read_unlock(&tasklist_lock);
	}
 out_unlock_fown:
	read_unlock_irqrestore(&fown->lock, flags);
	return ret;
}
static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

static void fasync_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(fasync_cache,
			container_of(head, struct fasync_struct, fa_rcu));
}
/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		write_lock_irq(&fa->fa_lock);
		fa->fa_file = NULL;
		write_unlock_irq(&fa->fa_lock);

		*fp = fa->fa_next;
		call_rcu(&fa->fa_rcu, fasync_free_rcu);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}
*fasync_alloc(void)
908 return kmem_cache_alloc(fasync_cache
, GFP_KERNEL
);
912 * NOTE! This can be used only for unused fasync entries:
913 * entries that actually got inserted on the fasync list
914 * need to be released by rcu - see fasync_remove_entry.
916 void fasync_free(struct fasync_struct
*new)
918 kmem_cache_free(fasync_cache
, new);
/*
 * Insert a new entry into the fasync list.  Return the pointer to the
 * old one if we didn't use the new one.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
{
	struct fasync_struct *fa, **fp;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		write_lock_irq(&fa->fa_lock);
		fa->fa_fd = fd;
		write_unlock_irq(&fa->fa_lock);
		goto out;
	}

	rwlock_init(&new->fa_lock);
	new->magic = FASYNC_MAGIC;
	new->fa_file = filp;
	new->fa_fd = fd;
	new->fa_next = *fapp;
	rcu_assign_pointer(*fapp, new);
	filp->f_flags |= FASYNC;

out:
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return fa;
}
/*
 * Add a fasync entry. Return negative on error, positive if
 * added, and zero if did nothing but change an existing one.
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *new;

	new = fasync_alloc();
	if (!new)
		return -ENOMEM;

	/*
	 * fasync_insert_entry() returns the old (update) entry if
	 * it existed.
	 *
	 * So free the (unused) new entry and return 0 to let the
	 * caller know that we didn't add any new fasync entries.
	 */
	if (fasync_insert_entry(fd, filp, fapp, new)) {
		fasync_free(new);
		return 0;
	}

	return 1;
}
/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code. It returns negative on error, 0 if it did no changes
 * and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file *filp, int on, struct fasync_struct **fapp)
{
	if (!on)
		return fasync_remove_entry(filp, fapp);
	return fasync_add_entry(fd, filp, fapp);
}

EXPORT_SYMBOL(fasync_helper);
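/*
 * Illustrative driver-side sketch (names hypothetical): a character
 * driver usually wires its ->fasync file operation straight through to
 * fasync_helper() and signals readiness with kill_fasync():
 *
 *	static struct fasync_struct *my_fasync_queue;
 *
 *	static int my_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &my_fasync_queue);
 *	}
 *
 *	// when new data arrives:
 *	//	kill_fasync(&my_fasync_queue, SIGIO, POLL_IN);
 */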
/*
 * rcu_read_lock() is held
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		unsigned long flags;

		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		read_lock_irqsave(&fa->fa_lock, flags);
		if (fa->fa_file) {
			fown = &fa->fa_file->f_owner;
			/* Don't send SIGURG to processes which have not set a
			   queued signum: SIGURG has its own default signalling
			   mechanism. */
			if (!(sig == SIGURG && fown->signum == 0))
				send_sigio(fown, fa->fa_fd, band);
		}
		read_unlock_irqrestore(&fa->fa_lock, flags);
		fa = rcu_dereference(fa->fa_next);
	}
}
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		rcu_read_lock();
		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(kill_fasync);
static int __init fcntl_init(void)
{
	/*
	 * Please add new bits here to ensure allocation uniqueness.
	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
	 * is defined as O_NONBLOCK on some platforms and not on others.
	 */
	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
		HWEIGHT32(
			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
			__FMODE_EXEC | __FMODE_NONOTIFY));

	fasync_cache = kmem_cache_create("fasync_cache",
					 sizeof(struct fasync_struct), 0,
					 SLAB_PANIC | SLAB_ACCOUNT, NULL);
	return 0;
}

module_init(fcntl_init)