// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/memfd.h>
#include <linux/compat.h>

#include <linux/poll.h>
#include <asm/siginfo.h>
#include <linux/uaccess.h>

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

int setfl(int fd, struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!inode_owner_or_capable(inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	/* Pipe packetized mode is controlled by O_DIRECT flag */
	if (!S_ISFIFO(inode->i_mode) && (arg & O_DIRECT)) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
			!filp->f_mapping->a_ops->direct_IO)
				return -EINVAL;
	}

	if (filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (!error && filp->f_op->setfl)
		error = filp->f_op->setfl(filp, arg);
	if (error)
		return error;

	/*
	 * ->fasync() is responsible for setting the FASYNC bit.
	 */
	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
		if (error < 0)
			goto out;
		if (error > 0)
			error = 0;
	}
	spin_lock(&filp->f_lock);
	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
	spin_unlock(&filp->f_lock);

 out:
	return error;
}
EXPORT_SYMBOL_GPL(setfl);
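
/*
 * setfl() above backs the F_SETFL command.  Because only the SETFL_MASK
 * bits may change, the usual userspace pattern is read-modify-write
 * (an illustrative sketch, not part of this file):
 *
 *	int flags = fcntl(fd, F_GETFL);
 *	if (flags >= 0)
 *		fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 */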

static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
		     int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;

		if (pid) {
			const struct cred *cred = current_cred();
			filp->f_owner.uid = cred->uid;
			filp->f_owner.euid = cred->euid;
		}
	}
	write_unlock_irq(&filp->f_owner.lock);
}

void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	security_file_set_fowner(filp);
	f_modown(filp, pid, type, force);
}
EXPORT_SYMBOL(__f_setown);

int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid = NULL;
	int who = arg, ret = 0;

	type = PIDTYPE_TGID;
	if (who < 0) {
		/* avoid overflow below */
		if (who == INT_MIN)
			return -EINVAL;

		type = PIDTYPE_PGID;
		who = -who;
	}

	rcu_read_lock();
	if (who) {
		pid = find_vpid(who);
		if (!pid)
			ret = -ESRCH;
	}

	if (!ret)
		__f_setown(filp, pid, type, force);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(f_setown);
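
/*
 * f_setown() above implements F_SETOWN, which encodes its target in the
 * argument's sign: a positive value names a process, a negative one a
 * process group (hence the INT_MIN check).  Illustrative userspace
 * sketch:
 *
 *	fcntl(fd, F_SETOWN, getpid());	// SIGIO to this process
 *	fcntl(fd, F_SETOWN, -pgrp);	// SIGIO to process group pgrp
 */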

void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_TGID, 1);
}

pid_t f_getown(struct file *filp)
{
	pid_t pid;
	read_lock(&filp->f_owner.lock);
	pid = pid_vnr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}

static int f_setown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	struct pid *pid;
	int type;
	int ret;

	ret = copy_from_user(&owner, owner_p, sizeof(owner));
	if (ret)
		return -EFAULT;

	switch (owner.type) {
	case F_OWNER_TID:
		type = PIDTYPE_PID;
		break;

	case F_OWNER_PID:
		type = PIDTYPE_TGID;
		break;

	case F_OWNER_PGRP:
		type = PIDTYPE_PGID;
		break;

	default:
		return -EINVAL;
	}

	rcu_read_lock();
	pid = find_vpid(owner.pid);
	if (owner.pid && !pid)
		ret = -ESRCH;
	else
		__f_setown(filp, pid, type, 1);
	rcu_read_unlock();

	return ret;
}

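/*
 * F_SETOWN_EX and F_GETOWN_EX (below) avoid that sign encoding by
 * carrying an explicit owner type.  Illustrative userspace sketch:
 *
 *	struct f_owner_ex owner = { .type = F_OWNER_PGRP, .pid = pgrp };
 *	fcntl(fd, F_SETOWN_EX, &owner);
 *	fcntl(fd, F_GETOWN_EX, &owner);	// owner.type disambiguates
 */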
static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	int ret = 0;

	read_lock(&filp->f_owner.lock);
	owner.pid = pid_vnr(filp->f_owner.pid);
	switch (filp->f_owner.pid_type) {
	case PIDTYPE_PID:
		owner.type = F_OWNER_TID;
		break;

	case PIDTYPE_TGID:
		owner.type = F_OWNER_PID;
		break;

	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	read_unlock(&filp->f_owner.lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}

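/*
 * F_GETOWNER_UIDS reports the credentials saved by f_modown() when the
 * owner was last set; checkpoint/restore tools use it to re-arm SIGIO
 * ownership faithfully.  Illustrative sketch:
 *
 *	uid_t uids[2];	// uids[0] = uid, uids[1] = euid
 *	fcntl(fd, F_GETOWNER_UIDS, uids);
 */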
#ifdef CONFIG_CHECKPOINT_RESTORE
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	struct user_namespace *user_ns = current_user_ns();
	uid_t __user *dst = (void __user *)arg;
	uid_t src[2];
	int err;

	read_lock(&filp->f_owner.lock);
	src[0] = from_kuid(user_ns, filp->f_owner.uid);
	src[1] = from_kuid(user_ns, filp->f_owner.euid);
	read_unlock(&filp->f_owner.lock);

	err = put_user(src[0], &dst[0]);
	err |= put_user(src[1], &dst[1]);

	return err;
}
#else
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	return -EINVAL;
}
#endif

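/*
 * Write-lifetime hints (F_SET_RW_HINT and friends) let applications
 * tell the kernel how long the data they write is expected to live.
 * The hint travels indirectly as a u64, e.g.:
 *
 *	uint64_t hint = RWH_WRITE_LIFE_SHORT;
 *	fcntl(fd, F_SET_RW_HINT, &hint);
 */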
static bool rw_hint_valid(enum rw_hint hint)
{
	switch (hint) {
	case RWF_WRITE_LIFE_NOT_SET:
	case RWH_WRITE_LIFE_NONE:
	case RWH_WRITE_LIFE_SHORT:
	case RWH_WRITE_LIFE_MEDIUM:
	case RWH_WRITE_LIFE_LONG:
	case RWH_WRITE_LIFE_EXTREME:
		return true;
	default:
		return false;
	}
}

static long fcntl_rw_hint(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inode *inode = file_inode(file);
	u64 __user *argp = (u64 __user *)arg;
	enum rw_hint hint;
	u64 h;

	switch (cmd) {
	case F_GET_FILE_RW_HINT:
		h = file_write_hint(file);
		if (copy_to_user(argp, &h, sizeof(*argp)))
			return -EFAULT;
		return 0;
	case F_SET_FILE_RW_HINT:
		if (copy_from_user(&h, argp, sizeof(h)))
			return -EFAULT;
		hint = (enum rw_hint) h;
		if (!rw_hint_valid(hint))
			return -EINVAL;

		spin_lock(&file->f_lock);
		file->f_write_hint = hint;
		spin_unlock(&file->f_lock);
		return 0;
	case F_GET_RW_HINT:
		h = inode->i_write_hint;
		if (copy_to_user(argp, &h, sizeof(*argp)))
			return -EFAULT;
		return 0;
	case F_SET_RW_HINT:
		if (copy_from_user(&h, argp, sizeof(h)))
			return -EFAULT;
		hint = (enum rw_hint) h;
		if (!rw_hint_valid(hint))
			return -EINVAL;

		inode_lock(inode);
		inode->i_write_hint = hint;
		inode_unlock(inode);
		return 0;
	default:
		return -EINVAL;
	}
}

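/*
 * do_fcntl() below dispatches every fcntl command on a fully opened
 * file.  For example, F_DUPFD_CLOEXEC duplicates a descriptor onto the
 * lowest free slot at or above the argument, atomically setting
 * close-on-exec:
 *
 *	int nfd = fcntl(fd, F_DUPFD_CLOEXEC, 10);	// nfd >= 10
 */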
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	void __user *argp = (void __user *)arg;
	struct flock flock;
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		err = f_dupfd(arg, filp, 0);
		break;
	case F_DUPFD_CLOEXEC:
		err = f_dupfd(arg, filp, O_CLOEXEC);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_GETLK:
#endif
	case F_GETLK:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_getlk(filp, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			return -EFAULT;
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
#endif
	/* Fallthrough */
	case F_SETLK:
	case F_SETLKW:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_setlk(fd, filp, cmd, &flock);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETOWN_EX:
		err = f_getown_ex(filp, arg);
		break;
	case F_SETOWN_EX:
		err = f_setown_ex(filp, arg);
		break;
	case F_GETOWNER_UIDS:
		err = f_getowner_uids(filp, arg);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	case F_SETPIPE_SZ:
	case F_GETPIPE_SZ:
		err = pipe_fcntl(filp, cmd, arg);
		break;
	case F_ADD_SEALS:
	case F_GET_SEALS:
		err = memfd_fcntl(filp, cmd, arg);
		break;
	case F_GET_RW_HINT:
	case F_SET_RW_HINT:
	case F_GET_FILE_RW_HINT:
	case F_SET_FILE_RW_HINT:
		err = fcntl_rw_hint(filp, cmd, arg);
		break;
	default:
		break;
	}
	return err;
}

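/*
 * On O_PATH descriptors (FMODE_PATH) only the commands below are
 * honoured: duplication (F_DUPFD, F_DUPFD_CLOEXEC), the close-on-exec
 * flag (F_GETFD, F_SETFD) and F_GETFL.  Anything else fails with
 * -EBADF in the syscall wrappers.
 */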
static int check_fcntl_cmd(unsigned cmd)
{
	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
	case F_GETFD:
	case F_SETFD:
	case F_GETFL:
		return 1;
	}
	return 0;
}

SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct fd f = fdget_raw(fd);
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (!err)
		err = do_fcntl(fd, cmd, arg, f.file);

out1:
	fdput(f);
out:
	return err;
}

#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		unsigned long, arg)
{
	void __user *argp = (void __user *)arg;
	struct fd f = fdget_raw(fd);
	struct flock64 flock;
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out1;

	switch (cmd) {
	case F_GETLK64:
	case F_OFD_GETLK:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_getlk64(f.file, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			err = -EFAULT;
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_setlk64(fd, f.file, cmd, &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out1:
	fdput(f);
out:
	return err;
}
#endif

524
525#ifdef CONFIG_COMPAT
526/* careful - don't use anywhere else */
527#define copy_flock_fields(dst, src) \
528 (dst)->l_type = (src)->l_type; \
529 (dst)->l_whence = (src)->l_whence; \
530 (dst)->l_start = (src)->l_start; \
531 (dst)->l_len = (src)->l_len; \
532 (dst)->l_pid = (src)->l_pid;
533
534static int get_compat_flock(struct flock *kfl, const struct compat_flock __user *ufl)
535{
536 struct compat_flock fl;
537
538 if (copy_from_user(&fl, ufl, sizeof(struct compat_flock)))
539 return -EFAULT;
540 copy_flock_fields(kfl, &fl);
541 return 0;
542}
543
544static int get_compat_flock64(struct flock *kfl, const struct compat_flock64 __user *ufl)
545{
546 struct compat_flock64 fl;
547
548 if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64)))
549 return -EFAULT;
550 copy_flock_fields(kfl, &fl);
551 return 0;
552}
553
554static int put_compat_flock(const struct flock *kfl, struct compat_flock __user *ufl)
555{
556 struct compat_flock fl;
557
558 memset(&fl, 0, sizeof(struct compat_flock));
559 copy_flock_fields(&fl, kfl);
560 if (copy_to_user(ufl, &fl, sizeof(struct compat_flock)))
561 return -EFAULT;
562 return 0;
563}
564
565static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __user *ufl)
566{
567 struct compat_flock64 fl;
568
569 BUILD_BUG_ON(sizeof(kfl->l_start) > sizeof(ufl->l_start));
570 BUILD_BUG_ON(sizeof(kfl->l_len) > sizeof(ufl->l_len));
571
572 memset(&fl, 0, sizeof(struct compat_flock64));
573 copy_flock_fields(&fl, kfl);
574 if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64)))
575 return -EFAULT;
576 return 0;
577}
578#undef copy_flock_fields
579
580static unsigned int
581convert_fcntl_cmd(unsigned int cmd)
582{
583 switch (cmd) {
584 case F_GETLK64:
585 return F_GETLK;
586 case F_SETLK64:
587 return F_SETLK;
588 case F_SETLKW64:
589 return F_SETLKW;
590 }
591
592 return cmd;
593}
594
595/*
596 * GETLK was successful and we need to return the data, but it needs to fit in
597 * the compat structure.
598 * l_start shouldn't be too big, unless the original start + end is greater than
599 * COMPAT_OFF_T_MAX, in which case the app was asking for trouble, so we return
600 * -EOVERFLOW in that case. l_len could be too big, in which case we just
601 * truncate it, and only allow the app to see that part of the conflicting lock
602 * that might make sense to it anyway
603 */
604static int fixup_compat_flock(struct flock *flock)
605{
606 if (flock->l_start > COMPAT_OFF_T_MAX)
607 return -EOVERFLOW;
608 if (flock->l_len > COMPAT_OFF_T_MAX)
609 flock->l_len = COMPAT_OFF_T_MAX;
610 return 0;
611}
static long do_compat_fcntl64(unsigned int fd, unsigned int cmd,
			      compat_ulong_t arg)
{
	struct fd f = fdget_raw(fd);
	struct flock flock;
	long err = -EBADF;

	if (!f.file)
		return err;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out_put;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out_put;

	switch (cmd) {
	case F_GETLK:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (err)
			break;
		err = fixup_compat_flock(&flock);
		if (!err)
			err = put_compat_flock(&flock, compat_ptr(arg));
		break;
	case F_GETLK64:
	case F_OFD_GETLK:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (!err)
			err = put_compat_flock64(&flock, compat_ptr(arg));
		break;
	case F_SETLK:
	case F_SETLKW:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out_put:
	fdput(f);
	return err;
}

COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	return do_compat_fcntl64(fd, cmd, arg);
}

COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	switch (cmd) {
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_GETLK:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		return -EINVAL;
	}
	return do_compat_fcntl64(fd, cmd, arg);
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const __poll_t band_table[NSIGPOLL] = {
	EPOLLIN | EPOLLRDNORM,			/* POLL_IN */
	EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND,	/* POLL_OUT */
	EPOLLIN | EPOLLRDNORM | EPOLLMSG,	/* POLL_MSG */
	EPOLLERR,				/* POLL_ERR */
	EPOLLPRI | EPOLLRDBAND,			/* POLL_PRI */
	EPOLLHUP | EPOLLERR			/* POLL_HUP */
};

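/*
 * A signal is only delivered if the credentials saved when the owner
 * was set (fown->uid/euid, recorded in f_modown()) identify root or
 * match the target task's real or saved uid, and no LSM objects.
 */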
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
		uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
		uid_eq(fown->uid,  cred->suid) || uid_eq(fown->uid,  cred->uid)) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, enum pid_type type)
{
	/*
	 * F_SETSIG can change ->signum lockless in parallel, make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = READ_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
		kernel_siginfo_t si;
	default:
		/* Queue a rt signal with the appropriate fd as its
		   value.  We use SI_SIGIO as the source, not
		   SI_KERNEL, since kernel signals always get
		   delivered even if we can't queue.  Failure to
		   queue in this case _should_ be reported; we fall
		   back to SIGIO in that case. --sct */
		clear_siginfo(&si);
		si.si_signo = signum;
		si.si_errno = 0;
		si.si_code  = reason;
		/*
		 * POSIX defines POLL_IN and friends to be signal
		 * specific si_codes for SIGPOLL.  Linux extended
		 * these si_codes to other signals in a way that is
		 * ambiguous if other signals also have signal
		 * specific si_codes.  In that case use SI_SIGIO instead
		 * to remove the ambiguity.
		 */
		if ((signum != SIGPOLL) && sig_specific_sicodes(signum))
			si.si_code = SI_SIGIO;

		/* Make sure we are called with one of the POLL_*
		   reasons, otherwise we could leak kernel stack into
		   userspace.  */
		BUG_ON((reason < POLL_IN) || ((reason - POLL_IN) >= NSIGPOLL));
		if (reason - POLL_IN >= NSIGPOLL)
			si.si_band = ~0L;
		else
			si.si_band = mangle_poll(band_table[reason - POLL_IN]);
		si.si_fd = fd;
		if (!do_send_sig_info(signum, &si, p, type))
			break;
		/* fall-through - fall back on the old plain SIGIO signal */
	case 0:
		do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
	}
}

void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;

	read_lock(&fown->lock);

	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	if (type <= PIDTYPE_TGID) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			send_sigio_to_task(p, fown, fd, band, type);
		rcu_read_unlock();
	} else {
		read_lock(&tasklist_lock);
		do_each_pid_task(pid, type, p) {
			send_sigio_to_task(p, fown, fd, band, type);
		} while_each_pid_task(pid, type, p);
		read_unlock(&tasklist_lock);
	}
 out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown, enum pid_type type)
{
	if (sigio_perm(p, fown, SIGURG))
		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int ret = 0;

	read_lock(&fown->lock);

	type = fown->pid_type;
	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	if (type <= PIDTYPE_TGID) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			send_sigurg_to_task(p, fown, type);
		rcu_read_unlock();
	} else {
		read_lock(&tasklist_lock);
		do_each_pid_task(pid, type, p) {
			send_sigurg_to_task(p, fown, type);
		} while_each_pid_task(pid, type, p);
		read_unlock(&tasklist_lock);
	}
 out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

static void fasync_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(fasync_cache,
			container_of(head, struct fasync_struct, fa_rcu));
}

/*
 * Remove a fasync entry. If successfully removed, return
 * positive and clear the FASYNC flag. If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		write_lock_irq(&fa->fa_lock);
		fa->fa_file = NULL;
		write_unlock_irq(&fa->fa_lock);

		*fp = fa->fa_next;
		call_rcu(&fa->fa_rcu, fasync_free_rcu);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}

struct fasync_struct *fasync_alloc(void)
{
	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}

/*
 * NOTE! This can be used only for unused fasync entries:
 * entries that actually got inserted on the fasync list
 * need to be released by rcu - see fasync_remove_entry.
 */
void fasync_free(struct fasync_struct *new)
{
	kmem_cache_free(fasync_cache, new);
}

/*
 * Insert a new entry into the fasync list.  Return the pointer to the
 * old one if we didn't use the new one.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
{
	struct fasync_struct *fa, **fp;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		write_lock_irq(&fa->fa_lock);
		fa->fa_fd = fd;
		write_unlock_irq(&fa->fa_lock);
		goto out;
	}

	rwlock_init(&new->fa_lock);
	new->magic = FASYNC_MAGIC;
	new->fa_file = filp;
	new->fa_fd = fd;
	new->fa_next = *fapp;
	rcu_assign_pointer(*fapp, new);
	filp->f_flags |= FASYNC;

out:
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return fa;
}

/*
 * Add a fasync entry. Return negative on error, positive if
 * added, and zero if it did nothing but change an existing one.
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *new;

	new = fasync_alloc();
	if (!new)
		return -ENOMEM;

	/*
	 * fasync_insert_entry() returns the old (updated) entry if
	 * it existed.
	 *
	 * So free the (unused) new entry and return 0 to let the
	 * caller know that we didn't add any new fasync entries.
	 */
	if (fasync_insert_entry(fd, filp, fapp, new)) {
		fasync_free(new);
		return 0;
	}

	return 1;
}

/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code. It returns negative on error, 0 if it made no changes
 * and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file *filp, int on, struct fasync_struct **fapp)
{
	if (!on)
		return fasync_remove_entry(filp, fapp);
	return fasync_add_entry(fd, filp, fapp);
}
EXPORT_SYMBOL(fasync_helper);
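
/*
 * Typical driver-side use of the fasync machinery (an illustrative
 * sketch; "mydev_fasync_queue" is a hypothetical per-device list head):
 *
 *	static struct fasync_struct *mydev_fasync_queue;
 *
 *	static int mydev_fasync(int fd, struct file *filp, int on)
 *	{
 *		return fasync_helper(fd, filp, on, &mydev_fasync_queue);
 *	}
 *
 * and, when new data arrives, kill_fasync(&mydev_fasync_queue, SIGIO,
 * POLL_IN) notifies every registered owner (see below).
 */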

/*
 * rcu_read_lock() is held
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;

		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in fasync_struct!\n");
			return;
		}
		read_lock(&fa->fa_lock);
		if (fa->fa_file) {
			fown = &fa->fa_file->f_owner;
			/* Don't send SIGURG to processes which have not set a
			   queued signum: SIGURG has its own default signalling
			   mechanism. */
			if (!(sig == SIGURG && fown->signum == 0))
				send_sigio(fown, fa->fa_fd, band);
		}
		read_unlock(&fa->fa_lock);
		fa = rcu_dereference(fa->fa_next);
	}
}

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		rcu_read_lock();
		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(kill_fasync);

static int __init fcntl_init(void)
{
	/*
	 * Please add new bits here to ensure allocation uniqueness.
	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
	 * is defined as O_NONBLOCK on some platforms and not on others.
	 */
	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
		HWEIGHT32(
			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
			__FMODE_EXEC | __FMODE_NONOTIFY));

	fasync_cache = kmem_cache_create("fasync_cache",
					 sizeof(struct fasync_struct), 0,
					 SLAB_PANIC, NULL);
	return 0;
}

module_init(fcntl_init)