// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/fcntl.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/shmem_fs.h>
#include <linux/compat.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <linux/uaccess.h>

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

int setfl(int fd, struct file * filp, unsigned long arg)
{
	struct inode * inode = file_inode(filp);
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!inode_owner_or_capable(inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	/* Pipe packetized mode is controlled by O_DIRECT flag */
	if (!S_ISFIFO(inode->i_mode) && (arg & O_DIRECT)) {
		if (!filp->f_mapping || !filp->f_mapping->a_ops ||
			!filp->f_mapping->a_ops->direct_IO)
				return -EINVAL;
	}

	if (filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (!error && filp->f_op->setfl)
		error = filp->f_op->setfl(filp, arg);
	if (error)
		return error;

	/*
	 * ->fasync() is responsible for setting the FASYNC bit.
	 */
	if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) {
		error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
		if (error < 0)
			goto out;
		if (error > 0)
			error = 0;
	}
	spin_lock(&filp->f_lock);
	filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
	spin_unlock(&filp->f_lock);

out:
	return error;
}
EXPORT_SYMBOL_GPL(setfl);

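/*
 * Install @pid as the owner of @filp under the f_owner write lock.
 * Unless @force is set, an existing owner is left alone; when a new
 * owner is installed, the caller's uid/euid are recorded so that later
 * SIGIO/SIGURG delivery can be permission-checked against them.
 */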
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
		     int force)
{
	write_lock_irq(&filp->f_owner.lock);
	if (force || !filp->f_owner.pid) {
		put_pid(filp->f_owner.pid);
		filp->f_owner.pid = get_pid(pid);
		filp->f_owner.pid_type = type;

		if (pid) {
			const struct cred *cred = current_cred();
			filp->f_owner.uid = cred->uid;
			filp->f_owner.euid = cred->euid;
		}
	}
	write_unlock_irq(&filp->f_owner.lock);
}

void __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
		int force)
{
	security_file_set_fowner(filp);
	f_modown(filp, pid, type, force);
}
EXPORT_SYMBOL(__f_setown);

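/*
 * F_SETOWN: a positive @arg names a single process, a negative @arg
 * names the process group -@arg, and zero clears the owner.
 */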
int f_setown(struct file *filp, unsigned long arg, int force)
{
	enum pid_type type;
	struct pid *pid = NULL;
	int who = arg, ret = 0;

	type = PIDTYPE_PID;
	if (who < 0) {
		/* avoid overflow below */
		if (who == INT_MIN)
			return -EINVAL;

		type = PIDTYPE_PGID;
		who = -who;
	}

	rcu_read_lock();
	if (who) {
		pid = find_vpid(who);
		if (!pid)
			ret = -ESRCH;
	}

	if (!ret)
		__f_setown(filp, pid, type, force);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(f_setown);

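/* Unconditionally clear the file owner. */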
void f_delown(struct file *filp)
{
	f_modown(filp, NULL, PIDTYPE_PID, 1);
}

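/* F_GETOWN: report the owner pid, negated if it names a process group. */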
pid_t f_getown(struct file *filp)
{
	pid_t pid;
	read_lock(&filp->f_owner.lock);
	pid = pid_vnr(filp->f_owner.pid);
	if (filp->f_owner.pid_type == PIDTYPE_PGID)
		pid = -pid;
	read_unlock(&filp->f_owner.lock);
	return pid;
}

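/*
 * F_SETOWN_EX: like F_SETOWN, but a struct f_owner_ex lets the caller
 * target a single thread (F_OWNER_TID, mapped to the internal
 * PIDTYPE_MAX marker) as well as a process or process group.
 */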
static int f_setown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	struct pid *pid;
	int type;
	int ret;

	ret = copy_from_user(&owner, owner_p, sizeof(owner));
	if (ret)
		return -EFAULT;

	switch (owner.type) {
	case F_OWNER_TID:
		type = PIDTYPE_MAX;
		break;

	case F_OWNER_PID:
		type = PIDTYPE_PID;
		break;

	case F_OWNER_PGRP:
		type = PIDTYPE_PGID;
		break;

	default:
		return -EINVAL;
	}

	rcu_read_lock();
	pid = find_vpid(owner.pid);
	if (owner.pid && !pid)
		ret = -ESRCH;
	else
		__f_setown(filp, pid, type, 1);
	rcu_read_unlock();

	return ret;
}

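/* F_GETOWN_EX: report the current owner as a struct f_owner_ex. */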
static int f_getown_ex(struct file *filp, unsigned long arg)
{
	struct f_owner_ex __user *owner_p = (void __user *)arg;
	struct f_owner_ex owner;
	int ret = 0;

	read_lock(&filp->f_owner.lock);
	owner.pid = pid_vnr(filp->f_owner.pid);
	switch (filp->f_owner.pid_type) {
	case PIDTYPE_MAX:
		owner.type = F_OWNER_TID;
		break;

	case PIDTYPE_PID:
		owner.type = F_OWNER_PID;
		break;

	case PIDTYPE_PGID:
		owner.type = F_OWNER_PGRP;
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}
	read_unlock(&filp->f_owner.lock);

	if (!ret) {
		ret = copy_to_user(owner_p, &owner, sizeof(owner));
		if (ret)
			ret = -EFAULT;
	}
	return ret;
}

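/*
 * F_GETOWNER_UIDS: expose the uid/euid recorded at F_SETOWN time,
 * translated into the caller's user namespace.  Only built for
 * checkpoint/restore kernels; otherwise the command is rejected.
 */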
#ifdef CONFIG_CHECKPOINT_RESTORE
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	struct user_namespace *user_ns = current_user_ns();
	uid_t __user *dst = (void __user *)arg;
	uid_t src[2];
	int err;

	read_lock(&filp->f_owner.lock);
	src[0] = from_kuid(user_ns, filp->f_owner.uid);
	src[1] = from_kuid(user_ns, filp->f_owner.euid);
	read_unlock(&filp->f_owner.lock);

	err = put_user(src[0], &dst[0]);
	err |= put_user(src[1], &dst[1]);

	return err;
}
#else
static int f_getowner_uids(struct file *filp, unsigned long arg)
{
	return -EINVAL;
}
#endif

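/* Accept only the write-lifetime hint values the uapi headers define. */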
static bool rw_hint_valid(enum rw_hint hint)
{
	switch (hint) {
	case RWF_WRITE_LIFE_NOT_SET:
	case RWH_WRITE_LIFE_NONE:
	case RWH_WRITE_LIFE_SHORT:
	case RWH_WRITE_LIFE_MEDIUM:
	case RWH_WRITE_LIFE_LONG:
	case RWH_WRITE_LIFE_EXTREME:
		return true;
	default:
		return false;
	}
}

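/*
 * F_{GET,SET}_RW_HINT operate on the inode's write hint,
 * F_{GET,SET}_FILE_RW_HINT on the per-open-file one.  Userspace
 * always passes a u64, whatever the kernel's enum width.
 */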
static long fcntl_rw_hint(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inode *inode = file_inode(file);
	u64 __user *argp = (u64 __user *)arg;
	enum rw_hint hint;
	u64 h;

	switch (cmd) {
	case F_GET_FILE_RW_HINT:
		h = file_write_hint(file);
		if (copy_to_user(argp, &h, sizeof(*argp)))
			return -EFAULT;
		return 0;
	case F_SET_FILE_RW_HINT:
		if (copy_from_user(&h, argp, sizeof(h)))
			return -EFAULT;
		hint = (enum rw_hint) h;
		if (!rw_hint_valid(hint))
			return -EINVAL;

		spin_lock(&file->f_lock);
		file->f_write_hint = hint;
		spin_unlock(&file->f_lock);
		return 0;
	case F_GET_RW_HINT:
		h = inode->i_write_hint;
		if (copy_to_user(argp, &h, sizeof(*argp)))
			return -EFAULT;
		return 0;
	case F_SET_RW_HINT:
		if (copy_from_user(&h, argp, sizeof(h)))
			return -EFAULT;
		hint = (enum rw_hint) h;
		if (!rw_hint_valid(hint))
			return -EINVAL;

		inode_lock(inode);
		inode->i_write_hint = hint;
		inode_unlock(inode);
		return 0;
	default:
		return -EINVAL;
	}
}

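/*
 * Dispatch one fcntl command on an already-resolved struct file;
 * returns the command's result or a -errno.
 */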
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
		struct file *filp)
{
	void __user *argp = (void __user *)arg;
	struct flock flock;
	long err = -EINVAL;

	switch (cmd) {
	case F_DUPFD:
		err = f_dupfd(arg, filp, 0);
		break;
	case F_DUPFD_CLOEXEC:
		err = f_dupfd(arg, filp, O_CLOEXEC);
		break;
	case F_GETFD:
		err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
		break;
	case F_SETFD:
		err = 0;
		set_close_on_exec(fd, arg & FD_CLOEXEC);
		break;
	case F_GETFL:
		err = filp->f_flags;
		break;
	case F_SETFL:
		err = setfl(fd, filp, arg);
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_GETLK:
#endif
	case F_GETLK:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_getlk(filp, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			return -EFAULT;
		break;
#if BITS_PER_LONG != 32
	/* 32-bit arches must use fcntl64() */
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
#endif
	/* Fallthrough */
	case F_SETLK:
	case F_SETLKW:
		if (copy_from_user(&flock, argp, sizeof(flock)))
			return -EFAULT;
		err = fcntl_setlk(fd, filp, cmd, &flock);
		break;
	case F_GETOWN:
		/*
		 * XXX If f_owner is a process group, the
		 * negative return value will get converted
		 * into an error.  Oops.  If we keep the
		 * current syscall conventions, the only way
		 * to fix this will be in libc.
		 */
		err = f_getown(filp);
		force_successful_syscall_return();
		break;
	case F_SETOWN:
		err = f_setown(filp, arg, 1);
		break;
	case F_GETOWN_EX:
		err = f_getown_ex(filp, arg);
		break;
	case F_SETOWN_EX:
		err = f_setown_ex(filp, arg);
		break;
	case F_GETOWNER_UIDS:
		err = f_getowner_uids(filp, arg);
		break;
	case F_GETSIG:
		err = filp->f_owner.signum;
		break;
	case F_SETSIG:
		/* arg == 0 restores default behaviour. */
		if (!valid_signal(arg)) {
			break;
		}
		err = 0;
		filp->f_owner.signum = arg;
		break;
	case F_GETLEASE:
		err = fcntl_getlease(filp);
		break;
	case F_SETLEASE:
		err = fcntl_setlease(fd, filp, arg);
		break;
	case F_NOTIFY:
		err = fcntl_dirnotify(fd, filp, arg);
		break;
	case F_SETPIPE_SZ:
	case F_GETPIPE_SZ:
		err = pipe_fcntl(filp, cmd, arg);
		break;
	case F_ADD_SEALS:
	case F_GET_SEALS:
		err = shmem_fcntl(filp, cmd, arg);
		break;
	case F_GET_RW_HINT:
	case F_SET_RW_HINT:
	case F_GET_FILE_RW_HINT:
	case F_SET_FILE_RW_HINT:
		err = fcntl_rw_hint(filp, cmd, arg);
		break;
	default:
		break;
	}
	return err;
}

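/*
 * Only these commands are usable on an O_PATH descriptor (FMODE_PATH):
 * they act on the descriptor itself rather than the underlying file.
 */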
static int check_fcntl_cmd(unsigned cmd)
{
	switch (cmd) {
	case F_DUPFD:
	case F_DUPFD_CLOEXEC:
	case F_GETFD:
	case F_SETFD:
	case F_GETFL:
		return 1;
	}
	return 0;
}

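/* The fcntl(2) system call entry point. */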
SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct fd f = fdget_raw(fd);
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (!err)
		err = do_fcntl(fd, cmd, arg, f.file);

out1:
	fdput(f);
out:
	return err;
}

#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		unsigned long, arg)
{
	void __user *argp = (void __user *)arg;
	struct fd f = fdget_raw(fd);
	struct flock64 flock;
	long err = -EBADF;

	if (!f.file)
		goto out;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out1;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out1;

	switch (cmd) {
	case F_GETLK64:
	case F_OFD_GETLK:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_getlk64(f.file, cmd, &flock);
		if (!err && copy_to_user(argp, &flock, sizeof(flock)))
			err = -EFAULT;
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = -EFAULT;
		if (copy_from_user(&flock, argp, sizeof(flock)))
			break;
		err = fcntl_setlk64(fd, f.file, cmd, &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out1:
	fdput(f);
out:
	return err;
}
#endif

#ifdef CONFIG_COMPAT
/* careful - don't use anywhere else */
#define copy_flock_fields(dst, src)		\
	(dst)->l_type = (src)->l_type;		\
	(dst)->l_whence = (src)->l_whence;	\
	(dst)->l_start = (src)->l_start;	\
	(dst)->l_len = (src)->l_len;		\
	(dst)->l_pid = (src)->l_pid;

static int get_compat_flock(struct flock *kfl, const struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

static int get_compat_flock64(struct flock *kfl, const struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64)))
		return -EFAULT;
	copy_flock_fields(kfl, &fl);
	return 0;
}

static int put_compat_flock(const struct flock *kfl, struct compat_flock __user *ufl)
{
	struct compat_flock fl;

	memset(&fl, 0, sizeof(struct compat_flock));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock)))
		return -EFAULT;
	return 0;
}

static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __user *ufl)
{
	struct compat_flock64 fl;

	BUILD_BUG_ON(sizeof(kfl->l_start) > sizeof(ufl->l_start));
	BUILD_BUG_ON(sizeof(kfl->l_len) > sizeof(ufl->l_len));

	memset(&fl, 0, sizeof(struct compat_flock64));
	copy_flock_fields(&fl, kfl);
	if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64)))
		return -EFAULT;
	return 0;
}
#undef copy_flock_fields

static unsigned int
convert_fcntl_cmd(unsigned int cmd)
{
	switch (cmd) {
	case F_GETLK64:
		return F_GETLK;
	case F_SETLK64:
		return F_SETLK;
	case F_SETLKW64:
		return F_SETLKW;
	}

	return cmd;
}

/*
 * GETLK was successful and we need to return the data, but it needs to fit in
 * the compat structure.
 * l_start shouldn't be too big, unless the original start + end is greater than
 * COMPAT_OFF_T_MAX, in which case the app was asking for trouble, so we return
 * -EOVERFLOW in that case.  l_len could be too big, in which case we just
 * truncate it, and only allow the app to see that part of the conflicting lock
 * that might make sense to it anyway
 */
static int fixup_compat_flock(struct flock *flock)
{
	if (flock->l_start > COMPAT_OFF_T_MAX)
		return -EOVERFLOW;
	if (flock->l_len > COMPAT_OFF_T_MAX)
		flock->l_len = COMPAT_OFF_T_MAX;
	return 0;
}

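/*
 * The 32-bit compat entry point: the lock commands are translated
 * through the compat_flock layouts above, everything else is handed
 * straight to do_fcntl().
 */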
COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	struct fd f = fdget_raw(fd);
	struct flock flock;
	long err = -EBADF;

	if (!f.file)
		return err;

	if (unlikely(f.file->f_mode & FMODE_PATH)) {
		if (!check_fcntl_cmd(cmd))
			goto out_put;
	}

	err = security_file_fcntl(f.file, cmd, arg);
	if (err)
		goto out_put;

	switch (cmd) {
	case F_GETLK:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (err)
			break;
		err = fixup_compat_flock(&flock);
		if (!err)
			err = put_compat_flock(&flock, compat_ptr(arg));
		break;
	case F_GETLK64:
	case F_OFD_GETLK:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_getlk(f.file, convert_fcntl_cmd(cmd), &flock);
		if (!err)
			err = put_compat_flock64(&flock, compat_ptr(arg));
		break;
	case F_SETLK:
	case F_SETLKW:
		err = get_compat_flock(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		err = get_compat_flock64(&flock, compat_ptr(arg));
		if (err)
			break;
		err = fcntl_setlk(fd, f.file, convert_fcntl_cmd(cmd), &flock);
		break;
	default:
		err = do_fcntl(fd, cmd, arg, f.file);
		break;
	}
out_put:
	fdput(f);
	return err;
}

COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	switch (cmd) {
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
	case F_OFD_GETLK:
	case F_OFD_SETLK:
	case F_OFD_SETLKW:
		return -EINVAL;
	}
	return compat_sys_fcntl64(fd, cmd, arg);
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
	POLLIN | POLLRDNORM,			/* POLL_IN */
	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
	POLLERR,				/* POLL_ERR */
	POLLPRI | POLLRDBAND,			/* POLL_PRI */
	POLLHUP | POLLERR			/* POLL_HUP */
};

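/*
 * May the file owner's credentials (saved at F_SETOWN time) deliver
 * @sig to task @p?  Allowed when the owner's euid is root, or the
 * owner's uid/euid matches the target's real or saved uid, and the
 * LSM does not object.
 */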
static inline int sigio_perm(struct task_struct *p,
			     struct fown_struct *fown, int sig)
{
	const struct cred *cred;
	int ret;

	rcu_read_lock();
	cred = __task_cred(p);
	ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) ||
		uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) ||
		uid_eq(fown->uid,  cred->suid) || uid_eq(fown->uid,  cred->uid)) &&
	       !security_file_send_sigiotask(p, fown, sig));
	rcu_read_unlock();
	return ret;
}

static void send_sigio_to_task(struct task_struct *p,
			       struct fown_struct *fown,
			       int fd, int reason, int group)
{
	/*
	 * F_SETSIG can change ->signum locklessly in parallel, make
	 * sure we read it once and use the same value throughout.
	 */
	int signum = READ_ONCE(fown->signum);

	if (!sigio_perm(p, fown, signum))
		return;

	switch (signum) {
		siginfo_t si;
	default:
		/* Queue a rt signal with the appropriate fd as its
		   value.  We use SI_SIGIO as the source, not
		   SI_KERNEL, since kernel signals always get
		   delivered even if we can't queue.  Failure to
		   queue in this case _should_ be reported; we fall
		   back to SIGIO in that case. --sct */
		si.si_signo = signum;
		si.si_errno = 0;
		si.si_code  = reason;
		/*
		 * POSIX defines POLL_IN and friends to be signal
		 * specific si_codes for SIGPOLL.  Linux extended
		 * these si_codes to other signals in a way that is
		 * ambiguous if other signals also have signal
		 * specific si_codes.  In that case use SI_SIGIO instead
		 * to remove the ambiguity.
		 */
		if ((signum != SIGPOLL) && sig_specific_sicodes(signum))
			si.si_code = SI_SIGIO;

		/* Make sure we are called with one of the POLL_*
		   reasons, otherwise we could leak kernel stack into
		   userspace.  */
		BUG_ON((reason < POLL_IN) || ((reason - POLL_IN) >= NSIGPOLL));
		if (reason - POLL_IN >= NSIGPOLL)
			si.si_band = ~0L;
		else
			si.si_band = band_table[reason - POLL_IN];
		si.si_fd    = fd;
		if (!do_send_sig_info(signum, &si, p, group))
			break;
		/* fall-through: fall back on the old plain SIGIO signal */
	case 0:
		do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
	}
}

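/*
 * Deliver SIGIO (or the F_SETSIG-selected signal) for @band activity
 * on @fd to every task named by @fown: one thread for a PIDTYPE_MAX
 * owner, otherwise a whole process or process group.
 */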
void send_sigio(struct fown_struct *fown, int fd, int band)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigio_to_task(p, fown, fd, band, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
out_unlock_fown:
	read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
				struct fown_struct *fown, int group)
{
	if (sigio_perm(p, fown, SIGURG))
		do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
}

int send_sigurg(struct fown_struct *fown)
{
	struct task_struct *p;
	enum pid_type type;
	struct pid *pid;
	int group = 1;
	int ret = 0;

	read_lock(&fown->lock);

	type = fown->pid_type;
	if (type == PIDTYPE_MAX) {
		group = 0;
		type = PIDTYPE_PID;
	}

	pid = fown->pid;
	if (!pid)
		goto out_unlock_fown;

	ret = 1;

	read_lock(&tasklist_lock);
	do_each_pid_task(pid, type, p) {
		send_sigurg_to_task(p, fown, group);
	} while_each_pid_task(pid, type, p);
	read_unlock(&tasklist_lock);
out_unlock_fown:
	read_unlock(&fown->lock);
	return ret;
}

static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

static void fasync_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(fasync_cache,
			container_of(head, struct fasync_struct, fa_rcu));
}

/*
 * Remove a fasync entry.  If successfully removed, return
 * positive and clear the FASYNC flag.  If no entry exists,
 * do nothing and return 0.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *fa, **fp;
	int result = 0;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		spin_lock_irq(&fa->fa_lock);
		fa->fa_file = NULL;
		spin_unlock_irq(&fa->fa_lock);

		*fp = fa->fa_next;
		call_rcu(&fa->fa_rcu, fasync_free_rcu);
		filp->f_flags &= ~FASYNC;
		result = 1;
		break;
	}
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return result;
}

struct fasync_struct *fasync_alloc(void)
{
	return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}

/*
 * NOTE! This can be used only for unused fasync entries:
 * entries that actually got inserted on the fasync list
 * need to be released by rcu - see fasync_remove_entry.
 */
void fasync_free(struct fasync_struct *new)
{
	kmem_cache_free(fasync_cache, new);
}

/*
 * Insert a new entry into the fasync list.  Return the pointer to the
 * old one if we didn't use the new one.
 *
 * NOTE! It is very important that the FASYNC flag always
 * match the state "is the filp on a fasync list".
 */
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
{
	struct fasync_struct *fa, **fp;

	spin_lock(&filp->f_lock);
	spin_lock(&fasync_lock);
	for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
		if (fa->fa_file != filp)
			continue;

		spin_lock_irq(&fa->fa_lock);
		fa->fa_fd = fd;
		spin_unlock_irq(&fa->fa_lock);
		goto out;
	}

	spin_lock_init(&new->fa_lock);
	new->magic = FASYNC_MAGIC;
	new->fa_file = filp;
	new->fa_fd = fd;
	new->fa_next = *fapp;
	rcu_assign_pointer(*fapp, new);
	filp->f_flags |= FASYNC;

out:
	spin_unlock(&fasync_lock);
	spin_unlock(&filp->f_lock);
	return fa;
}

/*
 * Add a fasync entry.  Return negative on error, positive if
 * added, and zero if it did nothing but change an existing one.
 */
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
	struct fasync_struct *new;

	new = fasync_alloc();
	if (!new)
		return -ENOMEM;

	/*
	 * fasync_insert_entry() returns the old (updated) entry if
	 * it existed.
	 *
	 * So free the (unused) new entry and return 0 to let the
	 * caller know that we didn't add any new fasync entries.
	 */
	if (fasync_insert_entry(fd, filp, fapp, new)) {
		fasync_free(new);
		return 0;
	}

	return 1;
}

/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue, and for regular files by the file
 * lease code.  It returns negative on error, 0 if it did no changes
 * and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	if (!on)
		return fasync_remove_entry(filp, fapp);
	return fasync_add_entry(fd, filp, fapp);
}

EXPORT_SYMBOL(fasync_helper);

/*
 * rcu_read_lock() is held
 */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
	while (fa) {
		struct fown_struct *fown;
		unsigned long flags;

		if (fa->magic != FASYNC_MAGIC) {
			printk(KERN_ERR "kill_fasync: bad magic number in "
			       "fasync_struct!\n");
			return;
		}
		spin_lock_irqsave(&fa->fa_lock, flags);
		if (fa->fa_file) {
			fown = &fa->fa_file->f_owner;
			/* Don't send SIGURG to processes which have not set a
			   queued signum: SIGURG has its own default signalling
			   mechanism. */
			if (!(sig == SIGURG && fown->signum == 0))
				send_sigio(fown, fa->fa_fd, band);
		}
		spin_unlock_irqrestore(&fa->fa_lock, flags);
		fa = rcu_dereference(fa->fa_next);
	}
}

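/*
 * Notify every entry on a fasync list.  Safe to call from interrupt
 * context: the walk is RCU-protected and each entry's fa_lock is
 * taken irq-safe.
 */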
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
	/* First a quick test without locking: usually
	 * the list is empty.
	 */
	if (*fp) {
		rcu_read_lock();
		kill_fasync_rcu(rcu_dereference(*fp), sig, band);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(kill_fasync);

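/*
 * Sanity-check the open-flag bit allocation at build time and create
 * the slab cache that backs fasync_alloc().
 */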
static int __init fcntl_init(void)
{
	/*
	 * Please add new bits here to ensure allocation uniqueness.
	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
	 * is defined as O_NONBLOCK on some platforms and not on others.
	 */
	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
		HWEIGHT32(
			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
			__FMODE_EXEC | __FMODE_NONOTIFY));

	fasync_cache = kmem_cache_create("fasync_cache",
		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
	return 0;
}

module_init(fcntl_init)