fs/notify/fanotify/fanotify_user.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/fanotify.h>
3 #include <linux/fcntl.h>
4 #include <linux/file.h>
5 #include <linux/fs.h>
6 #include <linux/anon_inodes.h>
7 #include <linux/fsnotify_backend.h>
8 #include <linux/init.h>
9 #include <linux/mount.h>
10 #include <linux/namei.h>
11 #include <linux/poll.h>
12 #include <linux/security.h>
13 #include <linux/syscalls.h>
14 #include <linux/slab.h>
15 #include <linux/types.h>
16 #include <linux/uaccess.h>
17 #include <linux/compat.h>
18 #include <linux/sched/signal.h>
19
20 #include <asm/ioctls.h>
21
22 #include "../../mount.h"
23 #include "../fdinfo.h"
24 #include "fanotify.h"
25
26 #define FANOTIFY_DEFAULT_MAX_EVENTS 16384
27 #define FANOTIFY_DEFAULT_MAX_MARKS 8192
28 #define FANOTIFY_DEFAULT_MAX_LISTENERS 128
29
30 /*
31 * All flags that may be specified in parameter event_f_flags of fanotify_init.
32 *
33 * Internal and external open flags are stored together in field f_flags of
34 * struct file. Only external open flags shall be allowed in event_f_flags.
35 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
36 * excluded.
37 */
38 #define FANOTIFY_INIT_ALL_EVENT_F_BITS ( \
39 O_ACCMODE | O_APPEND | O_NONBLOCK | \
40 __O_SYNC | O_DSYNC | O_CLOEXEC | \
41 O_LARGEFILE | O_NOATIME )
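/*
 * A minimal userspace sketch of the rule above (assuming the glibc
 * <sys/fanotify.h> wrapper): only the external open flags listed in
 * FANOTIFY_INIT_ALL_EVENT_F_BITS may appear in event_f_flags, e.g.
 *
 *	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_CLOEXEC,
 *			       O_RDONLY | O_LARGEFILE | O_CLOEXEC);
 *
 * while flags outside that set, such as O_CREAT or O_PATH, are rejected
 * with EINVAL.
 */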
42
43 extern const struct fsnotify_ops fanotify_fsnotify_ops;
44
45 struct kmem_cache *fanotify_mark_cache __read_mostly;
46 struct kmem_cache *fanotify_event_cachep __read_mostly;
47 struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
48
49 /*
50 * Get an fsnotify notification event if one exists and is small
51 * enough to fit in "count". Return an error pointer if the count
52 * is not large enough.
53 *
54 * Called with the group->notification_lock held.
55 */
56 static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
57 size_t count)
58 {
59 assert_spin_locked(&group->notification_lock);
60
61 pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
62
63 if (fsnotify_notify_queue_is_empty(group))
64 return NULL;
65
66 if (FAN_EVENT_METADATA_LEN > count)
67 return ERR_PTR(-EINVAL);
68
69 /* the notification_lock has been held the whole time, so the queue
70  * cannot have changed since the emptiness check above */
71 return fsnotify_remove_first_event(group);
72 }
73
74 static int create_fd(struct fsnotify_group *group,
75 struct fanotify_event_info *event,
76 struct file **file)
77 {
78 int client_fd;
79 struct file *new_file;
80
81 pr_debug("%s: group=%p event=%p\n", __func__, group, event);
82
83 client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
84 if (client_fd < 0)
85 return client_fd;
86
87 /*
88  * We need a new file handle for the userspace program so it can read the
89  * object even if it was originally opened O_WRONLY.
90  */
91 /* It's possible this event was an overflow event; in that case dentry and
92  * mnt are NULL. That's fine, just don't call dentry_open(). */
93 if (event->path.dentry && event->path.mnt)
94 new_file = dentry_open(&event->path,
95 group->fanotify_data.f_flags | FMODE_NONOTIFY,
96 current_cred());
97 else
98 new_file = ERR_PTR(-EOVERFLOW);
99 if (IS_ERR(new_file)) {
100 /*
101  * We still send an event even if we can't open the file. This can
102  * happen when, say, a task is gone and we try to open its /proc
103  * files, or when we try to open a write-only file such as one in
104  * sysfs. We just send the errno to userspace since there isn't
105  * much else we can do.
106  */
107 put_unused_fd(client_fd);
108 client_fd = PTR_ERR(new_file);
109 } else {
110 *file = new_file;
111 }
112
113 return client_fd;
114 }
115
116 static int fill_event_metadata(struct fsnotify_group *group,
117 struct fanotify_event_metadata *metadata,
118 struct fsnotify_event *fsn_event,
119 struct file **file)
120 {
121 int ret = 0;
122 struct fanotify_event_info *event;
123
124 pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
125 group, metadata, fsn_event);
126
127 *file = NULL;
128 event = container_of(fsn_event, struct fanotify_event_info, fse);
129 metadata->event_len = FAN_EVENT_METADATA_LEN;
130 metadata->metadata_len = FAN_EVENT_METADATA_LEN;
131 metadata->vers = FANOTIFY_METADATA_VERSION;
132 metadata->reserved = 0;
133 metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
134 metadata->pid = pid_vnr(event->tgid);
135 if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
136 metadata->fd = FAN_NOFD;
137 else {
138 metadata->fd = create_fd(group, event, file);
139 if (metadata->fd < 0)
140 ret = metadata->fd;
141 }
142
143 return ret;
144 }
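/*
 * A consumer-side sketch (assumption: reader code, not part of the kernel):
 * fill_event_metadata() stamps FANOTIFY_METADATA_VERSION into every record,
 * so a reader should verify it before trusting the layout:
 *
 *	if (metadata->vers != FANOTIFY_METADATA_VERSION) {
 *		fprintf(stderr, "fanotify ABI version mismatch\n");
 *		exit(EXIT_FAILURE);
 *	}
 */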
145
146 static struct fanotify_perm_event_info *dequeue_event(
147 struct fsnotify_group *group, int fd)
148 {
149 struct fanotify_perm_event_info *event, *return_e = NULL;
150
151 spin_lock(&group->notification_lock);
152 list_for_each_entry(event, &group->fanotify_data.access_list,
153 fae.fse.list) {
154 if (event->fd != fd)
155 continue;
156
157 list_del_init(&event->fae.fse.list);
158 return_e = event;
159 break;
160 }
161 spin_unlock(&group->notification_lock);
162
163 pr_debug("%s: found return_e=%p\n", __func__, return_e);
164
165 return return_e;
166 }
167
168 static int process_access_response(struct fsnotify_group *group,
169 struct fanotify_response *response_struct)
170 {
171 struct fanotify_perm_event_info *event;
172 int fd = response_struct->fd;
173 int response = response_struct->response;
174
175 pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
176 fd, response);
177 /*
178  * Make sure the response is valid. If it is invalid we do nothing;
179  * userspace can send a valid response later, or the event will be
180  * resolved when the group is released.
181  */
182 switch (response) {
183 case FAN_ALLOW:
184 case FAN_DENY:
185 break;
186 default:
187 return -EINVAL;
188 }
189
190 if (fd < 0)
191 return -EINVAL;
192
193 event = dequeue_event(group, fd);
194 if (!event)
195 return -ENOENT;
196
197 event->response = response;
198 wake_up(&group->fanotify_data.access_waitq);
199
200 return 0;
201 }
202
203 static ssize_t copy_event_to_user(struct fsnotify_group *group,
204 struct fsnotify_event *event,
205 char __user *buf)
206 {
207 struct fanotify_event_metadata fanotify_event_metadata;
208 struct file *f;
209 int fd, ret;
210
211 pr_debug("%s: group=%p event=%p\n", __func__, group, event);
212
213 ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
214 if (ret < 0)
215 return ret;
216
217 fd = fanotify_event_metadata.fd;
218 ret = -EFAULT;
219 if (copy_to_user(buf, &fanotify_event_metadata,
220 fanotify_event_metadata.event_len))
221 goto out_close_fd;
222
223 if (fanotify_is_perm_event(event->mask))
224 FANOTIFY_PE(event)->fd = fd;
225
226 if (fd != FAN_NOFD)
227 fd_install(fd, f);
228 return fanotify_event_metadata.event_len;
229
230 out_close_fd:
231 if (fd != FAN_NOFD) {
232 put_unused_fd(fd);
233 fput(f);
234 }
235 return ret;
236 }
237
238 /* fanotify userspace file descriptor functions */
239 static unsigned int fanotify_poll(struct file *file, poll_table *wait)
240 {
241 struct fsnotify_group *group = file->private_data;
242 int ret = 0;
243
244 poll_wait(file, &group->notification_waitq, wait);
245 spin_lock(&group->notification_lock);
246 if (!fsnotify_notify_queue_is_empty(group))
247 ret = POLLIN | POLLRDNORM;
248 spin_unlock(&group->notification_lock);
249
250 return ret;
251 }
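/*
 * Userspace sketch (assumption: standard poll(2) usage; handle_events() is a
 * hypothetical helper): the poll hook above reports POLLIN whenever the
 * notification queue is non-empty, so a reader can block until events arrive:
 *
 *	struct pollfd pfd = { .fd = fanotify_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		handle_events(fanotify_fd);
 */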
252
253 static ssize_t fanotify_read(struct file *file, char __user *buf,
254 size_t count, loff_t *pos)
255 {
256 struct fsnotify_group *group;
257 struct fsnotify_event *kevent;
258 char __user *start;
259 int ret;
260 DEFINE_WAIT_FUNC(wait, woken_wake_function);
261
262 start = buf;
263 group = file->private_data;
264
265 pr_debug("%s: group=%p\n", __func__, group);
266
267 add_wait_queue(&group->notification_waitq, &wait);
268 while (1) {
269 spin_lock(&group->notification_lock);
270 kevent = get_one_event(group, count);
271 spin_unlock(&group->notification_lock);
272
273 if (IS_ERR(kevent)) {
274 ret = PTR_ERR(kevent);
275 break;
276 }
277
278 if (!kevent) {
279 ret = -EAGAIN;
280 if (file->f_flags & O_NONBLOCK)
281 break;
282
283 ret = -ERESTARTSYS;
284 if (signal_pending(current))
285 break;
286
287 if (start != buf)
288 break;
289
290 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
291 continue;
292 }
293
294 ret = copy_event_to_user(group, kevent, buf);
295 if (unlikely(ret == -EOPENSTALE)) {
296 /*
297 * We cannot report events with stale fd so drop it.
298 * Setting ret to 0 will continue the event loop and
299 * do the right thing if there are no more events to
300 * read (i.e. return bytes read, -EAGAIN or wait).
301 */
302 ret = 0;
303 }
304
305 /*
306 * Permission events get queued to wait for response. Other
307 * events can be destroyed now.
308 */
309 if (!fanotify_is_perm_event(kevent->mask)) {
310 fsnotify_destroy_event(group, kevent);
311 } else {
312 if (ret <= 0) {
313 FANOTIFY_PE(kevent)->response = FAN_DENY;
314 wake_up(&group->fanotify_data.access_waitq);
315 } else {
316 spin_lock(&group->notification_lock);
317 list_add_tail(&kevent->list,
318 &group->fanotify_data.access_list);
319 spin_unlock(&group->notification_lock);
320 }
321 }
322 if (ret < 0)
323 break;
324 buf += ret;
325 count -= ret;
326 }
327 remove_wait_queue(&group->notification_waitq, &wait);
328
329 if (start != buf && ret != -EFAULT)
330 ret = buf - start;
331 return ret;
332 }
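/*
 * Userspace sketch of consuming this stream (assuming the FAN_EVENT_OK and
 * FAN_EVENT_NEXT helpers from <linux/fanotify.h> and a group fd named
 * fanotify_fd): each read() returns as many fanotify_event_metadata records
 * as fit in the buffer, and every record with a valid fd owns an open file
 * descriptor the reader must close:
 *
 *	char buf[4096];
 *	ssize_t len = read(fanotify_fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *md = (struct fanotify_event_metadata *)buf;
 *
 *	while (FAN_EVENT_OK(md, len)) {
 *		if (md->fd >= 0)
 *			close(md->fd);
 *		md = FAN_EVENT_NEXT(md, len);
 *	}
 *
 * An overflow event carries md->fd == FAN_NOFD and has no descriptor to close.
 */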
333
334 static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
335 {
336 struct fanotify_response response = { .fd = -1, .response = -1 };
337 struct fsnotify_group *group;
338 int ret;
339
340 if (!IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
341 return -EINVAL;
342
343 group = file->private_data;
344
345 if (count > sizeof(response))
346 count = sizeof(response);
347
348 pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
349
350 if (copy_from_user(&response, buf, count))
351 return -EFAULT;
352
353 ret = process_access_response(group, &response);
354 if (ret < 0)
355 count = ret;
356
357 return count;
358 }
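/*
 * Userspace sketch of answering a permission event (assuming a metadata
 * record "md" obtained from a prior read() on a FAN_CLASS_CONTENT or
 * FAN_CLASS_PRE_CONTENT group): the reply is a struct fanotify_response
 * written back to the same fanotify fd, after which the event's fd can be
 * closed:
 *
 *	struct fanotify_response resp;
 *
 *	resp.fd = md->fd;
 *	resp.response = FAN_ALLOW;
 *	write(fanotify_fd, &resp, sizeof(resp));
 *	close(md->fd);
 *
 * Use FAN_DENY instead of FAN_ALLOW to refuse the access.
 */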
359
360 static int fanotify_release(struct inode *ignored, struct file *file)
361 {
362 struct fsnotify_group *group = file->private_data;
363 struct fanotify_perm_event_info *event, *next;
364 struct fsnotify_event *fsn_event;
365
366 /*
367  * Stop new events from arriving in the notification queue. Since
368  * userspace cannot use the fanotify fd anymore, no event can enter
369  * or leave access_list by now either.
370  */
371 fsnotify_group_stop_queueing(group);
372
373 /*
374 * Process all permission events on access_list and notification queue
375 * and simulate reply from userspace.
376 */
377 spin_lock(&group->notification_lock);
378 list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
379 fae.fse.list) {
380 pr_debug("%s: found group=%p event=%p\n", __func__, group,
381 event);
382
383 list_del_init(&event->fae.fse.list);
384 event->response = FAN_ALLOW;
385 }
386
387 /*
388 * Destroy all non-permission events. For permission events just
389 * dequeue them and set the response. They will be freed once the
390 * response is consumed and fanotify_get_response() returns.
391 */
392 while (!fsnotify_notify_queue_is_empty(group)) {
393 fsn_event = fsnotify_remove_first_event(group);
394 if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
395 spin_unlock(&group->notification_lock);
396 fsnotify_destroy_event(group, fsn_event);
397 spin_lock(&group->notification_lock);
398 } else {
399 FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
400 }
401 }
402 spin_unlock(&group->notification_lock);
403
404 /* Response for all permission events is set, wake up waiters */
405 wake_up(&group->fanotify_data.access_waitq);
406
407 /* matches the fanotify_init->fsnotify_alloc_group */
408 fsnotify_destroy_group(group);
409
410 return 0;
411 }
412
413 static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
414 {
415 struct fsnotify_group *group;
416 struct fsnotify_event *fsn_event;
417 void __user *p;
418 int ret = -ENOTTY;
419 size_t send_len = 0;
420
421 group = file->private_data;
422
423 p = (void __user *) arg;
424
425 switch (cmd) {
426 case FIONREAD:
427 spin_lock(&group->notification_lock);
428 list_for_each_entry(fsn_event, &group->notification_list, list)
429 send_len += FAN_EVENT_METADATA_LEN;
430 spin_unlock(&group->notification_lock);
431 ret = put_user(send_len, (int __user *) p);
432 break;
433 }
434
435 return ret;
436 }
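/*
 * Userspace sketch (assumption: standard ioctl(2) usage): FIONREAD reports
 * how many bytes a read() could currently return, i.e. the number of queued
 * events times FAN_EVENT_METADATA_LEN:
 *
 *	int pending;
 *
 *	if (ioctl(fanotify_fd, FIONREAD, &pending) == 0)
 *		printf("%d bytes of events pending\n", pending);
 */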
437
438 static const struct file_operations fanotify_fops = {
439 .show_fdinfo = fanotify_show_fdinfo,
440 .poll = fanotify_poll,
441 .read = fanotify_read,
442 .write = fanotify_write,
443 .fasync = NULL,
444 .release = fanotify_release,
445 .unlocked_ioctl = fanotify_ioctl,
446 .compat_ioctl = fanotify_ioctl,
447 .llseek = noop_llseek,
448 };
449
450 static int fanotify_find_path(int dfd, const char __user *filename,
451 struct path *path, unsigned int flags)
452 {
453 int ret;
454
455 pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
456 dfd, filename, flags);
457
458 if (filename == NULL) {
459 struct fd f = fdget(dfd);
460
461 ret = -EBADF;
462 if (!f.file)
463 goto out;
464
465 ret = -ENOTDIR;
466 if ((flags & FAN_MARK_ONLYDIR) &&
467 !(S_ISDIR(file_inode(f.file)->i_mode))) {
468 fdput(f);
469 goto out;
470 }
471
472 *path = f.file->f_path;
473 path_get(path);
474 fdput(f);
475 } else {
476 unsigned int lookup_flags = 0;
477
478 if (!(flags & FAN_MARK_DONT_FOLLOW))
479 lookup_flags |= LOOKUP_FOLLOW;
480 if (flags & FAN_MARK_ONLYDIR)
481 lookup_flags |= LOOKUP_DIRECTORY;
482
483 ret = user_path_at(dfd, filename, lookup_flags, path);
484 if (ret)
485 goto out;
486 }
487
488 /* you can only watch an inode if you have read permissions on it */
489 ret = inode_permission(path->dentry->d_inode, MAY_READ);
490 if (ret)
491 path_put(path);
492 out:
493 return ret;
494 }
495
496 static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
497 __u32 mask,
498 unsigned int flags,
499 int *destroy)
500 {
501 __u32 oldmask = 0;
502
503 spin_lock(&fsn_mark->lock);
504 if (!(flags & FAN_MARK_IGNORED_MASK)) {
505 __u32 tmask = fsn_mark->mask & ~mask;
506
507 if (flags & FAN_MARK_ONDIR)
508 tmask &= ~FAN_ONDIR;
509
510 oldmask = fsn_mark->mask;
511 fsn_mark->mask = tmask;
512 } else {
513 __u32 tmask = fsn_mark->ignored_mask & ~mask;
514 if (flags & FAN_MARK_ONDIR)
515 tmask &= ~FAN_ONDIR;
516 fsn_mark->ignored_mask = tmask;
517 }
518 *destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
519 spin_unlock(&fsn_mark->lock);
520
521 return mask & oldmask;
522 }
523
524 static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
525 struct vfsmount *mnt, __u32 mask,
526 unsigned int flags)
527 {
528 struct fsnotify_mark *fsn_mark = NULL;
529 __u32 removed;
530 int destroy_mark;
531
532 mutex_lock(&group->mark_mutex);
533 fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
534 group);
535 if (!fsn_mark) {
536 mutex_unlock(&group->mark_mutex);
537 return -ENOENT;
538 }
539
540 removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
541 &destroy_mark);
542 if (removed & real_mount(mnt)->mnt_fsnotify_mask)
543 fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
544 if (destroy_mark)
545 fsnotify_detach_mark(fsn_mark);
546 mutex_unlock(&group->mark_mutex);
547 if (destroy_mark)
548 fsnotify_free_mark(fsn_mark);
549
550 fsnotify_put_mark(fsn_mark);
551 return 0;
552 }
553
554 static int fanotify_remove_inode_mark(struct fsnotify_group *group,
555 struct inode *inode, __u32 mask,
556 unsigned int flags)
557 {
558 struct fsnotify_mark *fsn_mark = NULL;
559 __u32 removed;
560 int destroy_mark;
561
562 mutex_lock(&group->mark_mutex);
563 fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
564 if (!fsn_mark) {
565 mutex_unlock(&group->mark_mutex);
566 return -ENOENT;
567 }
568
569 removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
570 &destroy_mark);
571 if (removed & inode->i_fsnotify_mask)
572 fsnotify_recalc_mask(inode->i_fsnotify_marks);
573 if (destroy_mark)
574 fsnotify_detach_mark(fsn_mark);
575 mutex_unlock(&group->mark_mutex);
576 if (destroy_mark)
577 fsnotify_free_mark(fsn_mark);
578
579 /* matches the fsnotify_find_mark() */
580 fsnotify_put_mark(fsn_mark);
581
582 return 0;
583 }
584
585 static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
586 __u32 mask,
587 unsigned int flags)
588 {
589 __u32 oldmask = -1;
590
591 spin_lock(&fsn_mark->lock);
592 if (!(flags & FAN_MARK_IGNORED_MASK)) {
593 __u32 tmask = fsn_mark->mask | mask;
594
595 if (flags & FAN_MARK_ONDIR)
596 tmask |= FAN_ONDIR;
597
598 oldmask = fsn_mark->mask;
599 fsn_mark->mask = tmask;
600 } else {
601 __u32 tmask = fsn_mark->ignored_mask | mask;
602 if (flags & FAN_MARK_ONDIR)
603 tmask |= FAN_ONDIR;
604
605 fsn_mark->ignored_mask = tmask;
606 if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
607 fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
608 }
609 spin_unlock(&fsn_mark->lock);
610
611 return mask & ~oldmask;
612 }
613
614 static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
615 struct inode *inode,
616 struct vfsmount *mnt)
617 {
618 struct fsnotify_mark *mark;
619 int ret;
620
621 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
622 return ERR_PTR(-ENOSPC);
623
624 mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
625 if (!mark)
626 return ERR_PTR(-ENOMEM);
627
628 fsnotify_init_mark(mark, group);
629 ret = fsnotify_add_mark_locked(mark, inode, mnt, 0);
630 if (ret) {
631 fsnotify_put_mark(mark);
632 return ERR_PTR(ret);
633 }
634
635 return mark;
636 }
637
638
639 static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
640 struct vfsmount *mnt, __u32 mask,
641 unsigned int flags)
642 {
643 struct fsnotify_mark *fsn_mark;
644 __u32 added;
645
646 mutex_lock(&group->mark_mutex);
647 fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
648 group);
649 if (!fsn_mark) {
650 fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
651 if (IS_ERR(fsn_mark)) {
652 mutex_unlock(&group->mark_mutex);
653 return PTR_ERR(fsn_mark);
654 }
655 }
656 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
657 if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
658 fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
659 mutex_unlock(&group->mark_mutex);
660
661 fsnotify_put_mark(fsn_mark);
662 return 0;
663 }
664
665 static int fanotify_add_inode_mark(struct fsnotify_group *group,
666 struct inode *inode, __u32 mask,
667 unsigned int flags)
668 {
669 struct fsnotify_mark *fsn_mark;
670 __u32 added;
671
672 pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
673
674 /*
675 * If some other task has this inode open for write we should not add
676 * an ignored mark, unless that ignored mark is supposed to survive
677  * modification anyway.
678 */
679 if ((flags & FAN_MARK_IGNORED_MASK) &&
680 !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
681 (atomic_read(&inode->i_writecount) > 0))
682 return 0;
683
684 mutex_lock(&group->mark_mutex);
685 fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
686 if (!fsn_mark) {
687 fsn_mark = fanotify_add_new_mark(group, inode, NULL);
688 if (IS_ERR(fsn_mark)) {
689 mutex_unlock(&group->mark_mutex);
690 return PTR_ERR(fsn_mark);
691 }
692 }
693 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
694 if (added & ~inode->i_fsnotify_mask)
695 fsnotify_recalc_mask(inode->i_fsnotify_marks);
696 mutex_unlock(&group->mark_mutex);
697
698 fsnotify_put_mark(fsn_mark);
699 return 0;
700 }
701
702 /* fanotify syscalls */
703 SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
704 {
705 struct fsnotify_group *group;
706 int f_flags, fd;
707 struct user_struct *user;
708 struct fanotify_event_info *oevent;
709
710 pr_debug("%s: flags=%d event_f_flags=%d\n",
711 __func__, flags, event_f_flags);
712
713 if (!capable(CAP_SYS_ADMIN))
714 return -EPERM;
715
716 if (flags & ~FAN_ALL_INIT_FLAGS)
717 return -EINVAL;
718
719 if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
720 return -EINVAL;
721
722 switch (event_f_flags & O_ACCMODE) {
723 case O_RDONLY:
724 case O_RDWR:
725 case O_WRONLY:
726 break;
727 default:
728 return -EINVAL;
729 }
730
731 user = get_current_user();
732 if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
733 free_uid(user);
734 return -EMFILE;
735 }
736
737 f_flags = O_RDWR | FMODE_NONOTIFY;
738 if (flags & FAN_CLOEXEC)
739 f_flags |= O_CLOEXEC;
740 if (flags & FAN_NONBLOCK)
741 f_flags |= O_NONBLOCK;
742
743 /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
744 group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
745 if (IS_ERR(group)) {
746 free_uid(user);
747 return PTR_ERR(group);
748 }
749
750 group->fanotify_data.user = user;
751 atomic_inc(&user->fanotify_listeners);
752
753 oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
754 if (unlikely(!oevent)) {
755 fd = -ENOMEM;
756 goto out_destroy_group;
757 }
758 group->overflow_event = &oevent->fse;
759
760 if (force_o_largefile())
761 event_f_flags |= O_LARGEFILE;
762 group->fanotify_data.f_flags = event_f_flags;
763 init_waitqueue_head(&group->fanotify_data.access_waitq);
764 INIT_LIST_HEAD(&group->fanotify_data.access_list);
765 switch (flags & FAN_ALL_CLASS_BITS) {
766 case FAN_CLASS_NOTIF:
767 group->priority = FS_PRIO_0;
768 break;
769 case FAN_CLASS_CONTENT:
770 group->priority = FS_PRIO_1;
771 break;
772 case FAN_CLASS_PRE_CONTENT:
773 group->priority = FS_PRIO_2;
774 break;
775 default:
776 fd = -EINVAL;
777 goto out_destroy_group;
778 }
779
780 if (flags & FAN_UNLIMITED_QUEUE) {
781 fd = -EPERM;
782 if (!capable(CAP_SYS_ADMIN))
783 goto out_destroy_group;
784 group->max_events = UINT_MAX;
785 } else {
786 group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
787 }
788
789 if (flags & FAN_UNLIMITED_MARKS) {
790 fd = -EPERM;
791 if (!capable(CAP_SYS_ADMIN))
792 goto out_destroy_group;
793 group->fanotify_data.max_marks = UINT_MAX;
794 } else {
795 group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
796 }
797
798 fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
799 if (fd < 0)
800 goto out_destroy_group;
801
802 return fd;
803
804 out_destroy_group:
805 fsnotify_destroy_group(group);
806 return fd;
807 }
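/*
 * Userspace sketch of creating a group (assuming the glibc fanotify_init()
 * wrapper; note that this kernel requires CAP_SYS_ADMIN for the call):
 *
 *	int fanotify_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_CONTENT,
 *					O_RDONLY | O_LARGEFILE);
 *	if (fanotify_fd < 0)
 *		perror("fanotify_init");
 *
 * FAN_CLASS_CONTENT or FAN_CLASS_PRE_CONTENT is needed if the group is meant
 * to receive permission events; FAN_CLASS_NOTIF groups get FS_PRIO_0 and are
 * limited to plain notification events.
 */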
808
809 SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
810 __u64, mask, int, dfd,
811 const char __user *, pathname)
812 {
813 struct inode *inode = NULL;
814 struct vfsmount *mnt = NULL;
815 struct fsnotify_group *group;
816 struct fd f;
817 struct path path;
818 u32 valid_mask = FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD;
819 int ret;
820
821 pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
822 __func__, fanotify_fd, flags, dfd, pathname, mask);
823
824 /* we only use the lower 32 bits as of right now. */
825 if (mask & ((__u64)0xffffffff << 32))
826 return -EINVAL;
827
828 if (flags & ~FAN_ALL_MARK_FLAGS)
829 return -EINVAL;
830 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
831 case FAN_MARK_ADD: /* fallthrough */
832 case FAN_MARK_REMOVE:
833 if (!mask)
834 return -EINVAL;
835 break;
836 case FAN_MARK_FLUSH:
837 if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
838 return -EINVAL;
839 break;
840 default:
841 return -EINVAL;
842 }
843
844 if (mask & FAN_ONDIR) {
845 flags |= FAN_MARK_ONDIR;
846 mask &= ~FAN_ONDIR;
847 }
848
849 if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS))
850 valid_mask |= FAN_ALL_PERM_EVENTS;
851
852 if (mask & ~valid_mask)
853 return -EINVAL;
854
855 f = fdget(fanotify_fd);
856 if (unlikely(!f.file))
857 return -EBADF;
858
859 /* verify that this is indeed a fanotify instance */
860 ret = -EINVAL;
861 if (unlikely(f.file->f_op != &fanotify_fops))
862 goto fput_and_out;
863 group = f.file->private_data;
864
865 /*
866 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not
867  * allowed to set permission events.
868 */
869 ret = -EINVAL;
870 if (mask & FAN_ALL_PERM_EVENTS &&
871 group->priority == FS_PRIO_0)
872 goto fput_and_out;
873
874 if (flags & FAN_MARK_FLUSH) {
875 ret = 0;
876 if (flags & FAN_MARK_MOUNT)
877 fsnotify_clear_vfsmount_marks_by_group(group);
878 else
879 fsnotify_clear_inode_marks_by_group(group);
880 goto fput_and_out;
881 }
882
883 ret = fanotify_find_path(dfd, pathname, &path, flags);
884 if (ret)
885 goto fput_and_out;
886
887 /* inode held in place by reference to path; group by fget on fd */
888 if (!(flags & FAN_MARK_MOUNT))
889 inode = path.dentry->d_inode;
890 else
891 mnt = path.mnt;
892
893 /* create/update an inode mark */
894 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
895 case FAN_MARK_ADD:
896 if (flags & FAN_MARK_MOUNT)
897 ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
898 else
899 ret = fanotify_add_inode_mark(group, inode, mask, flags);
900 break;
901 case FAN_MARK_REMOVE:
902 if (flags & FAN_MARK_MOUNT)
903 ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
904 else
905 ret = fanotify_remove_inode_mark(group, inode, mask, flags);
906 break;
907 default:
908 ret = -EINVAL;
909 }
910
911 path_put(&path);
912 fput_and_out:
913 fdput(f);
914 return ret;
915 }
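/*
 * Userspace sketch of adding and removing marks (assuming the glibc
 * fanotify_mark() wrapper and a group fd from fanotify_init() as above;
 * the path is only an example):
 *
 *	fanotify_mark(fanotify_fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN_PERM | FAN_CLOSE_WRITE, AT_FDCWD, "/mnt/data");
 *
 *	fanotify_mark(fanotify_fd, FAN_MARK_REMOVE | FAN_MARK_MOUNT,
 *		      FAN_OPEN_PERM | FAN_CLOSE_WRITE, AT_FDCWD, "/mnt/data");
 *
 * FAN_OPEN_PERM is only accepted on FAN_CLASS_CONTENT/PRE_CONTENT groups
 * (see the FS_PRIO_0 check above), and FAN_MARK_FLUSH drops all inode or
 * all mount marks of the group in a single call.
 */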
916
917 #ifdef CONFIG_COMPAT
918 COMPAT_SYSCALL_DEFINE6(fanotify_mark,
919 int, fanotify_fd, unsigned int, flags,
920 __u32, mask0, __u32, mask1, int, dfd,
921 const char __user *, pathname)
922 {
923 return sys_fanotify_mark(fanotify_fd, flags,
924 #ifdef __BIG_ENDIAN
925 ((__u64)mask0 << 32) | mask1,
926 #else
927 ((__u64)mask1 << 32) | mask0,
928 #endif
929 dfd, pathname);
930 }
931 #endif
932
933 /*
934 * fanotify_user_setup - Our initialization function. Note that we cannot return
935 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
936 * must result in panic().
937 */
938 static int __init fanotify_user_setup(void)
939 {
940 fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
941 fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
942 if (IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS)) {
943 fanotify_perm_event_cachep =
944 KMEM_CACHE(fanotify_perm_event_info, SLAB_PANIC);
945 }
946
947 return 0;
948 }
949 device_initcall(fanotify_user_setup);