#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"
#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128
/*
 * All flags that may be specified in parameter event_f_flags of fanotify_init.
 *
 * Internal and external open flags are stored together in field f_flags of
 * struct file. Only external open flags shall be allowed in event_f_flags.
 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
 * excluded.
 */
#define	FANOTIFY_INIT_ALL_EVENT_F_BITS				( \
		O_ACCMODE	| O_APPEND	| O_NONBLOCK	| \
		__O_SYNC	| O_DSYNC	| O_CLOEXEC	| \
		O_LARGEFILE	| O_NOATIME	)
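/*
 * For illustration only: external open flags such as
 * O_RDONLY | O_LARGEFILE | O_CLOEXEC pass the check against
 * FANOTIFY_INIT_ALL_EVENT_F_BITS, while internal bits such as FMODE_NONOTIFY,
 * or any unknown flag, make fanotify_init() return -EINVAL.
 */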
extern const struct fsnotify_ops fanotify_fsnotify_ops;

struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	assert_spin_locked(&group->notification_lock);

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_first_event(group);
}
static int create_fd(struct fsnotify_group *group,
		     struct fanotify_event_info *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
	if (client_fd < 0)
		return client_fd;

	/*
	 * we need a new file handle for the userspace program so it can read even if it was
	 * originally opened O_WRONLY.
	 */
	/* it's possible this event was an overflow event.  in that case dentry and mnt
	 * are NULL;  That's fine, just don't call dentry open */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file.  this
		 * can happen when say tasks are gone and we try to open their
		 * /proc files or we try to open a WRONLY file like in sysfs
		 * we just send the errno to userspace since there isn't much
		 * else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}
static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *fsn_event,
			       struct file **file)
{
	int ret = 0;
	struct fanotify_event_info *event;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, fsn_event);

	*file = NULL;
	event = container_of(fsn_event, struct fanotify_event_info, fse);
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_perm_event_info *dequeue_event(
				struct fsnotify_group *group, int fd)
{
	struct fanotify_perm_event_info *event, *return_e = NULL;

	spin_lock(&group->notification_lock);
	list_for_each_entry(event, &group->fanotify_data.access_list,
			    fae.fse.list) {
		if (fd != event->fd)
			continue;

		list_del_init(&event->fae.fse.list);
		return_e = event;
		break;
	}
	spin_unlock(&group->notification_lock);

	pr_debug("%s: found return_e=%p\n", __func__, return_e);

	return return_e;
}
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_perm_event_info *event;
	int fd = response_struct->fd;
	int response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * make sure the response is valid, if invalid we do nothing and either
	 * userspace can send a valid response or we will clean it up after the
	 * timeout
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	event = dequeue_event(group, fd);
	if (!event)
		return -ENOENT;

	event->response = response;
	wake_up(&group->fanotify_data.access_waitq);

	return 0;
}
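/*
 * Illustrative sketch (not part of this file): a userspace listener that has
 * just read a permission event answers it by writing a struct
 * fanotify_response back to the fanotify fd, for example:
 *
 *	struct fanotify_response resp;
 *
 *	resp.fd = metadata->fd;		(the fd field of the event just read)
 *	resp.response = FAN_ALLOW;	(or FAN_DENY)
 *	write(fan_fd, &resp, sizeof(resp));
 *
 * That write is handled by fanotify_write() below, which hands the buffer to
 * process_access_response() above.
 */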
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		return ret;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS)
		FANOTIFY_PE(event)->fd = fd;
#endif

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
	return ret;
}
/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	unsigned int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		if (IS_ERR(kevent)) {
			ret = PTR_ERR(kevent);
			break;
		}

		if (!kevent) {
			ret = -EAGAIN;
			if (file->f_flags & O_NONBLOCK)
				break;

			ret = -ERESTARTSYS;
			if (signal_pending(current))
				break;

			if (start != buf)
				break;

			wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = copy_event_to_user(group, kevent, buf);
		if (unlikely(ret == -EOPENSTALE)) {
			/*
			 * We cannot report events with stale fd so drop it.
			 * Setting ret to 0 will continue the event loop and
			 * do the right thing if there are no more events to
			 * read (i.e. return bytes read, -EAGAIN or wait).
			 */
			ret = 0;
		}

		/*
		 * Permission events get queued to wait for response. Other
		 * events can be destroyed now.
		 */
		if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
			fsnotify_destroy_event(group, kevent);
		} else {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
			if (ret <= 0) {
				FANOTIFY_PE(kevent)->response = FAN_DENY;
				wake_up(&group->fanotify_data.access_waitq);
			} else {
				spin_lock(&group->notification_lock);
				list_add_tail(&kevent->list,
					&group->fanotify_data.access_list);
				spin_unlock(&group->notification_lock);
			}
#endif
		}
		if (ret < 0)
			break;
		buf += ret;
		count -= ret;
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
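/*
 * Illustrative sketch (not kernel code): userspace reads whole
 * struct fanotify_event_metadata records from the fanotify fd and walks them
 * with the FAN_EVENT_OK()/FAN_EVENT_NEXT() helpers from <linux/fanotify.h>:
 *
 *	char buf[4096];
 *	ssize_t len = read(fan_fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *md = (void *)buf;
 *
 *	while (FAN_EVENT_OK(md, len)) {
 *		(md->mask, md->pid and md->fd describe one event)
 *		if (md->fd >= 0)
 *			close(md->fd);
 *		md = FAN_EVENT_NEXT(md, len);
 *	}
 */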
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	ssize_t ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_perm_event_info *event, *next;
	struct fsnotify_event *fsn_event;

	/*
	 * Stop new events from arriving in the notification queue. Since
	 * userspace cannot use fanotify fd anymore, no event can enter or
	 * leave access_list by now either.
	 */
	fsnotify_group_stop_queueing(group);

	/*
	 * Process all permission events on access_list and notification queue
	 * and simulate reply from userspace.
	 */
	spin_lock(&group->notification_lock);
	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
				 fae.fse.list) {
		pr_debug("%s: found group=%p event=%p\n", __func__, group,
			 event);

		list_del_init(&event->fae.fse.list);
		event->response = FAN_ALLOW;
	}

	/*
	 * Destroy all non-permission events. For permission events just
	 * dequeue them and set the response. They will be freed once the
	 * response is consumed and fanotify_get_response() returns.
	 */
	while (!fsnotify_notify_queue_is_empty(group)) {
		fsn_event = fsnotify_remove_first_event(group);
		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
			spin_unlock(&group->notification_lock);
			fsnotify_destroy_event(group, fsn_event);
			spin_lock(&group->notification_lock);
		} else
			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
	}
	spin_unlock(&group->notification_lock);

	/* Response for all permission events is set, wake up waiters */
	wake_up(&group->fanotify_data.access_waitq);
#endif

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags,
					    int *destroy)
{
	__u32 oldmask = 0;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		__u32 tmask = fsn_mark->mask & ~mask;

		if (flags & FAN_MARK_ONDIR)
			tmask &= ~FAN_ONDIR;
		oldmask = fsn_mark->mask;
		fsn_mark->mask = tmask;
	} else {
		__u32 tmask = fsn_mark->ignored_mask & ~mask;
		if (flags & FAN_MARK_ONDIR)
			tmask &= ~FAN_ONDIR;
		fsn_mark->ignored_mask = tmask;
	}
	*destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
	spin_unlock(&fsn_mark->lock);

	return mask & oldmask;
}
static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
				      group);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
	if (destroy_mark)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&group->mark_mutex);
	if (destroy_mark)
		fsnotify_free_mark(fsn_mark);

	fsnotify_put_mark(fsn_mark);
	return 0;
}
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_mask(inode->i_fsnotify_marks);
	if (destroy_mark)
		fsnotify_detach_mark(fsn_mark);
	mutex_unlock(&group->mark_mutex);
	if (destroy_mark)
		fsnotify_free_mark(fsn_mark);

	/* matches the fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return 0;
}
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		__u32 tmask = fsn_mark->mask | mask;

		if (flags & FAN_MARK_ONDIR)
			tmask |= FAN_ONDIR;

		oldmask = fsn_mark->mask;
		fsn_mark->mask = tmask;
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		if (flags & FAN_MARK_ONDIR)
			tmask |= FAN_ONDIR;

		fsn_mark->ignored_mask = tmask;
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}
	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}
static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   struct inode *inode,
						   struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, group);
	ret = fsnotify_add_mark_locked(mark, inode, mnt, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(&real_mount(mnt)->mnt_fsnotify_marks,
				      group);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_mask(real_mount(mnt)->mnt_fsnotify_marks);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	return 0;
}
static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_mask(inode->i_fsnotify_marks);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	return 0;
}
/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;
	struct fanotify_event_info *oevent;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
		return -EINVAL;

	switch (event_f_flags & O_ACCMODE) {
	case O_RDONLY:
	case O_RDWR:
	case O_WRONLY:
		break;
	default:
		return -EINVAL;
	}

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
	if (unlikely(!oevent)) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}
	group->overflow_event = &oevent->fse;

	if (force_o_largefile())
		event_f_flags |= O_LARGEFILE;
	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}
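/*
 * Illustrative sketch (not kernel code): a minimal privileged caller of the
 * syscall above, using the glibc wrapper from <sys/fanotify.h>:
 *
 *	int fan_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_CONTENT,
 *				   O_RDONLY | O_LARGEFILE);
 *	if (fan_fd < 0)
 *		perror("fanotify_init");
 *
 * The returned descriptor is the anonymous inode installed by
 * anon_inode_getfd() above; marks are then attached to it with
 * fanotify_mark(), defined next.
 */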
SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
			      __u64, mask, int, dfd,
			      const char  __user *, pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
		break;
	case FAN_MARK_FLUSH:
		if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	if (flags & FAN_MARK_FLUSH) {
		ret = 0;
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		goto fput_and_out;
	}

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
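/*
 * Illustrative sketch (not kernel code): adding a mount mark for permission
 * events from userspace, continuing the fanotify_init() example above:
 *
 *	if (fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *			  FAN_OPEN_PERM, AT_FDCWD, "/mnt") < 0)
 *		perror("fanotify_mark");
 *
 * With FAN_MARK_MOUNT set the path only resolves the vfsmount, so the request
 * is handled by fanotify_add_vfsmount_mark() rather than the inode variant.
 */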
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char  __user *, pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				 dfd, pathname);
}
#endif
/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
						SLAB_PANIC);
#endif

	return 0;
}
device_initcall(fanotify_user_setup);