/* fs/notify/fanotify/fanotify_user.c */
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/sched/signal.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS     16384
#define FANOTIFY_DEFAULT_MAX_MARKS      8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS  128

/*
 * All flags that may be specified in parameter event_f_flags of fanotify_init.
 *
 * Internal and external open flags are stored together in field f_flags of
 * struct file. Only external open flags shall be allowed in event_f_flags.
 * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
 * excluded.
 */
#define FANOTIFY_INIT_ALL_EVENT_F_BITS ( \
                O_ACCMODE | O_APPEND | O_NONBLOCK | \
                __O_SYNC | O_DSYNC | O_CLOEXEC | \
                O_LARGEFILE | O_NOATIME )
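
/*
 * For illustration: given the whitelist above, an fanotify_init() caller
 * passing, say, O_RDONLY | O_CLOEXEC | O_LARGEFILE as event_f_flags gets
 * past the check in the syscall below, while O_RDONLY | O_CREAT is rejected
 * with -EINVAL because O_CREAT is not part of FANOTIFY_INIT_ALL_EVENT_F_BITS.
 * (The flag combinations here are examples only.)
 */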

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
                                            size_t count)
{
        assert_spin_locked(&group->notification_lock);

        pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

        if (fsnotify_notify_queue_is_empty(group))
                return NULL;

        if (FAN_EVENT_METADATA_LEN > count)
                return ERR_PTR(-EINVAL);

        /*
         * We held the notification_lock the whole time, so the queue cannot
         * have changed since the emptiness check above.
         */
        return fsnotify_remove_first_event(group);
}

static int create_fd(struct fsnotify_group *group,
                     struct fanotify_event_info *event,
                     struct file **file)
{
        int client_fd;
        struct file *new_file;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        client_fd = get_unused_fd_flags(group->fanotify_data.f_flags);
        if (client_fd < 0)
                return client_fd;

        /*
         * We need a new file handle for the userspace program so it can read
         * even if the file was originally opened O_WRONLY.
         */
        /*
         * It's possible this event was an overflow event; in that case dentry
         * and mnt are NULL.  That's fine, just don't call dentry_open().
         */
        if (event->path.dentry && event->path.mnt)
                new_file = dentry_open(&event->path,
                                       group->fanotify_data.f_flags | FMODE_NONOTIFY,
                                       current_cred());
        else
                new_file = ERR_PTR(-EOVERFLOW);
        if (IS_ERR(new_file)) {
                /*
                 * We still send an event even if we can't open the file.
                 * This can happen when, say, tasks are gone and we try to
                 * open their /proc files, or when we try to open a WRONLY
                 * file like in sysfs.  We just send the errno to userspace,
                 * since there isn't much else we can do.
                 */
                put_unused_fd(client_fd);
                client_fd = PTR_ERR(new_file);
        } else {
                *file = new_file;
        }

        return client_fd;
}

static int fill_event_metadata(struct fsnotify_group *group,
                               struct fanotify_event_metadata *metadata,
                               struct fsnotify_event *fsn_event,
                               struct file **file)
{
        int ret = 0;
        struct fanotify_event_info *event;

        pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
                 group, metadata, fsn_event);

        *file = NULL;
        event = container_of(fsn_event, struct fanotify_event_info, fse);
        metadata->event_len = FAN_EVENT_METADATA_LEN;
        metadata->metadata_len = FAN_EVENT_METADATA_LEN;
        metadata->vers = FANOTIFY_METADATA_VERSION;
        metadata->reserved = 0;
        metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
        metadata->pid = pid_vnr(event->tgid);
        if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
                metadata->fd = FAN_NOFD;
        else {
                metadata->fd = create_fd(group, event, file);
                if (metadata->fd < 0)
                        ret = metadata->fd;
        }

        return ret;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_perm_event_info *dequeue_event(
                                struct fsnotify_group *group, int fd)
{
        struct fanotify_perm_event_info *event, *return_e = NULL;

        spin_lock(&group->notification_lock);
        list_for_each_entry(event, &group->fanotify_data.access_list,
                            fae.fse.list) {
                if (event->fd != fd)
                        continue;

                list_del_init(&event->fae.fse.list);
                return_e = event;
                break;
        }
        spin_unlock(&group->notification_lock);

        pr_debug("%s: found return_e=%p\n", __func__, return_e);

        return return_e;
}

static int process_access_response(struct fsnotify_group *group,
                                   struct fanotify_response *response_struct)
{
        struct fanotify_perm_event_info *event;
        int fd = response_struct->fd;
        int response = response_struct->response;

        pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
                 fd, response);
        /*
         * Make sure the response is valid.  If it is invalid we do nothing;
         * either userspace can send a valid response later or we will clean
         * the event up after the timeout.
         */
        switch (response) {
        case FAN_ALLOW:
        case FAN_DENY:
                break;
        default:
                return -EINVAL;
        }

        if (fd < 0)
                return -EINVAL;

        event = dequeue_event(group, fd);
        if (!event)
                return -ENOENT;

        event->response = response;
        wake_up(&group->fanotify_data.access_waitq);

        return 0;
}
#endif
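
/*
 * For illustration, a sketch of the userspace side of the permission
 * protocol handled above (the local variable names are examples only):
 * after read()ing a permission event such as FAN_OPEN_PERM, the listener
 * answers by writing a struct fanotify_response back to the fanotify fd,
 * which fanotify_write() below hands to process_access_response():
 *
 *        struct fanotify_response resp = {
 *                .fd       = metadata->fd,     // fd carried by the event just read
 *                .response = FAN_ALLOW,        // or FAN_DENY
 *        };
 *        write(fanotify_fd, &resp, sizeof(resp));
 */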

static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                  struct fsnotify_event *event,
                                  char __user *buf)
{
        struct fanotify_event_metadata fanotify_event_metadata;
        struct file *f;
        int fd, ret;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
        if (ret < 0)
                return ret;

        fd = fanotify_event_metadata.fd;
        ret = -EFAULT;
        if (copy_to_user(buf, &fanotify_event_metadata,
                         fanotify_event_metadata.event_len))
                goto out_close_fd;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (event->mask & FAN_ALL_PERM_EVENTS)
                FANOTIFY_PE(event)->fd = fd;
#endif

        if (fd != FAN_NOFD)
                fd_install(fd, f);
        return fanotify_event_metadata.event_len;

out_close_fd:
        if (fd != FAN_NOFD) {
                put_unused_fd(fd);
                fput(f);
        }
        return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
        struct fsnotify_group *group = file->private_data;
        int ret = 0;

        poll_wait(file, &group->notification_waitq, wait);
        spin_lock(&group->notification_lock);
        if (!fsnotify_notify_queue_is_empty(group))
                ret = POLLIN | POLLRDNORM;
        spin_unlock(&group->notification_lock);

        return ret;
}

static ssize_t fanotify_read(struct file *file, char __user *buf,
                             size_t count, loff_t *pos)
{
        struct fsnotify_group *group;
        struct fsnotify_event *kevent;
        char __user *start;
        int ret;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        start = buf;
        group = file->private_data;

        pr_debug("%s: group=%p\n", __func__, group);

        add_wait_queue(&group->notification_waitq, &wait);
        while (1) {
                spin_lock(&group->notification_lock);
                kevent = get_one_event(group, count);
                spin_unlock(&group->notification_lock);

                if (IS_ERR(kevent)) {
                        ret = PTR_ERR(kevent);
                        break;
                }

                if (!kevent) {
                        ret = -EAGAIN;
                        if (file->f_flags & O_NONBLOCK)
                                break;

                        ret = -ERESTARTSYS;
                        if (signal_pending(current))
                                break;

                        if (start != buf)
                                break;

                        wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
                        continue;
                }

                ret = copy_event_to_user(group, kevent, buf);
                /*
                 * Permission events get queued to wait for response. Other
                 * events can be destroyed now.
                 */
                if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
                        fsnotify_destroy_event(group, kevent);
                        if (ret < 0)
                                break;
                } else {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
                        if (ret < 0) {
                                FANOTIFY_PE(kevent)->response = FAN_DENY;
                                wake_up(&group->fanotify_data.access_waitq);
                                break;
                        }
                        spin_lock(&group->notification_lock);
                        list_add_tail(&kevent->list,
                                      &group->fanotify_data.access_list);
                        spin_unlock(&group->notification_lock);
#endif
                }
                buf += ret;
                count -= ret;
        }
        remove_wait_queue(&group->notification_waitq, &wait);

        if (start != buf && ret != -EFAULT)
                ret = buf - start;
        return ret;
}
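
/*
 * For illustration, the read() path above pairs with a userspace loop along
 * these lines (a sketch using the FAN_EVENT_OK()/FAN_EVENT_NEXT() helpers
 * from <linux/fanotify.h>; buffer size and error handling are examples only):
 *
 *        char buf[4096];
 *        ssize_t len = read(fanotify_fd, buf, sizeof(buf));
 *        struct fanotify_event_metadata *meta;
 *
 *        for (meta = (void *)buf; FAN_EVENT_OK(meta, len);
 *             meta = FAN_EVENT_NEXT(meta, len)) {
 *                // examine meta->mask, meta->pid ...
 *                if (meta->fd >= 0)
 *                        close(meta->fd);
 *        }
 */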

static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        struct fanotify_response response = { .fd = -1, .response = -1 };
        struct fsnotify_group *group;
        int ret;

        group = file->private_data;

        if (count > sizeof(response))
                count = sizeof(response);

        pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

        if (copy_from_user(&response, buf, count))
                return -EFAULT;

        ret = process_access_response(group, &response);
        if (ret < 0)
                count = ret;

        return count;
#else
        return -EINVAL;
#endif
}

static int fanotify_release(struct inode *ignored, struct file *file)
{
        struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        struct fanotify_perm_event_info *event, *next;
        struct fsnotify_event *fsn_event;

        /*
         * Stop new events from arriving in the notification queue.  Since
         * userspace cannot use the fanotify fd anymore, no event can enter
         * or leave access_list by now either.
         */
        fsnotify_group_stop_queueing(group);

        /*
         * Process all permission events on access_list and notification queue
         * and simulate reply from userspace.
         */
        spin_lock(&group->notification_lock);
        list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
                                 fae.fse.list) {
                pr_debug("%s: found group=%p event=%p\n", __func__, group,
                         event);

                list_del_init(&event->fae.fse.list);
                event->response = FAN_ALLOW;
        }

        /*
         * Destroy all non-permission events. For permission events just
         * dequeue them and set the response. They will be freed once the
         * response is consumed and fanotify_get_response() returns.
         */
        while (!fsnotify_notify_queue_is_empty(group)) {
                fsn_event = fsnotify_remove_first_event(group);
                if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS)) {
                        spin_unlock(&group->notification_lock);
                        fsnotify_destroy_event(group, fsn_event);
                        spin_lock(&group->notification_lock);
                } else
                        FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
        }
        spin_unlock(&group->notification_lock);

        /* Response for all permission events is set, wake up waiters */
        wake_up(&group->fanotify_data.access_waitq);
#endif

        /* matches the fanotify_init->fsnotify_alloc_group */
        fsnotify_destroy_group(group);

        return 0;
}

static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct fsnotify_group *group;
        struct fsnotify_event *fsn_event;
        void __user *p;
        int ret = -ENOTTY;
        size_t send_len = 0;

        group = file->private_data;

        p = (void __user *) arg;

        switch (cmd) {
        case FIONREAD:
                spin_lock(&group->notification_lock);
                list_for_each_entry(fsn_event, &group->notification_list, list)
                        send_len += FAN_EVENT_METADATA_LEN;
                spin_unlock(&group->notification_lock);
                ret = put_user(send_len, (int __user *) p);
                break;
        }

        return ret;
}

static const struct file_operations fanotify_fops = {
        .show_fdinfo    = fanotify_show_fdinfo,
        .poll           = fanotify_poll,
        .read           = fanotify_read,
        .write          = fanotify_write,
        .fasync         = NULL,
        .release        = fanotify_release,
        .unlocked_ioctl = fanotify_ioctl,
        .compat_ioctl   = fanotify_ioctl,
        .llseek         = noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
        kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

static int fanotify_find_path(int dfd, const char __user *filename,
                              struct path *path, unsigned int flags)
{
        int ret;

        pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
                 dfd, filename, flags);

        if (filename == NULL) {
                struct fd f = fdget(dfd);

                ret = -EBADF;
                if (!f.file)
                        goto out;

                ret = -ENOTDIR;
                if ((flags & FAN_MARK_ONLYDIR) &&
                    !(S_ISDIR(file_inode(f.file)->i_mode))) {
                        fdput(f);
                        goto out;
                }

                *path = f.file->f_path;
                path_get(path);
                fdput(f);
        } else {
                unsigned int lookup_flags = 0;

                if (!(flags & FAN_MARK_DONT_FOLLOW))
                        lookup_flags |= LOOKUP_FOLLOW;
                if (flags & FAN_MARK_ONLYDIR)
                        lookup_flags |= LOOKUP_DIRECTORY;

                ret = user_path_at(dfd, filename, lookup_flags, path);
                if (ret)
                        goto out;
        }

        /* you can only watch an inode if you have read permissions on it */
        ret = inode_permission(path->dentry->d_inode, MAY_READ);
        if (ret)
                path_put(path);
out:
        return ret;
}

static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
                                            __u32 mask,
                                            unsigned int flags,
                                            int *destroy)
{
        __u32 oldmask = 0;

        spin_lock(&fsn_mark->lock);
        if (!(flags & FAN_MARK_IGNORED_MASK)) {
                __u32 tmask = fsn_mark->mask & ~mask;

                if (flags & FAN_MARK_ONDIR)
                        tmask &= ~FAN_ONDIR;

                oldmask = fsn_mark->mask;
                fsnotify_set_mark_mask_locked(fsn_mark, tmask);
        } else {
                __u32 tmask = fsn_mark->ignored_mask & ~mask;
                if (flags & FAN_MARK_ONDIR)
                        tmask &= ~FAN_ONDIR;

                fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
        }
        *destroy = !(fsn_mark->mask | fsn_mark->ignored_mask);
        spin_unlock(&fsn_mark->lock);

        return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
                                          struct vfsmount *mnt, __u32 mask,
                                          unsigned int flags)
{
        struct fsnotify_mark *fsn_mark = NULL;
        __u32 removed;
        int destroy_mark;

        mutex_lock(&group->mark_mutex);
        fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
        if (!fsn_mark) {
                mutex_unlock(&group->mark_mutex);
                return -ENOENT;
        }

        removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
                                                 &destroy_mark);
        if (destroy_mark)
                fsnotify_detach_mark(fsn_mark);
        mutex_unlock(&group->mark_mutex);
        if (destroy_mark)
                fsnotify_free_mark(fsn_mark);

        fsnotify_put_mark(fsn_mark);
        if (removed & real_mount(mnt)->mnt_fsnotify_mask)
                fsnotify_recalc_vfsmount_mask(mnt);

        return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
                                      struct inode *inode, __u32 mask,
                                      unsigned int flags)
{
        struct fsnotify_mark *fsn_mark = NULL;
        __u32 removed;
        int destroy_mark;

        mutex_lock(&group->mark_mutex);
        fsn_mark = fsnotify_find_inode_mark(group, inode);
        if (!fsn_mark) {
                mutex_unlock(&group->mark_mutex);
                return -ENOENT;
        }

        removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
                                                 &destroy_mark);
        if (destroy_mark)
                fsnotify_detach_mark(fsn_mark);
        mutex_unlock(&group->mark_mutex);
        if (destroy_mark)
                fsnotify_free_mark(fsn_mark);

        /* matches the fsnotify_find_inode_mark() */
        fsnotify_put_mark(fsn_mark);
        if (removed & inode->i_fsnotify_mask)
                fsnotify_recalc_inode_mask(inode);

        return 0;
}

static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
                                       __u32 mask,
                                       unsigned int flags)
{
        __u32 oldmask = -1;

        spin_lock(&fsn_mark->lock);
        if (!(flags & FAN_MARK_IGNORED_MASK)) {
                __u32 tmask = fsn_mark->mask | mask;

                if (flags & FAN_MARK_ONDIR)
                        tmask |= FAN_ONDIR;

                oldmask = fsn_mark->mask;
                fsnotify_set_mark_mask_locked(fsn_mark, tmask);
        } else {
                __u32 tmask = fsn_mark->ignored_mask | mask;
                if (flags & FAN_MARK_ONDIR)
                        tmask |= FAN_ONDIR;

                fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
                if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
                        fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
        }
        spin_unlock(&fsn_mark->lock);

        return mask & ~oldmask;
}

static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
                                                   struct inode *inode,
                                                   struct vfsmount *mnt)
{
        struct fsnotify_mark *mark;
        int ret;

        if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
                return ERR_PTR(-ENOSPC);

        mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
        if (!mark)
                return ERR_PTR(-ENOMEM);

        fsnotify_init_mark(mark, fanotify_free_mark);
        ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0);
        if (ret) {
                fsnotify_put_mark(mark);
                return ERR_PTR(ret);
        }

        return mark;
}


static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
                                      struct vfsmount *mnt, __u32 mask,
                                      unsigned int flags)
{
        struct fsnotify_mark *fsn_mark;
        __u32 added;

        mutex_lock(&group->mark_mutex);
        fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
        if (!fsn_mark) {
                fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
                if (IS_ERR(fsn_mark)) {
                        mutex_unlock(&group->mark_mutex);
                        return PTR_ERR(fsn_mark);
                }
        }
        added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
        mutex_unlock(&group->mark_mutex);

        if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
                fsnotify_recalc_vfsmount_mask(mnt);

        fsnotify_put_mark(fsn_mark);
        return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
                                   struct inode *inode, __u32 mask,
                                   unsigned int flags)
{
        struct fsnotify_mark *fsn_mark;
        __u32 added;

        pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

        /*
         * If some other task has this inode open for write we should not add
         * an ignored mark, unless that ignored mark is supposed to survive
         * modification changes anyway.
         */
        if ((flags & FAN_MARK_IGNORED_MASK) &&
            !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
            (atomic_read(&inode->i_writecount) > 0))
                return 0;

        mutex_lock(&group->mark_mutex);
        fsn_mark = fsnotify_find_inode_mark(group, inode);
        if (!fsn_mark) {
                fsn_mark = fanotify_add_new_mark(group, inode, NULL);
                if (IS_ERR(fsn_mark)) {
                        mutex_unlock(&group->mark_mutex);
                        return PTR_ERR(fsn_mark);
                }
        }
        added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
        mutex_unlock(&group->mark_mutex);

        if (added & ~inode->i_fsnotify_mask)
                fsnotify_recalc_inode_mask(inode);

        fsnotify_put_mark(fsn_mark);
        return 0;
}

/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
        struct fsnotify_group *group;
        int f_flags, fd;
        struct user_struct *user;
        struct fanotify_event_info *oevent;

        pr_debug("%s: flags=%d event_f_flags=%d\n",
                 __func__, flags, event_f_flags);

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (flags & ~FAN_ALL_INIT_FLAGS)
                return -EINVAL;

        if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
                return -EINVAL;

        switch (event_f_flags & O_ACCMODE) {
        case O_RDONLY:
        case O_RDWR:
        case O_WRONLY:
                break;
        default:
                return -EINVAL;
        }

        user = get_current_user();
        if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
                free_uid(user);
                return -EMFILE;
        }

        f_flags = O_RDWR | FMODE_NONOTIFY;
        if (flags & FAN_CLOEXEC)
                f_flags |= O_CLOEXEC;
        if (flags & FAN_NONBLOCK)
                f_flags |= O_NONBLOCK;

        /* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
        group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
        if (IS_ERR(group)) {
                free_uid(user);
                return PTR_ERR(group);
        }

        group->fanotify_data.user = user;
        atomic_inc(&user->fanotify_listeners);

        oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
        if (unlikely(!oevent)) {
                fd = -ENOMEM;
                goto out_destroy_group;
        }
        group->overflow_event = &oevent->fse;

        if (force_o_largefile())
                event_f_flags |= O_LARGEFILE;
        group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        init_waitqueue_head(&group->fanotify_data.access_waitq);
        INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif
        switch (flags & FAN_ALL_CLASS_BITS) {
        case FAN_CLASS_NOTIF:
                group->priority = FS_PRIO_0;
                break;
        case FAN_CLASS_CONTENT:
                group->priority = FS_PRIO_1;
                break;
        case FAN_CLASS_PRE_CONTENT:
                group->priority = FS_PRIO_2;
                break;
        default:
                fd = -EINVAL;
                goto out_destroy_group;
        }

        if (flags & FAN_UNLIMITED_QUEUE) {
                fd = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out_destroy_group;
                group->max_events = UINT_MAX;
        } else {
                group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
        }

        if (flags & FAN_UNLIMITED_MARKS) {
                fd = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out_destroy_group;
                group->fanotify_data.max_marks = UINT_MAX;
        } else {
                group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
        }

        fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
        if (fd < 0)
                goto out_destroy_group;

        return fd;

out_destroy_group:
        fsnotify_destroy_group(group);
        return fd;
}
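
/*
 * For illustration, a typical userspace caller of the syscall above (via the
 * glibc wrapper) looks something like this sketch; the flag choices are
 * examples only, and CAP_SYS_ADMIN is required as checked above:
 *
 *        int fanotify_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_CONTENT,
 *                                        O_RDONLY | O_LARGEFILE);
 *        if (fanotify_fd < 0)
 *                perror("fanotify_init");
 */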

SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
                __u64, mask, int, dfd,
                const char __user *, pathname)
{
        struct inode *inode = NULL;
        struct vfsmount *mnt = NULL;
        struct fsnotify_group *group;
        struct fd f;
        struct path path;
        int ret;

        pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
                 __func__, fanotify_fd, flags, dfd, pathname, mask);

        /* we only use the lower 32 bits as of right now. */
        if (mask & ((__u64)0xffffffff << 32))
                return -EINVAL;

        if (flags & ~FAN_ALL_MARK_FLAGS)
                return -EINVAL;
        switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
        case FAN_MARK_ADD:              /* fallthrough */
        case FAN_MARK_REMOVE:
                if (!mask)
                        return -EINVAL;
                break;
        case FAN_MARK_FLUSH:
                if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        if (mask & FAN_ONDIR) {
                flags |= FAN_MARK_ONDIR;
                mask &= ~FAN_ONDIR;
        }

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
        if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
                return -EINVAL;

        f = fdget(fanotify_fd);
        if (unlikely(!f.file))
                return -EBADF;

        /* verify that this is indeed an fanotify instance */
        ret = -EINVAL;
        if (unlikely(f.file->f_op != &fanotify_fops))
                goto fput_and_out;
        group = f.file->private_data;

        /*
         * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
         * allowed to set permissions events.
         */
        ret = -EINVAL;
        if (mask & FAN_ALL_PERM_EVENTS &&
            group->priority == FS_PRIO_0)
                goto fput_and_out;

        if (flags & FAN_MARK_FLUSH) {
                ret = 0;
                if (flags & FAN_MARK_MOUNT)
                        fsnotify_clear_vfsmount_marks_by_group(group);
                else
                        fsnotify_clear_inode_marks_by_group(group);
                goto fput_and_out;
        }

        ret = fanotify_find_path(dfd, pathname, &path, flags);
        if (ret)
                goto fput_and_out;

        /* inode held in place by reference to path; group by fget on fd */
        if (!(flags & FAN_MARK_MOUNT))
                inode = path.dentry->d_inode;
        else
                mnt = path.mnt;

        /* create/update an inode mark */
        switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
        case FAN_MARK_ADD:
                if (flags & FAN_MARK_MOUNT)
                        ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
                else
                        ret = fanotify_add_inode_mark(group, inode, mask, flags);
                break;
        case FAN_MARK_REMOVE:
                if (flags & FAN_MARK_MOUNT)
                        ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
                else
                        ret = fanotify_remove_inode_mark(group, inode, mask, flags);
                break;
        default:
                ret = -EINVAL;
        }

        path_put(&path);
fput_and_out:
        fdput(f);
        return ret;
}
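
/*
 * For illustration, marks are added against the fd returned by
 * fanotify_init(); a sketch (the path and event mask are examples only):
 *
 *        fanotify_mark(fanotify_fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *                      FAN_OPEN | FAN_CLOSE_WRITE, AT_FDCWD, "/home");
 *
 * With FAN_MARK_MOUNT the mark is attached to the mount containing the path
 * (the vfsmount branch above); without it, to the inode itself.
 */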

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
                       int, fanotify_fd, unsigned int, flags,
                       __u32, mask0, __u32, mask1, int, dfd,
                       const char __user *, pathname)
{
        return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
                                 ((__u64)mask0 << 32) | mask1,
#else
                                 ((__u64)mask1 << 32) | mask0,
#endif
                                 dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot
 * return an error because we have compiled-in VFS hooks.  So an (unlikely)
 * failure here must result in panic().
 */
static int __init fanotify_user_setup(void)
{
        fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
        fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
                                                SLAB_PANIC);
#endif

        return 0;
}
device_initcall(fanotify_user_setup);