/*
 * fanotify userspace-facing interface: notification-group file
 * descriptor operations and the fanotify_init/fanotify_mark syscalls.
 */
33d3dfff | 1 | #include <linux/fanotify.h> |
11637e4b | 2 | #include <linux/fcntl.h> |
2a3edf86 | 3 | #include <linux/file.h> |
11637e4b | 4 | #include <linux/fs.h> |
52c923dd | 5 | #include <linux/anon_inodes.h> |
11637e4b | 6 | #include <linux/fsnotify_backend.h> |
2a3edf86 | 7 | #include <linux/init.h> |
a1014f10 | 8 | #include <linux/mount.h> |
2a3edf86 | 9 | #include <linux/namei.h> |
a1014f10 | 10 | #include <linux/poll.h> |
11637e4b EP |
11 | #include <linux/security.h> |
12 | #include <linux/syscalls.h> | |
e4e047a2 | 13 | #include <linux/slab.h> |
2a3edf86 | 14 | #include <linux/types.h> |
a1014f10 EP |
15 | #include <linux/uaccess.h> |
16 | ||
17 | #include <asm/ioctls.h> | |
11637e4b | 18 | |
/* event handler ops, defined in fanotify.c */
extern const struct fsnotify_ops fanotify_fsnotify_ops;

/* slab cache for marks allocated in fanotify_add_*_mark() */
static struct kmem_cache *fanotify_mark_cache __read_mostly;
/* slab cache for struct fanotify_response_event below */
static struct kmem_cache *fanotify_response_event_cache __read_mostly;

/*
 * Tracks a permission event that has been handed to userspace and is
 * awaiting an ALLOW/DENY reply, keyed by the fd we gave userspace.
 * Lives on group->fanotify_data.access_list under access_mutex.
 */
struct fanotify_response_event {
	struct list_head list;		/* entry on the group's access_list */
	__s32 fd;			/* fd sent to userspace for this event */
	struct fsnotify_event *event;	/* the pending permission event */
};
2a3edf86 | 29 | |
/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%zd count=%zd\n" == NULL ? NULL : "%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;	/* nothing queued; caller may block */

	/* every event copies out exactly FAN_EVENT_METADATA_LEN bytes */
	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}
54 | ||
/*
 * Open a new struct file on the path carried by @event and install it in
 * an unused fd slot of the current process.  Returns the fd on success or
 * a negative errno (which is also what we report to userspace in the
 * event metadata when the open fails).
 */
static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
{
	int client_fd;
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	/* reserve the fd slot first so failure after this point is cheap */
	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	/* fanotify events must carry a path; anything else is a bug */
	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}

	/*
	 * we need a new file handle for the userspace program so it can read even if it was
	 * originally opened O_WRONLY.
	 */
	dentry = dget(event->path.dentry);
	mnt = mntget(event->path.mnt);
	/* it's possible this event was an overflow event. in that case dentry and mnt
	 * are NULL; That's fine, just don't call dentry open */
	if (dentry && mnt)
		/* FMODE_NONOTIFY: opening/reading this file must not generate
		 * further fanotify events (would loop forever otherwise) */
		new_file = dentry_open(dentry, mnt,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file. this
		 * can happen when say tasks are gone and we try to open their
		 * /proc files or we try to open a WRONLY file like in sysfs
		 * we just send the errno to userspace since there isn't much
		 * else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		fd_install(client_fd, new_file);
	}

	return client_fd;
}
104 | ||
/*
 * Populate @metadata from @event for copying to userspace.  Returns the
 * freshly created fd (also stored in metadata->fd) or a negative errno
 * from create_fd().
 */
static ssize_t fill_event_metadata(struct fsnotify_group *group,
				   struct fanotify_event_metadata *metadata,
				   struct fsnotify_event *event)
{
	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, event);

	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	/* only expose bits userspace is allowed to see */
	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
	/* pid as seen in the reader's pid namespace */
	metadata->pid = pid_vnr(event->tgid);
	metadata->fd = create_fd(group, event);

	return metadata->fd;
}
120 | ||
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
/*
 * Find and unlink the pending response event matching @fd from the
 * group's access_list.  Returns it, or NULL if no entry matches.
 * Takes and releases fanotify_data.access_mutex internally.
 */
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;

	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;

		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	pr_debug("%s: found return_re=%p\n", __func__, return_re);

	return return_re;
}
142 | ||
/*
 * Handle a struct fanotify_response written by userspace: validate it,
 * find the matching pending permission event, record the verdict and
 * wake anyone waiting on the access_waitq.  Returns 0 or negative errno.
 */
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * make sure the response is valid, if invalid we do nothing and either
	 * userspace can send a valid response or we will clean it up after the
	 * timeout
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;

	/* hand the verdict to the blocked event originator... */
	re->event->response = response;

	/* ...and wake it up to act on it */
	wake_up(&group->fanotify_data.access_waitq);

	kmem_cache_free(fanotify_response_event_cache, re);

	return 0;
}
180 | ||
/*
 * If @event is a permission event, queue a response tracker on the
 * group's access_list before the event metadata is copied to userspace,
 * so a later write() can match the reply by @fd.  No-op for
 * non-permission events.  Returns 0 or -ENOMEM.
 */
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;

	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;

	re->event = event;
	re->fd = fd;

	mutex_lock(&group->fanotify_data.access_mutex);

	/*
	 * The group is being torn down (fanotify_release set bypass_perm):
	 * nobody will ever answer, so auto-allow instead of queueing.
	 */
	if (group->fanotify_data.bypass_perm) {
		mutex_unlock(&group->fanotify_data.access_mutex);
		kmem_cache_free(fanotify_response_event_cache, re);
		event->response = FAN_ALLOW;
		return 0;
	}

	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);

	return 0;
}
211 | ||
/*
 * Undo prepare_for_access_response() when copying the event to userspace
 * failed: drop the queued tracker for @fd.  No-op for non-permission
 * events or if the tracker is already gone.
 */
static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	struct fanotify_response_event *re;

	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return;

	re = dequeue_re(group, fd);
	if (!re)
		return;

	/* the tracker we queued must reference the same event */
	BUG_ON(re->event != event);

	kmem_cache_free(fanotify_response_event_cache, re);

	return;
}
231 | #else | |
232 | static int prepare_for_access_response(struct fsnotify_group *group, | |
233 | struct fsnotify_event *event, | |
234 | __s32 fd) | |
235 | { | |
236 | return 0; | |
237 | } | |
238 | ||
239 | static void remove_access_response(struct fsnotify_group *group, | |
240 | struct fsnotify_event *event, | |
241 | __s32 fd) | |
242 | { | |
8860f060 | 243 | return; |
b2d87909 EP |
244 | } |
245 | #endif | |
246 | ||
/*
 * Serialize one event to the userspace buffer: create the fd, queue a
 * permission-response tracker if needed, then copy the metadata out.
 * Returns FAN_EVENT_METADATA_LEN on success or a negative errno, undoing
 * the fd/tracker on failure via the goto cleanup chain.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	fd = fill_event_metadata(group, &fanotify_event_metadata, event);
	if (fd < 0)
		return fd;

	/* must be queued before userspace can see the fd in the metadata */
	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;

	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN))
		goto out_kill_access_response;

	return FAN_EVENT_METADATA_LEN;

out_kill_access_response:
	remove_access_response(group, event, fd);
out_close_fd:
	/* NOTE(review): closes the fd we installed above; sys_close from
	 * in-kernel is legacy style — verify against current tree */
	sys_close(fd);
	return ret;
}
276 | ||
/* fanotify userspace file descriptor functions */
278 | static unsigned int fanotify_poll(struct file *file, poll_table *wait) | |
279 | { | |
280 | struct fsnotify_group *group = file->private_data; | |
281 | int ret = 0; | |
282 | ||
283 | poll_wait(file, &group->notification_waitq, wait); | |
284 | mutex_lock(&group->notification_mutex); | |
285 | if (!fsnotify_notify_queue_is_empty(group)) | |
286 | ret = POLLIN | POLLRDNORM; | |
287 | mutex_unlock(&group->notification_mutex); | |
288 | ||
289 | return ret; | |
290 | } | |
291 | ||
/*
 * read() on a fanotify fd: copy out as many queued events as fit in
 * @count.  Blocks (unless O_NONBLOCK) while the queue is empty and
 * nothing has been copied yet.  Returns bytes copied or a negative errno.
 */
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		/* register on the waitqueue before checking, so a wakeup
		 * between check and schedule() is not lost */
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			/* kevent may be ERR_PTR(-EINVAL) if count is too small */
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			/* drop the queue's reference taken at removal */
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		/* already copied something: return the partial count
		 * instead of blocking for more */
		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	/* report bytes copied unless the failure was EFAULT */
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
344 | ||
/*
 * write() on a fanotify fd: userspace answers a permission event by
 * writing a struct fanotify_response.  Returns bytes consumed or a
 * negative errno; -EINVAL when permission events are compiled out.
 */
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	/* defaults are invalid so a short write can't smuggle in garbage */
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	/* ignore any trailing bytes beyond one response struct */
	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}
371 | ||
/*
 * Last close of the fanotify fd: flush all pending permission events
 * (auto-allowing them, since nobody can answer anymore) and drop the
 * group reference taken at fanotify_init time.
 */
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
	struct fanotify_response_event *re, *lre;

	pr_debug("%s: file=%p group=%p\n", __func__, file, group);

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_lock(&group->fanotify_data.access_mutex);

	/* from now on prepare_for_access_response() auto-allows instead
	 * of queueing — must be set before we drain the list */
	group->fanotify_data.bypass_perm = true;

	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
			 re, re->event);

		list_del_init(&re->list);
		re->event->response = FAN_ALLOW;

		kmem_cache_free(fanotify_response_event_cache, re);
	}
	mutex_unlock(&group->fanotify_data.access_mutex);

	/* unblock every task waiting for a verdict */
	wake_up(&group->fanotify_data.access_waitq);
#endif
	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_put_group(group);

	return 0;
}
402 | ||
/*
 * ioctl() on a fanotify fd.  Only FIONREAD is supported: report how many
 * bytes a read() would currently return.
 */
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		/* every queued event copies out a fixed-size record */
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		/* NOTE(review): send_len is size_t but FIONREAD takes an
		 * int — truncation on huge queues; confirm acceptable */
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
427 | ||
/* file operations for the anonymous fanotify group fd */
static const struct file_operations fanotify_fops = {
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,	/* permission responses */
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,		/* fd is not seekable */
};
438 | ||
/* destructor passed to fsnotify_init_mark(); returns mark to our cache */
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}
443 | ||
/*
 * Resolve the (dfd, filename) pair of fanotify_mark() into *path.
 * filename == NULL means "mark dfd itself".  Honors FAN_MARK_DONT_FOLLOW
 * and FAN_MARK_ONLYDIR.  On success the caller owns a path reference.
 */
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct file *file;
		int fput_needed;

		ret = -EBADF;
		file = fget_light(dfd, &fput_needed);
		if (!file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
			fput_light(file, fput_needed);
			goto out;
		}

		*path = file->f_path;
		path_get(path);		/* keep path alive past fput */
		fput_light(file, fput_needed);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}
491 | ||
/*
 * Clear @mask bits from either the mark's event mask or, with
 * FAN_MARK_IGNORED_MASK, its ignored mask.  Destroys the mark when the
 * touched mask becomes empty.  Returns the bits actually removed.
 */
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	/* nothing left in the mask we modified: the mark is useless */
	if (!(oldmask & ~mask))
		fsnotify_destroy_mark(fsn_mark);

	return mask & oldmask;
}
513 | ||
/*
 * FAN_MARK_REMOVE on a mount: strip @mask from this group's mark on
 * @mnt and recompute the mount's aggregate mask if any watched bits
 * actually went away.  -ENOENT if the group holds no mark on @mnt.
 */
static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the reference from fsnotify_find_vfsmount_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}
2a3edf86 | 532 | |
/*
 * FAN_MARK_REMOVE on an inode: strip @mask from this group's mark on
 * @inode and recompute the inode's aggregate mask if any watched bits
 * actually went away.  -ENOENT if the group holds no mark on @inode.
 */
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}
552 | ||
/*
 * OR @mask into either the mark's event mask or, with
 * FAN_MARK_IGNORED_MASK, its ignored mask (optionally making the
 * ignored bits survive modification).  Returns the bits newly added.
 */
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask | mask));
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}
	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}
573 | ||
/*
 * FAN_MARK_ADD on a mount: find or create this group's mark on @mnt,
 * OR in @mask, and recompute the mount's aggregate mask if new bits
 * appeared.  Returns 0 or a negative errno.
 */
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		int ret;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
		if (ret) {
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	/* drop the find/init reference; the attached mark keeps its own */
	fsnotify_put_mark(fsn_mark);
	if (added & ~mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}
603 | ||
/*
 * FAN_MARK_ADD on an inode: find or create this group's mark on @inode,
 * OR in @mask, and recompute the inode's aggregate mask if new bits
 * appeared.  Returns 0 or a negative errno.
 */
static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		int ret;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
		if (ret) {
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	/* drop the find/init reference; the attached mark keeps its own */
	fsnotify_put_mark(fsn_mark);
	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);
	return 0;
}
2a3edf86 | 634 | |
/* fanotify syscalls */
/*
 * fanotify_init(2): allocate a new notification group and return an
 * anonymous fd for it.  @flags selects FAN_CLOEXEC/FAN_NONBLOCK etc.;
 * @event_f_flags is used when opening files for delivered events.
 * Requires CAP_SYS_ADMIN.
 */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	/* the group fd itself must never generate fanotify events */
	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group))
		return PTR_ERR(group);

	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
#endif

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_put_group;

	return fd;

out_put_group:
	fsnotify_put_group(group);
	return fd;	/* fd holds the negative errno here */
}
bbaa4168 | 678 | |
/*
 * fanotify_mark(2): add, remove, or flush marks for the group behind
 * @fanotify_fd.  The target is either the mount or the inode resolved
 * from (dfd, pathname), selected by FAN_MARK_MOUNT.
 */
SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
			      __u64 mask, int dfd,
			      const char __user * pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct file *filp;
	struct path path;
	int ret, fput_needed;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	/* exactly one operation must be requested */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
	case FAN_MARK_REMOVE:
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	filp = fget_light(fanotify_fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &fanotify_fops))
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;
	group = filp->private_data;

	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		/* unreachable: the earlier switch validated the op bits */
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
763 | ||
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
/*
 * Architectures with syscall wrappers pass all arguments as longs;
 * narrow them back to the real types before calling the implementation.
 */
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
				  long dfd, long pathname)
{
	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
				  mask, (int) dfd,
				  (const char  __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif
774 | ||
/*
 * fanotify_user_setup - Our initialization function. Note that we cannot return
 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	/* SLAB_PANIC: allocation failure panics rather than returning NULL */
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
						   SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);