]>
Commit | Line | Data |
---|---|---|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 */
15 | ||
2d9048e2 | 16 | #include <linux/file.h> |
63c882a0 EP |
17 | #include <linux/fs.h> /* struct inode */ |
18 | #include <linux/fsnotify_backend.h> | |
19 | #include <linux/idr.h> | |
c013d5a4 | 20 | #include <linux/init.h> /* fs_initcall */ |
2d9048e2 | 21 | #include <linux/inotify.h> |
63c882a0 | 22 | #include <linux/kernel.h> /* roundup() */ |
63c882a0 | 23 | #include <linux/namei.h> /* LOOKUP_FOLLOW */ |
174cd4b1 | 24 | #include <linux/sched/signal.h> |
63c882a0 | 25 | #include <linux/slab.h> /* struct kmem_cache */ |
2d9048e2 | 26 | #include <linux/syscalls.h> |
63c882a0 | 27 | #include <linux/types.h> |
c44dcc56 | 28 | #include <linux/anon_inodes.h> |
63c882a0 EP |
29 | #include <linux/uaccess.h> |
30 | #include <linux/poll.h> | |
31 | #include <linux/wait.h> | |
d46eb14b | 32 | #include <linux/memcontrol.h> |
ac5656d8 | 33 | #include <linux/security.h> |
2d9048e2 | 34 | |
63c882a0 | 35 | #include "inotify.h" |
be77196b | 36 | #include "../fdinfo.h" |
2d9048e2 | 37 | |
63c882a0 | 38 | #include <asm/ioctls.h> |
2d9048e2 | 39 | |
/*
 * An inotify watch requires allocating an inotify_inode_mark structure as
 * well as pinning the watched inode. Doubling the size of a VFS inode
 * should be more than enough to cover the additional filesystem inode
 * size increase.
 */
#define INOTIFY_WATCH_COST	(sizeof(struct inotify_inode_mark) + \
				 2 * sizeof(struct inode))

/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

/* slab cache for inotify_inode_mark structures, one per watch */
struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

/*
 * Tunables exported under /proc/sys/fs/inotify/.  extra1 = SYSCTL_ZERO
 * floors each value at 0 so userspace cannot write a negative limit.
 */
struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO
	},
	{ }	/* sentinel */
};
#endif /* CONFIG_SYSCTL */
86 | ||
957f7b47 | 87 | static inline __u32 inotify_arg_to_mask(struct inode *inode, u32 arg) |
1c17d18e | 88 | { |
63c882a0 | 89 | __u32 mask; |
1c17d18e | 90 | |
611da04f | 91 | /* |
957f7b47 AG |
92 | * Everything should accept their own ignored and should receive events |
93 | * when the inode is unmounted. All directories care about children. | |
611da04f | 94 | */ |
957f7b47 AG |
95 | mask = (FS_IN_IGNORED | FS_UNMOUNT); |
96 | if (S_ISDIR(inode->i_mode)) | |
97 | mask |= FS_EVENT_ON_CHILD; | |
2d9048e2 | 98 | |
63c882a0 | 99 | /* mask off the flags used to open the fd */ |
8c1934c8 | 100 | mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK)); |
2d9048e2 | 101 | |
63c882a0 | 102 | return mask; |
2d9048e2 AG |
103 | } |
104 | ||
63c882a0 | 105 | static inline u32 inotify_mask_to_arg(__u32 mask) |
2d9048e2 | 106 | { |
63c882a0 EP |
107 | return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED | |
108 | IN_Q_OVERFLOW); | |
2d9048e2 AG |
109 | } |
110 | ||
/* inotify userspace file descriptor functions */
static __poll_t inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	/* notification_lock protects the group's event queue */
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = EPOLLIN | EPOLLRDNORM;	/* events are ready to read */
	spin_unlock(&group->notification_lock);

	return ret;
}
125 | ||
7053aee2 | 126 | static int round_event_name_len(struct fsnotify_event *fsn_event) |
e9fe6904 | 127 | { |
7053aee2 JK |
128 | struct inotify_event_info *event; |
129 | ||
130 | event = INOTIFY_E(fsn_event); | |
e9fe6904 JK |
131 | if (!event->name_len) |
132 | return 0; | |
133 | return roundup(event->name_len + 1, sizeof(struct inotify_event)); | |
134 | } | |
135 | ||
/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * not large enough.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	/* peek first so we can reject it without dequeueing */
	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_lock the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_first_event(group);

	return event;
}
166 | ||
167 | /* | |
168 | * Copy an event to user space, returning how much we copied. | |
169 | * | |
170 | * We already checked that the event size is smaller than the | |
171 | * buffer we had in "get_one_event()" above. | |
172 | */ | |
63c882a0 | 173 | static ssize_t copy_event_to_user(struct fsnotify_group *group, |
7053aee2 | 174 | struct fsnotify_event *fsn_event, |
3632dee2 VN |
175 | char __user *buf) |
176 | { | |
63c882a0 | 177 | struct inotify_event inotify_event; |
7053aee2 | 178 | struct inotify_event_info *event; |
3632dee2 | 179 | size_t event_size = sizeof(struct inotify_event); |
e9fe6904 JK |
180 | size_t name_len; |
181 | size_t pad_name_len; | |
63c882a0 | 182 | |
7053aee2 | 183 | pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event); |
63c882a0 | 184 | |
7053aee2 | 185 | event = INOTIFY_E(fsn_event); |
e9fe6904 | 186 | name_len = event->name_len; |
b962e731 | 187 | /* |
e9fe6904 | 188 | * round up name length so it is a multiple of event_size |
0db501bd EB |
189 | * plus an extra byte for the terminating '\0'. |
190 | */ | |
7053aee2 | 191 | pad_name_len = round_event_name_len(fsn_event); |
e9fe6904 | 192 | inotify_event.len = pad_name_len; |
a0a92d26 | 193 | inotify_event.mask = inotify_mask_to_arg(event->mask); |
7053aee2 | 194 | inotify_event.wd = event->wd; |
63c882a0 | 195 | inotify_event.cookie = event->sync_cookie; |
3632dee2 | 196 | |
63c882a0 EP |
197 | /* send the main event */ |
198 | if (copy_to_user(buf, &inotify_event, event_size)) | |
3632dee2 VN |
199 | return -EFAULT; |
200 | ||
63c882a0 | 201 | buf += event_size; |
3632dee2 | 202 | |
63c882a0 EP |
203 | /* |
204 | * fsnotify only stores the pathname, so here we have to send the pathname | |
205 | * and then pad that pathname out to a multiple of sizeof(inotify_event) | |
e9fe6904 | 206 | * with zeros. |
63c882a0 | 207 | */ |
e9fe6904 | 208 | if (pad_name_len) { |
63c882a0 | 209 | /* copy the path name */ |
7053aee2 | 210 | if (copy_to_user(buf, event->name, name_len)) |
3632dee2 | 211 | return -EFAULT; |
e9fe6904 | 212 | buf += name_len; |
3632dee2 | 213 | |
0db501bd | 214 | /* fill userspace with 0's */ |
e9fe6904 | 215 | if (clear_user(buf, pad_name_len - name_len)) |
63c882a0 | 216 | return -EFAULT; |
e9fe6904 | 217 | event_size += pad_name_len; |
3632dee2 | 218 | } |
63c882a0 | 219 | |
3632dee2 VN |
220 | return event_size; |
221 | } | |
222 | ||
/*
 * read(2) on an inotify fd: drain as many queued events as fit into the
 * user buffer.  Blocks (unless O_NONBLOCK) until at least one event is
 * available; a short first event that doesn't fit yields -EINVAL.
 */
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			/* ERR_PTR(-EINVAL) means the next event won't fit */
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		/* already copied something out: return that instead of blocking */
		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	/* report bytes copied unless the last failure was -EFAULT */
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
274 | ||
/*
 * Last close of the inotify fd: tear down the group and with it every
 * watch it holds.
 */
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching get was inotify_init->fsnotify_obtain_group */
	fsnotify_destroy_group(group);

	return 0;
}
286 | ||
/*
 * ioctls on an inotify fd:
 *  FIONREAD            - bytes a read(2) would currently return
 *  INOTIFY_IOC_SETNEXTWD - (checkpoint/restore) preset the next watch
 *                          descriptor the idr will hand out
 */
static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		/* sum the userspace size of every queued event */
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
#ifdef CONFIG_CHECKPOINT_RESTORE
	case INOTIFY_IOC_SETNEXTWD:
		ret = -EINVAL;
		if (arg >= 1 && arg <= INT_MAX) {
			struct inotify_group_private_data *data;

			data = &group->inotify_data;
			spin_lock(&data->idr_lock);
			idr_set_cursor(&data->idr, (unsigned int)arg);
			spin_unlock(&data->idr_lock);
			ret = 0;
		}
		break;
#endif /* CONFIG_CHECKPOINT_RESTORE */
	}

	return ret;
}
330 | ||
/* file operations backing an inotify file descriptor */
static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};
341 | ||
2d9048e2 | 342 | |
/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 *
 * On success the caller owns a reference on *path and must path_put() it;
 * on error the reference has already been dropped.
 */
static int inotify_find_inode(const char __user *dirname, struct path *path,
						unsigned int flags, __u64 mask)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error) {
		path_put(path);
		return error;
	}
	/* give the security module a chance to veto the watch */
	error = security_path_notify(path, mask,
				FSNOTIFY_OBJ_TYPE_INODE);
	if (error)
		path_put(path);

	return error;
}
367 | ||
/*
 * Allocate a watch descriptor for @i_mark and publish it in the group's
 * idr.  Descriptors are handed out cyclically starting at 1.  On success
 * i_mark->wd is set and the idr holds its own reference on the mark.
 */
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	/* preload outside the spinlock so the allocation may sleep */
	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
387 | ||
/*
 * Look up the mark for watch descriptor @wd.  Caller must hold the
 * group's idr_lock.  On success an extra reference is taken on the mark;
 * the caller must drop it with fsnotify_put_mark().
 */
static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							  int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(refcount_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}
408 | ||
/*
 * Locking wrapper around inotify_idr_find_locked(); returns the mark for
 * @wd with an extra reference, or NULL.
 */
static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}
421 | ||
/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr? we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Lets look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found an mark in the idr at the right wd, but it's
	 * not the mark we were told to remove. eparis seriously
	 * fucked up somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(refcount_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad ref cnting.. */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	/* mark is no longer (or never was) in the idr */
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}
dead537d | 490 | |
/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 * Called by the fsnotify core when the mark is being destroyed.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_iter_info iter_info = { };

	fsnotify_iter_set_report_type_mark(&iter_info, FSNOTIFY_OBJ_TYPE_INODE,
					   fsn_mark);

	/* Queue ignore event for the watch */
	inotify_handle_event(group, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE,
			     NULL, NULL, 0, &iter_info);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	/* the watch is gone; give the user back one unit of watch quota */
	dec_inotify_watches(group->inotify_data.ucounts);
}
513 | ||
/*
 * Update the mask of an already-existing watch on @inode.  Returns the
 * watch descriptor, -ENOENT if no mark exists (caller then creates one),
 * or -EEXIST if IN_MASK_CREATE was set and a mark already exists.
 */
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int create = (arg & IN_MASK_CREATE);
	int ret;

	mask = inotify_arg_to_mask(inode, arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;
	else if (create) {
		ret = -EEXIST;
		goto out;
	}

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	/* IN_MASK_ADD ORs into the old mask; otherwise replace it */
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);

	}

	/* return the wd */
	ret = i_mark->wd;

out:
	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}
568 | ||
/*
 * Create a brand-new watch on @inode: allocate a mark, give it a watch
 * descriptor in the idr, charge the user's watch quota, and attach the
 * mark to the inode.  Returns the new wd or a negative error.
 */
static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(inode, arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_inode_mark_locked(&tmp_i_mark->fsn_mark, inode, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}


	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}
618 | ||
/*
 * Create or update the watch for @inode under the group's mark_mutex,
 * which serialises concurrent inotify_add_watch() callers.
 */
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}
633 | ||
/*
 * Allocate and initialise a new inotify fsnotify group: preallocate the
 * overflow event, set up the wd idr, and charge the per-user instance
 * quota.  Returns the group or an ERR_PTR.
 */
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	/*
	 * Preallocate the Q_OVERFLOW event so it can always be queued,
	 * even under memory pressure.
	 */
	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, 0);
	oevent->mask = FS_Q_OVERFLOW;
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;
	group->memcg = get_mem_cgroup_from_mm(current->mm);

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	/* per-user instance limit reached */
	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}
671 | ||
672 | ||
/* inotify syscalls */

/*
 * Common body of inotify_init(2) and inotify_init1(2): create a group
 * and wrap it in an anonymous-inode file descriptor.
 */
static int do_inotify_init(int flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency. */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
				  O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}
698 | ||
/* inotify_init1(2): like inotify_init(2) but accepts IN_CLOEXEC/IN_NONBLOCK */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	return do_inotify_init(flags);
}
703 | ||
/* inotify_init(2): legacy entry point, equivalent to inotify_init1(0) */
SYSCALL_DEFINE0(inotify_init)
{
	return do_inotify_init(0);
}
708 | ||
/*
 * inotify_add_watch(2): add or modify a watch for @pathname on the
 * inotify instance referred to by @fd.  Returns the watch descriptor
 * or a negative error.
 */
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify. We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*. This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */
	if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags,
				(mask & IN_ALL_EVENTS));
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
772 | ||
/*
 * inotify_rm_watch(2): remove the watch with descriptor @wd from the
 * inotify instance referred to by @fd.
 */
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	/* queues IN_IGNORED and removes the wd via the group's freeing_mark hook */
	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
805 | ||
/*
 * inotify_user_setup - Our initialization function. Note that we cannot return
 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	unsigned long watches_max;
	struct sysinfo si;

	si_meminfo(&si);
	/*
	 * Allow up to 1% of addressable memory to be allocated for inotify
	 * watches (per user) limited to the range [8192, 1048576].
	 */
	watches_max = (((si.totalram - si.totalhigh) / 100) << PAGE_SHIFT) /
			INOTIFY_WATCH_COST;
	watches_max = clamp(watches_max, 8192UL, 1048576UL);

	/* the userspace IN_* bits must stay identical to the fsnotify FS_* bits */
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUILD_BUG_ON(HWEIGHT32(ALL_INOTIFY_BITS) != 22);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark,
					       SLAB_PANIC|SLAB_ACCOUNT);

	/* default limits; tunable later via /proc/sys/fs/inotify/ */
	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = watches_max;

	return 0;
}
fs_initcall(inotify_user_setup);