/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * fsnotify inode mark locking, lifetime and refcounting
 *
 * REFCNT:
 * The group->refcnt and mark->refcnt tell how many "things" in the kernel
 * currently are referencing the objects. Both kinds of objects typically will
 * live inside the kernel with a refcnt of 2, one for its creation and one for
 * the reference a group and a mark hold to each other.
 * If you are holding the appropriate locks, you can take a reference and the
 * object itself is guaranteed to survive until the reference is dropped.
 *
 * LOCKING:
 * There are 3 locks involved with fsnotify inode marks and they MUST be taken
 * in order as follows:
 *
 * group->mark_mutex
 * mark->lock
 * mark->connector->lock
 *
 * group->mark_mutex protects the marks_list anchored inside a given group and
 * each mark is hooked via the g_list. It also protects the group's private
 * data (i.e. group limits).
 *
 * mark->lock protects the mark's attributes like its masks and flags.
 * Furthermore it protects the access to a reference of the group that the mark
 * is assigned to as well as the access to a reference of the inode/vfsmount
 * that is being watched by the mark.
 *
 * mark->connector->lock protects the list of marks anchored inside an
 * inode / vfsmount and each mark is hooked via the i_list.
 *
 * A list of notification marks relating to inode / mnt is contained in
 * fsnotify_mark_connector. That structure is alive as long as there are any
 * marks in the list and is also protected by fsnotify_mark_srcu. A mark gets
 * detached from fsnotify_mark_connector when last reference to the mark is
 * dropped. Thus having mark reference is enough to protect mark->connector
 * pointer and to make sure fsnotify_mark_connector cannot disappear. Also
 * because we remove mark from g_list before dropping mark reference associated
 * with that, any mark found through g_list is guaranteed to have
 * mark->connector set until we drop group->mark_mutex.
 *
 * LIFETIME:
 * Inode marks survive between when they are added to an inode and when their
 * refcnt==0. Marks are also protected by fsnotify_mark_srcu.
 *
 * The inode mark can be cleared for a number of different reasons including:
 * - The inode is unlinked for the last time. (fsnotify_inode_remove)
 * - The inode is being evicted from cache. (fsnotify_inode_delete)
 * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
 * - Something explicitly requests that it be removed. (fsnotify_destroy_mark)
 * - The fsnotify_group associated with the mark is going away and all such marks
 *   need to be cleaned up. (fsnotify_clear_marks_by_group)
 *
 * This has the very interesting property of being able to run concurrently
 * with any (or all) of the other removal paths.
 */
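
/*
 * As a rough sketch of the ordering above: a path that ends up needing all
 * three locks (the attach path below does, via fsnotify_add_mark_locked()
 * and fsnotify_add_mark_list()) must nest them exactly like this:
 *
 *	mutex_lock(&group->mark_mutex);
 *	spin_lock(&mark->lock);
 *	spin_lock(&mark->connector->lock);
 *	...
 *	spin_unlock(&mark->connector->lock);
 *	spin_unlock(&mark->lock);
 *	mutex_unlock(&group->mark_mutex);
 */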

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

#define FSNOTIFY_REAPER_DELAY	(1)	/* 1 jiffy */

struct srcu_struct fsnotify_mark_srcu;
struct kmem_cache *fsnotify_mark_connector_cachep;

static DEFINE_SPINLOCK(destroy_lock);
static LIST_HEAD(destroy_list);
static struct fsnotify_mark_connector *connector_destroy_list;

static void fsnotify_mark_destroy_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy_workfn);

static void fsnotify_connector_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(connector_reaper_work, fsnotify_connector_destroy_workfn);

void fsnotify_get_mark(struct fsnotify_mark *mark)
{
	WARN_ON_ONCE(!atomic_read(&mark->refcnt));
	atomic_inc(&mark->refcnt);
}

/*
 * Get mark reference when we found the mark via lockless traversal of object
 * list. Mark can be already removed from the list by now and on its way to be
 * destroyed once SRCU period ends.
 */
static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
{
	return atomic_inc_not_zero(&mark->refcnt);
}

static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
{
	u32 new_mask = 0;
	struct fsnotify_mark *mark;

	assert_spin_locked(&conn->lock);
	hlist_for_each_entry(mark, &conn->list, obj_list) {
		if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
			new_mask |= mark->mask;
	}
	if (conn->flags & FSNOTIFY_OBJ_TYPE_INODE)
		conn->inode->i_fsnotify_mask = new_mask;
	else if (conn->flags & FSNOTIFY_OBJ_TYPE_VFSMOUNT)
		real_mount(conn->mnt)->mnt_fsnotify_mask = new_mask;
}

/*
 * Calculate mask of events for a list of marks. The caller must make sure
 * connector and connector->inode cannot disappear under us. Callers achieve
 * this by holding a mark->lock or mark->group->mark_mutex for a mark on this
 * list.
 */
void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
{
	if (!conn)
		return;

	spin_lock(&conn->lock);
	__fsnotify_recalc_mask(conn);
	spin_unlock(&conn->lock);
	if (conn->flags & FSNOTIFY_OBJ_TYPE_INODE)
		__fsnotify_update_child_dentry_flags(conn->inode);
}

/* Free all connectors queued for freeing once SRCU period ends */
static void fsnotify_connector_destroy_workfn(struct work_struct *work)
{
	struct fsnotify_mark_connector *conn, *free;

	spin_lock(&destroy_lock);
	conn = connector_destroy_list;
	connector_destroy_list = NULL;
	spin_unlock(&destroy_lock);

	synchronize_srcu(&fsnotify_mark_srcu);
	while (conn) {
		free = conn;
		conn = conn->destroy_next;
		kmem_cache_free(fsnotify_mark_connector_cachep, free);
	}
}

static struct inode *fsnotify_detach_connector_from_object(
					struct fsnotify_mark_connector *conn)
{
	struct inode *inode = NULL;

	if (conn->flags & FSNOTIFY_OBJ_TYPE_INODE) {
		inode = conn->inode;
		rcu_assign_pointer(inode->i_fsnotify_marks, NULL);
		inode->i_fsnotify_mask = 0;
		conn->inode = NULL;
		conn->flags &= ~FSNOTIFY_OBJ_TYPE_INODE;
	} else if (conn->flags & FSNOTIFY_OBJ_TYPE_VFSMOUNT) {
		rcu_assign_pointer(real_mount(conn->mnt)->mnt_fsnotify_marks,
				   NULL);
		real_mount(conn->mnt)->mnt_fsnotify_mask = 0;
		conn->mnt = NULL;
		conn->flags &= ~FSNOTIFY_OBJ_TYPE_VFSMOUNT;
	}

	return inode;
}

static void fsnotify_final_mark_destroy(struct fsnotify_mark *mark)
{
	if (mark->group)
		fsnotify_put_group(mark->group);
	mark->free_mark(mark);
}

void fsnotify_put_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_mark_connector *conn;
	struct inode *inode = NULL;
	bool free_conn = false;

	/* Catch marks that were actually never attached to object */
	if (!mark->connector) {
		if (atomic_dec_and_test(&mark->refcnt))
			fsnotify_final_mark_destroy(mark);
		return;
	}

	/*
	 * We have to be careful so that traversals of obj_list under lock can
	 * safely grab mark reference.
	 */
	if (!atomic_dec_and_lock(&mark->refcnt, &mark->connector->lock))
		return;

	conn = mark->connector;
	hlist_del_init_rcu(&mark->obj_list);
	if (hlist_empty(&conn->list)) {
		inode = fsnotify_detach_connector_from_object(conn);
		free_conn = true;
	} else {
		__fsnotify_recalc_mask(conn);
	}
	mark->connector = NULL;
	spin_unlock(&conn->lock);

	iput(inode);

	if (free_conn) {
		spin_lock(&destroy_lock);
		conn->destroy_next = connector_destroy_list;
		connector_destroy_list = conn;
		spin_unlock(&destroy_lock);
		queue_work(system_unbound_wq, &connector_reaper_work);
	}
	/*
	 * Note that we didn't update flags telling whether inode cares about
	 * what's happening with children. We update these flags from
	 * __fsnotify_parent() lazily when next event happens on one of our
	 * children.
	 */
	spin_lock(&destroy_lock);
	list_add(&mark->g_list, &destroy_list);
	spin_unlock(&destroy_lock);
	queue_delayed_work(system_unbound_wq, &reaper_work,
			   FSNOTIFY_REAPER_DELAY);
}

bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_group *group;

	if (WARN_ON_ONCE(!iter_info->inode_mark && !iter_info->vfsmount_mark))
		return false;

	if (iter_info->inode_mark)
		group = iter_info->inode_mark->group;
	else
		group = iter_info->vfsmount_mark->group;

	/*
	 * Since acquisition of mark reference is an atomic op as well, we can
	 * be sure this inc is seen before any effect of refcount increment.
	 */
	atomic_inc(&group->user_waits);

	if (iter_info->inode_mark) {
		/* This can fail if mark is being removed */
		if (!fsnotify_get_mark_safe(iter_info->inode_mark))
			goto out_wait;
	}
	if (iter_info->vfsmount_mark) {
		if (!fsnotify_get_mark_safe(iter_info->vfsmount_mark))
			goto out_inode;
	}

	/*
	 * Now that both marks are pinned by refcount in the inode / vfsmount
	 * lists, we can drop SRCU lock, and safely resume the list iteration
	 * once userspace returns.
	 */
	srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx);

	return true;
out_inode:
	if (iter_info->inode_mark)
		fsnotify_put_mark(iter_info->inode_mark);
out_wait:
	if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
		wake_up(&group->notification_waitq);
	return false;
}

void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info)
{
	struct fsnotify_group *group = NULL;

	iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
	if (iter_info->inode_mark) {
		group = iter_info->inode_mark->group;
		fsnotify_put_mark(iter_info->inode_mark);
	}
	if (iter_info->vfsmount_mark) {
		group = iter_info->vfsmount_mark->group;
		fsnotify_put_mark(iter_info->vfsmount_mark);
	}
	/*
	 * We abuse notification_waitq on group shutdown for waiting for all
	 * marks pinned when waiting for userspace.
	 */
	if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
		wake_up(&group->notification_waitq);
}
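
/*
 * The two helpers above are intended to be used as a pair around a wait for
 * userspace (a fanotify permission response, for example), roughly:
 *
 *	if (fsnotify_prepare_user_wait(iter_info)) {
 *		wait_event(...);	(hypothetical waiting step)
 *		fsnotify_finish_user_wait(iter_info);
 *	}
 *
 * so that fsnotify_mark_srcu is not held across the sleep while the marks
 * stay pinned by their refcounts.
 */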

/*
 * Mark the mark as detached and remove it from the group list. The mark still
 * stays in the object list until its last reference is dropped. Note that we
 * rely on the mark being removed from the group list before the corresponding
 * reference to it is dropped. In particular we rely on mark->connector being
 * valid while we hold group->mark_mutex if we found the mark through g_list.
 *
 * Must be called with group->mark_mutex held. The caller must either hold
 * reference to the mark or be protected by fsnotify_mark_srcu.
 */
void fsnotify_detach_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group = mark->group;

	WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex));
	WARN_ON_ONCE(!srcu_read_lock_held(&fsnotify_mark_srcu) &&
		     atomic_read(&mark->refcnt) < 1 +
			!!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED));

	spin_lock(&mark->lock);
	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		spin_unlock(&mark->lock);
		return;
	}
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ATTACHED;
	list_del_init(&mark->g_list);
	spin_unlock(&mark->lock);

	atomic_dec(&group->num_marks);

	/* Drop mark reference acquired in fsnotify_add_mark_locked() */
	fsnotify_put_mark(mark);
}

/*
 * Free fsnotify mark. The mark is actually only marked as being freed. The
 * freeing actually happens only once the last reference to the mark is
 * dropped, from a workqueue which first waits for the srcu period to end.
 *
 * Caller must have a reference to the mark or be protected by
 * fsnotify_mark_srcu.
 */
void fsnotify_free_mark(struct fsnotify_mark *mark)
{
	struct fsnotify_group *group = mark->group;

	spin_lock(&mark->lock);
	/* something else already called this function on this mark */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) {
		spin_unlock(&mark->lock);
		return;
	}
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
	spin_unlock(&mark->lock);

	/*
	 * Some groups like to know that marks are being freed. This is a
	 * callback to the group function to let it know that this mark
	 * is being freed.
	 */
	if (group->ops->freeing_mark)
		group->ops->freeing_mark(mark, group);
}

void fsnotify_destroy_mark(struct fsnotify_mark *mark,
			   struct fsnotify_group *group)
{
	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
	fsnotify_detach_mark(mark);
	mutex_unlock(&group->mark_mutex);
	fsnotify_free_mark(mark);
}

/*
 * Sorting function for lists of fsnotify marks.
 *
 * Fanotify supports different notification classes (reflected as priority of
 * notification group). Events shall be passed to notification groups in
 * decreasing priority order. To achieve this marks in notification lists for
 * inodes and vfsmounts are sorted so that priorities of corresponding groups
 * are descending.
 *
 * Furthermore correct handling of the ignore mask requires processing inode
 * and vfsmount marks of each group together. Using the group address as
 * further sort criterion provides a unique sorting order and thus we can
 * merge inode and vfsmount lists of marks in linear time and find groups
 * present in both lists.
 *
 * A return value of 1 signifies that b has priority over a.
 * A return value of 0 signifies that the two marks have to be handled together.
 * A return value of -1 signifies that a has priority over b.
 */
int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
{
	if (a == b)
		return 0;
	if (!a)
		return 1;
	if (!b)
		return -1;
	if (a->priority < b->priority)
		return 1;
	if (a->priority > b->priority)
		return -1;
	if (a < b)
		return 1;
	return -1;
}
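
/*
 * For example, fsnotify_add_mark_list() below keeps each object's mark list
 * sorted by inserting a new mark before the first existing mark for which
 * this comparison does not give the new mark priority:
 *
 *	cmp = fsnotify_compare_groups(lmark->group, mark->group);
 *	if (cmp >= 0)
 *		hlist_add_before_rcu(&mark->obj_list, &lmark->obj_list);
 */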

static int fsnotify_attach_connector_to_object(
				struct fsnotify_mark_connector __rcu **connp,
				struct inode *inode,
				struct vfsmount *mnt)
{
	struct fsnotify_mark_connector *conn;

	conn = kmem_cache_alloc(fsnotify_mark_connector_cachep, GFP_KERNEL);
	if (!conn)
		return -ENOMEM;
	spin_lock_init(&conn->lock);
	INIT_HLIST_HEAD(&conn->list);
	if (inode) {
		conn->flags = FSNOTIFY_OBJ_TYPE_INODE;
		conn->inode = igrab(inode);
	} else {
		conn->flags = FSNOTIFY_OBJ_TYPE_VFSMOUNT;
		conn->mnt = mnt;
	}
	/*
	 * cmpxchg() provides the barrier so that readers of *connp can see
	 * only initialized structure
	 */
	if (cmpxchg(connp, NULL, conn)) {
		/* Someone else created list structure for us */
		if (inode)
			iput(inode);
		kmem_cache_free(fsnotify_mark_connector_cachep, conn);
	}

	return 0;
}

/*
 * Get mark connector, make sure it is alive and return with its lock held.
 * This is for users that get connector pointer from inode or mount. Users that
 * hold reference to a mark on the list may directly lock connector->lock as
 * they are sure list cannot go away under them.
 */
static struct fsnotify_mark_connector *fsnotify_grab_connector(
				struct fsnotify_mark_connector __rcu **connp)
{
	struct fsnotify_mark_connector *conn;
	int idx;

	idx = srcu_read_lock(&fsnotify_mark_srcu);
	conn = srcu_dereference(*connp, &fsnotify_mark_srcu);
	if (!conn)
		goto out;
	spin_lock(&conn->lock);
	if (!(conn->flags & (FSNOTIFY_OBJ_TYPE_INODE |
			     FSNOTIFY_OBJ_TYPE_VFSMOUNT))) {
		spin_unlock(&conn->lock);
		srcu_read_unlock(&fsnotify_mark_srcu, idx);
		return NULL;
	}
out:
	srcu_read_unlock(&fsnotify_mark_srcu, idx);
	return conn;
}

/*
 * Add mark into proper place in given list of marks. These marks may be used
 * for the fsnotify backend to determine which event types should be delivered
 * to which group and for which inodes. These marks are ordered according to
 * priority, highest number first, and then by the group's location in memory.
 */
static int fsnotify_add_mark_list(struct fsnotify_mark *mark,
				  struct inode *inode, struct vfsmount *mnt,
				  int allow_dups)
{
	struct fsnotify_mark *lmark, *last = NULL;
	struct fsnotify_mark_connector *conn;
	struct fsnotify_mark_connector __rcu **connp;
	int cmp;
	int err = 0;

	if (WARN_ON(!inode && !mnt))
		return -EINVAL;
	if (inode)
		connp = &inode->i_fsnotify_marks;
	else
		connp = &real_mount(mnt)->mnt_fsnotify_marks;
restart:
	spin_lock(&mark->lock);
	conn = fsnotify_grab_connector(connp);
	if (!conn) {
		spin_unlock(&mark->lock);
		err = fsnotify_attach_connector_to_object(connp, inode, mnt);
		if (err)
			return err;
		goto restart;
	}

	/* is mark the first mark? */
	if (hlist_empty(&conn->list)) {
		hlist_add_head_rcu(&mark->obj_list, &conn->list);
		goto added;
	}

	/* should mark be in the middle of the current list? */
	hlist_for_each_entry(lmark, &conn->list, obj_list) {
		last = lmark;

		if ((lmark->group == mark->group) &&
		    (lmark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) &&
		    !allow_dups) {
			err = -EEXIST;
			goto out_err;
		}

		cmp = fsnotify_compare_groups(lmark->group, mark->group);
		if (cmp >= 0) {
			hlist_add_before_rcu(&mark->obj_list, &lmark->obj_list);
			goto added;
		}
	}

	BUG_ON(last == NULL);
	/* mark should be the last entry.  last is the current last entry */
	hlist_add_behind_rcu(&mark->obj_list, &last->obj_list);
added:
	mark->connector = conn;
out_err:
	spin_unlock(&conn->lock);
	spin_unlock(&mark->lock);
	return err;
}

/*
 * Attach an initialized mark to a given group and fs object.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which group.
 */
int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct inode *inode,
			     struct vfsmount *mnt, int allow_dups)
{
	struct fsnotify_group *group = mark->group;
	int ret = 0;

	BUG_ON(inode && mnt);
	BUG_ON(!inode && !mnt);
	BUG_ON(!mutex_is_locked(&group->mark_mutex));

	/*
	 * LOCKING ORDER!!!!
	 * group->mark_mutex
	 * mark->lock
	 * mark->connector->lock
	 */
	spin_lock(&mark->lock);
	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_ATTACHED;

	list_add(&mark->g_list, &group->marks_list);
	atomic_inc(&group->num_marks);
	fsnotify_get_mark(mark); /* for g_list */
	spin_unlock(&mark->lock);

	ret = fsnotify_add_mark_list(mark, inode, mnt, allow_dups);
	if (ret)
		goto err;

	if (mark->mask)
		fsnotify_recalc_mask(mark->connector);

	return ret;
err:
	spin_lock(&mark->lock);
	mark->flags &= ~(FSNOTIFY_MARK_FLAG_ALIVE |
			 FSNOTIFY_MARK_FLAG_ATTACHED);
	list_del_init(&mark->g_list);
	atomic_dec(&group->num_marks);
	spin_unlock(&mark->lock);

	fsnotify_put_mark(mark);
	return ret;
}

int fsnotify_add_mark(struct fsnotify_mark *mark, struct inode *inode,
		      struct vfsmount *mnt, int allow_dups)
{
	int ret;
	struct fsnotify_group *group = mark->group;

	mutex_lock(&group->mark_mutex);
	ret = fsnotify_add_mark_locked(mark, inode, mnt, allow_dups);
	mutex_unlock(&group->mark_mutex);
	return ret;
}

/*
 * Given a list of marks, find the mark associated with given group. If found
 * take a reference to that mark and return it, else return NULL.
 */
struct fsnotify_mark *fsnotify_find_mark(
				struct fsnotify_mark_connector __rcu **connp,
				struct fsnotify_group *group)
{
	struct fsnotify_mark_connector *conn;
	struct fsnotify_mark *mark;

	conn = fsnotify_grab_connector(connp);
	if (!conn)
		return NULL;

	hlist_for_each_entry(mark, &conn->list, obj_list) {
		if (mark->group == group &&
		    (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
			fsnotify_get_mark(mark);
			spin_unlock(&conn->lock);
			return mark;
		}
	}
	spin_unlock(&conn->lock);
	return NULL;
}

/* Clear any marks in a group with given type */
void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
				   unsigned int type)
{
	struct fsnotify_mark *lmark, *mark;
	LIST_HEAD(to_free);
	struct list_head *head = &to_free;

	/* Skip selection step if we want to clear all marks. */
	if (type == FSNOTIFY_OBJ_ALL_TYPES) {
		head = &group->marks_list;
		goto clear;
	}
	/*
	 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
	 * fsnotify_clear_marks_by_inode() can come and free marks, even ones
	 * already moved to our to_free list, so we have to use mark_mutex even
	 * when accessing that list. And freeing a mark requires us to drop
	 * mark_mutex. So we can reliably free only the first mark in the list.
	 * That's why we first move the marks to be freed to the to_free list
	 * in one go and then free the marks in that list one by one.
	 */
	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
		if (mark->connector->flags & type)
			list_move(&mark->g_list, &to_free);
	}
	mutex_unlock(&group->mark_mutex);

clear:
	while (1) {
		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
		if (list_empty(head)) {
			mutex_unlock(&group->mark_mutex);
			break;
		}
		mark = list_first_entry(head, struct fsnotify_mark, g_list);
		fsnotify_get_mark(mark);
		fsnotify_detach_mark(mark);
		mutex_unlock(&group->mark_mutex);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
	}
}

/* Destroy all marks attached to inode / vfsmount */
void fsnotify_destroy_marks(struct fsnotify_mark_connector __rcu **connp)
{
	struct fsnotify_mark_connector *conn;
	struct fsnotify_mark *mark, *old_mark = NULL;
	struct inode *inode;

	conn = fsnotify_grab_connector(connp);
	if (!conn)
		return;
	/*
	 * We have to be careful since we can race with e.g.
	 * fsnotify_clear_marks_by_group() and once we drop the conn->lock, the
	 * list can get modified. However we are holding mark reference and
	 * thus our mark cannot be removed from obj_list so we can continue
	 * iteration after regaining conn->lock.
	 */
	hlist_for_each_entry(mark, &conn->list, obj_list) {
		fsnotify_get_mark(mark);
		spin_unlock(&conn->lock);
		if (old_mark)
			fsnotify_put_mark(old_mark);
		old_mark = mark;
		fsnotify_destroy_mark(mark, mark->group);
		spin_lock(&conn->lock);
	}
	/*
	 * Detach list from object now so that we don't pin inode until all
	 * mark references get dropped. It would lead to strange results such
	 * as delaying inode deletion or blocking unmount.
	 */
	inode = fsnotify_detach_connector_from_object(conn);
	spin_unlock(&conn->lock);
	if (old_mark)
		fsnotify_put_mark(old_mark);
	iput(inode);
}

/*
 * Nothing fancy, just initialize lists and locks and counters.
 */
void fsnotify_init_mark(struct fsnotify_mark *mark,
			struct fsnotify_group *group,
			void (*free_mark)(struct fsnotify_mark *mark))
{
	memset(mark, 0, sizeof(*mark));
	spin_lock_init(&mark->lock);
	atomic_set(&mark->refcnt, 1);
	mark->free_mark = free_mark;
	fsnotify_get_group(group);
	mark->group = group;
}
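
/*
 * A rough sketch of typical backend usage of the API above (error handling
 * omitted; my_free_mark is a hypothetical callback supplied by the backend):
 *
 *	fsnotify_init_mark(mark, group, my_free_mark);
 *	mark->mask = FS_MODIFY;
 *	ret = fsnotify_add_mark(mark, inode, NULL, 0);
 *	...
 *	fsnotify_destroy_mark(mark, group);
 *	fsnotify_put_mark(mark);
 *
 * The final fsnotify_put_mark() drops the reference the caller got from
 * fsnotify_init_mark(); the reference held for g_list is dropped by
 * fsnotify_detach_mark() on the destroy path.
 */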

/*
 * Destroy all marks in destroy_list; wait for the SRCU period to finish
 * before actually freeing the marks.
 */
static void fsnotify_mark_destroy_workfn(struct work_struct *work)
{
	struct fsnotify_mark *mark, *next;
	struct list_head private_destroy_list;

	spin_lock(&destroy_lock);
	/* exchange the list head */
	list_replace_init(&destroy_list, &private_destroy_list);
	spin_unlock(&destroy_lock);

	synchronize_srcu(&fsnotify_mark_srcu);

	list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
		list_del_init(&mark->g_list);
		fsnotify_final_mark_destroy(mark);
	}
}

/* Wait for all marks queued for destruction to be actually destroyed */
void fsnotify_wait_marks_destroyed(void)
{
	flush_delayed_work(&reaper_work);
}