/*
 * fs/notify/vfsmount_mark.c
 * (mirrored via git.proxmox.com: mirror_ubuntu-artful-kernel.git)
 */
1 /*
2 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to
16 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19 #include <linux/fs.h>
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/mount.h>
24 #include <linux/mutex.h>
25 #include <linux/spinlock.h>
26
27 #include <linux/atomic.h>
28
29 #include <linux/fsnotify_backend.h>
30 #include "fsnotify.h"
31 #include "../mount.h"
32
/*
 * Detach and destroy every fsnotify mark attached to @mnt.
 *
 * Marks cannot be destroyed while holding mnt_root->d_lock (destruction
 * takes the group's mark_mutex), so this runs in two phases: first, under
 * the lock, unhook each mark from the mount's hash list onto a private
 * free_list while pinning it with an extra reference; then, unlocked,
 * destroy each mark and drop the references taken here.
 */
void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
{
	struct fsnotify_mark *mark, *lmark;
	struct hlist_node *pos, *n;
	struct mount *m = real_mount(mnt);
	LIST_HEAD(free_list);

	/* Phase 1: move all marks to free_list, holding a ref on each. */
	spin_lock(&mnt->mnt_root->d_lock);
	hlist_for_each_entry_safe(mark, pos, n, &m->mnt_fsnotify_marks, m.m_list) {
		list_add(&mark->m.free_m_list, &free_list);
		hlist_del_init_rcu(&mark->m.m_list);
		/* ref keeps the mark alive after we drop d_lock */
		fsnotify_get_mark(mark);
	}
	spin_unlock(&mnt->mnt_root->d_lock);

	/* Phase 2: destroy each mark without d_lock held. */
	list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) {
		struct fsnotify_group *group;

		/*
		 * Pin the group under mark->lock so it cannot disappear
		 * between reading mark->group and destroying the mark.
		 */
		spin_lock(&mark->lock);
		fsnotify_get_group(mark->group);
		group = mark->group;
		spin_unlock(&mark->lock);

		fsnotify_destroy_mark(mark, group);
		fsnotify_put_mark(mark);	/* ref taken in phase 1 */
		fsnotify_put_group(group);	/* ref taken just above */
	}
}
61
/*
 * Destroy all vfsmount marks owned by @group (marks flagged
 * FSNOTIFY_MARK_FLAG_VFSMOUNT); inode marks of the group are untouched.
 */
void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
{
	fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_VFSMOUNT);
}
66
67 /*
68 * Recalculate the mask of events relevant to a given vfsmount locked.
69 */
70 static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
71 {
72 struct mount *m = real_mount(mnt);
73 struct fsnotify_mark *mark;
74 struct hlist_node *pos;
75 __u32 new_mask = 0;
76
77 assert_spin_locked(&mnt->mnt_root->d_lock);
78
79 hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list)
80 new_mask |= mark->mask;
81 m->mnt_fsnotify_mask = new_mask;
82 }
83
84 /*
85 * Recalculate the mnt->mnt_fsnotify_mask, or the mask of all FS_* event types
86 * any notifier is interested in hearing for this mount point
87 */
/*
 * Recalculate the mnt->mnt_fsnotify_mask, or the mask of all FS_* event types
 * any notifier is interested in hearing for this mount point.
 * Takes mnt_root->d_lock around the locked helper.
 */
void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt)
{
	spin_lock(&mnt->mnt_root->d_lock);
	fsnotify_recalc_vfsmount_mask_locked(mnt);
	spin_unlock(&mnt->mnt_root->d_lock);
}
94
/*
 * Unhook @mark from its mount's mark list and refresh the mount's event
 * mask.  Caller must hold both the owning group's mark_mutex and
 * mark->lock (asserted below); d_lock is taken here, nesting inside them.
 */
void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
{
	struct vfsmount *mnt = mark->m.mnt;

	BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));
	assert_spin_locked(&mark->lock);

	spin_lock(&mnt->mnt_root->d_lock);

	/* RCU removal: concurrent readers may still traverse past us */
	hlist_del_init_rcu(&mark->m.m_list);
	mark->m.mnt = NULL;

	/* drop this mark's events from the mount's aggregate mask */
	fsnotify_recalc_vfsmount_mask_locked(mnt);

	spin_unlock(&mnt->mnt_root->d_lock);
}
111
112 static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_group *group,
113 struct vfsmount *mnt)
114 {
115 struct mount *m = real_mount(mnt);
116 struct fsnotify_mark *mark;
117 struct hlist_node *pos;
118
119 assert_spin_locked(&mnt->mnt_root->d_lock);
120
121 hlist_for_each_entry(mark, pos, &m->mnt_fsnotify_marks, m.m_list) {
122 if (mark->group == group) {
123 fsnotify_get_mark(mark);
124 return mark;
125 }
126 }
127 return NULL;
128 }
129
130 /*
131 * given a group and vfsmount, find the mark associated with that combination.
132 * if found take a reference to that mark and return it, else return NULL
133 */
134 struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group,
135 struct vfsmount *mnt)
136 {
137 struct fsnotify_mark *mark;
138
139 spin_lock(&mnt->mnt_root->d_lock);
140 mark = fsnotify_find_vfsmount_mark_locked(group, mnt);
141 spin_unlock(&mnt->mnt_root->d_lock);
142
143 return mark;
144 }
145
146 /*
147 * Attach an initialized mark to a given group and vfsmount.
148 * These marks may be used for the fsnotify backend to determine which
149 * event types should be delivered to which groups.
150 */
/*
 * Attach an initialized mark to a given group and vfsmount.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which groups.
 *
 * The mount's list is kept sorted by descending group priority, with ties
 * broken by group pointer value, so event delivery walks groups in a
 * stable, priority-respecting order.  Returns 0 on success or -EEXIST if
 * the group already has a mark on this mount and @allow_dups is false.
 *
 * Caller must hold group->mark_mutex and mark->lock (asserted below).
 */
int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
			       struct fsnotify_group *group, struct vfsmount *mnt,
			       int allow_dups)
{
	struct mount *m = real_mount(mnt);
	struct fsnotify_mark *lmark;
	struct hlist_node *node, *last = NULL;
	int ret = 0;

	mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;

	BUG_ON(!mutex_is_locked(&group->mark_mutex));
	assert_spin_locked(&mark->lock);

	spin_lock(&mnt->mnt_root->d_lock);

	mark->m.mnt = mnt;

	/* is mark the first mark? */
	if (hlist_empty(&m->mnt_fsnotify_marks)) {
		hlist_add_head_rcu(&mark->m.m_list, &m->mnt_fsnotify_marks);
		goto out;
	}

	/* should mark be in the middle of the current list? */
	hlist_for_each_entry(lmark, node, &m->mnt_fsnotify_marks, m.m_list) {
		last = node;

		if ((lmark->group == group) && !allow_dups) {
			ret = -EEXIST;
			goto out;
		}

		/* keep scanning while existing marks outrank the new one */
		if (mark->group->priority < lmark->group->priority)
			continue;

		/* equal priority: order by group pointer for stability */
		if ((mark->group->priority == lmark->group->priority) &&
		    (mark->group < lmark->group))
			continue;

		hlist_add_before_rcu(&mark->m.m_list, &lmark->m.m_list);
		goto out;
	}

	BUG_ON(last == NULL);
	/* mark should be the last entry.  last is the current last entry */
	hlist_add_after_rcu(last, &mark->m.m_list);
out:
	/* fold the new/updated mark set into the mount's event mask */
	fsnotify_recalc_vfsmount_mask_locked(mnt);
	spin_unlock(&mnt->mnt_root->d_lock);

	return ret;
}