/*
 * Copyright (C) 2005-2017 Junjiro R. Okajima
 *
 * This program, aufs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * workqueue for asynchronous/super-io operations
 * todo: try new credential scheme
 */

#include <linux/module.h>
#include "aufs.h"

/* internal workqueue named AUFS_WKQ_NAME */

static struct workqueue_struct *au_wkq;

struct au_wkinfo {
	struct work_struct wk;
	struct kobject *kobj;

	unsigned int flags; /* see wkq.h */

	au_wkq_func_t func;
	void *args;

#ifdef CONFIG_LOCKDEP
	int dont_check;
	struct held_lock **hlock;
#endif

	struct completion *comp;
};

/* ---------------------------------------------------------------------- */
/*
 * Aufs passes some operations, such as the internal copyup, to the workqueue.
 * This scheme looks unnatural to the LOCKDEP debugging feature, since the job
 * run by the workqueue depends upon the locks acquired in the other task.
 * When delegating a small operation to the workqueue, aufs passes its lockdep
 * information too, and the job in the workqueue restores that info in order
 * to pretend as if it had acquired those locks.  This is just to make LOCKDEP
 * work correctly and as expected.
 */

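/*
 * An illustrative sketch, not part of aufs: a task holding an aufs rwsem
 * delegates a job and waits for it.  While example_job() runs in au_wkq, the
 * lockdep info copied by au_wkq_lockdep_alloc() is replayed by
 * au_wkq_lockdep_pre(), so LOCKDEP treats the worker as if it had acquired
 * the caller's rwsems.  example_job()/example_caller() are hypothetical, and
 * si_write_lock()/si_write_unlock() are assumed to be the usual si_rwsem
 * wrappers.
 */
#if 0
static void example_job(void *args)
{
	/* runs in au_wkq with the caller's lockdep context replayed */
}

static int example_caller(struct super_block *sb)
{
	int err;

	si_write_lock(sb, AuLock_FLUSH);
	err = au_wkq_do_wait(AuWkq_WAIT, example_job, sb);
	si_write_unlock(sb);
	return err;
}
#endif
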
#ifndef CONFIG_LOCKDEP
AuStubInt0(au_wkq_lockdep_alloc, struct au_wkinfo *wkinfo);
AuStubVoid(au_wkq_lockdep_free, struct au_wkinfo *wkinfo);
AuStubVoid(au_wkq_lockdep_pre, struct au_wkinfo *wkinfo);
AuStubVoid(au_wkq_lockdep_post, struct au_wkinfo *wkinfo);
AuStubVoid(au_wkq_lockdep_init, struct au_wkinfo *wkinfo);
#else
static void au_wkq_lockdep_init(struct au_wkinfo *wkinfo)
{
	wkinfo->hlock = NULL;
	wkinfo->dont_check = 0;
}

/*
 * 1: matched
 * 0: unmatched
 */
static int au_wkq_lockdep_test(struct lock_class_key *key, const char *name)
{
	static DEFINE_SPINLOCK(spin);
	static struct {
		char *name;
		struct lock_class_key *key;
	} a[] = {
		{ .name = "&sbinfo->si_rwsem" },
		{ .name = "&finfo->fi_rwsem" },
		{ .name = "&dinfo->di_rwsem" },
		{ .name = "&iinfo->ii_rwsem" }
	};
	static int set;
	int i;

	/* lockless read from 'set'; see the comment below */
	if (set == ARRAY_SIZE(a)) {
		for (i = 0; i < ARRAY_SIZE(a); i++)
			if (a[i].key == key)
				goto match;
		goto unmatch;
	}

	spin_lock(&spin);
	if (set)
		for (i = 0; i < ARRAY_SIZE(a); i++)
			if (a[i].key == key) {
				spin_unlock(&spin);
				goto match;
			}
	for (i = 0; i < ARRAY_SIZE(a); i++) {
		if (a[i].key) {
			if (unlikely(a[i].key == key)) { /* rare but possible */
				spin_unlock(&spin);
				goto match;
			} else
				continue;
		}
		if (strstr(a[i].name, name)) {
			/*
			 * The order of these three lines is important for the
			 * lockless read above: publish a[i].key before 'set'
			 * reaches ARRAY_SIZE(a), so that a reader who sees the
			 * final 'set' also finds every key.
			 */
			a[i].key = key;
			spin_unlock(&spin);
			set++;
			/* AuDbg("%d, %s\n", set, name); */
			goto match;
		}
	}
	spin_unlock(&spin);
	goto unmatch;

match:
	return 1;
unmatch:
	return 0;
}

static int au_wkq_lockdep_alloc(struct au_wkinfo *wkinfo)
{
	int err, n;
	struct task_struct *curr;
	struct held_lock **hl, *held_locks, *p;

	err = 0;
	curr = current;
	wkinfo->dont_check = lockdep_recursing(curr);
	if (wkinfo->dont_check)
		goto out;
	n = curr->lockdep_depth;
	if (!n)
		goto out;

	err = -ENOMEM;
	wkinfo->hlock = kmalloc_array(n + 1, sizeof(*wkinfo->hlock), GFP_NOFS);
	if (unlikely(!wkinfo->hlock))
		goto out;

	err = 0;
#if 0
	if (0 && au_debug_test()) /* left for debugging */
		lockdep_print_held_locks(curr);
#endif
	held_locks = curr->held_locks;
	hl = wkinfo->hlock;
	while (n--) {
		p = held_locks++;
		if (au_wkq_lockdep_test(p->instance->key, p->instance->name))
			*hl++ = p;
	}
	*hl = NULL;

out:
	return err;
}

static void au_wkq_lockdep_free(struct au_wkinfo *wkinfo)
{
	kfree(wkinfo->hlock);
}

static void au_wkq_lockdep_pre(struct au_wkinfo *wkinfo)
{
	struct held_lock *p, **hl = wkinfo->hlock;
	int subclass;

	if (wkinfo->dont_check)
		lockdep_off();
	if (!hl)
		return;
	while ((p = *hl++)) { /* assignment */
		subclass = lockdep_hlock_class(p)->subclass;
		/* AuDbg("%s, %d\n", p->instance->name, subclass); */
		if (p->read)
			rwsem_acquire_read(p->instance, subclass, 0,
					   /*p->acquire_ip*/_RET_IP_);
		else
			rwsem_acquire(p->instance, subclass, 0,
				      /*p->acquire_ip*/_RET_IP_);
	}
}

static void au_wkq_lockdep_post(struct au_wkinfo *wkinfo)
{
	struct held_lock *p, **hl = wkinfo->hlock;

	if (wkinfo->dont_check)
		lockdep_on();
	if (!hl)
		return;
	while ((p = *hl++)) /* assignment */
		rwsem_release(p->instance, 0, /*p->acquire_ip*/_RET_IP_);
}
#endif

static void wkq_func(struct work_struct *wk)
{
	struct au_wkinfo *wkinfo = container_of(wk, struct au_wkinfo, wk);

	AuDebugOn(!uid_eq(current_fsuid(), GLOBAL_ROOT_UID));
	AuDebugOn(rlimit(RLIMIT_FSIZE) != RLIM_INFINITY);

	au_wkq_lockdep_pre(wkinfo);
	wkinfo->func(wkinfo->args);
	au_wkq_lockdep_post(wkinfo);
	if (au_ftest_wkq(wkinfo->flags, WAIT))
		complete(wkinfo->comp);
	else {
		kobject_put(wkinfo->kobj);
		module_put(THIS_MODULE); /* todo: ?? */
		kfree(wkinfo);
	}
}

/*
 * Since struct completion is large, try allocating it dynamically.
 */
#if 1 /* defined(CONFIG_4KSTACKS) || defined(AuTest4KSTACKS) */
#define AuWkqCompDeclare(name) struct completion *comp = NULL

static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
{
	*comp = kmalloc(sizeof(**comp), GFP_NOFS);
	if (*comp) {
		init_completion(*comp);
		wkinfo->comp = *comp;
		return 0;
	}
	return -ENOMEM;
}

static void au_wkq_comp_free(struct completion *comp)
{
	kfree(comp);
}

#else

/* no braces: this must expand to declarations in the caller's scope */
#define AuWkqCompDeclare(name) \
	DECLARE_COMPLETION_ONSTACK(_ ## name); \
	struct completion *comp = &_ ## name

static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
{
	wkinfo->comp = *comp;
	return 0;
}

static void au_wkq_comp_free(struct completion *comp __maybe_unused)
{
	/* empty */
}
#endif /* 4KSTACKS */

static void au_wkq_run(struct au_wkinfo *wkinfo)
{
	if (au_ftest_wkq(wkinfo->flags, NEST)) {
		if (au_wkq_test()) {
			AuWarn1("wkq from wkq, unless silly-rename on NFS,"
				" due to a dead dir by UDBA?\n");
			AuDebugOn(au_ftest_wkq(wkinfo->flags, WAIT));
		}
	} else
		au_dbg_verify_kthread();

	if (au_ftest_wkq(wkinfo->flags, WAIT)) {
		INIT_WORK_ONSTACK(&wkinfo->wk, wkq_func);
		queue_work(au_wkq, &wkinfo->wk);
	} else {
		INIT_WORK(&wkinfo->wk, wkq_func);
		schedule_work(&wkinfo->wk);
	}
}

/*
 * Be careful.  It is easy to make a deadlock happen:
 *	processA: lock, wkq and wait
 *	processB: wkq and wait, lock in wkq
 *	--> deadlock
 * (a single-task sketch follows after this function)
 */
int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args)
{
	int err;
	AuWkqCompDeclare(comp);
	struct au_wkinfo wkinfo = {
		.flags	= flags,
		.func	= func,
		.args	= args
	};

	err = au_wkq_comp_alloc(&wkinfo, &comp);
	if (unlikely(err))
		goto out;
	err = au_wkq_lockdep_alloc(&wkinfo);
	if (unlikely(err))
		goto out_comp;
	au_wkq_run(&wkinfo);
	/* no timeout, no interrupt */
	wait_for_completion(wkinfo.comp);
	au_wkq_lockdep_free(&wkinfo);

out_comp:
	au_wkq_comp_free(comp);
out:
	destroy_work_on_stack(&wkinfo.wk);
	return err;
}

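/*
 * An illustrative sketch, not part of aufs, of the deadlock warned about
 * above, shown here as a single-task variant.  example_mtx,
 * example_locked_job() and example_deadlock() are hypothetical: the delegated
 * job blocks on a lock which the delegating task keeps holding while it
 * sleeps in wait_for_completion().
 */
#if 0
static DEFINE_MUTEX(example_mtx);

static void example_locked_job(void *args)
{
	mutex_lock(&example_mtx);	/* blocks forever: the caller holds it */
	mutex_unlock(&example_mtx);
}

static void example_deadlock(void)
{
	mutex_lock(&example_mtx);
	/* never returns: example_locked_job() cannot acquire example_mtx */
	au_wkq_do_wait(AuWkq_WAIT, example_locked_job, NULL);
	mutex_unlock(&example_mtx);
}
#endif
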
/*
 * Note: dget/dput() on aufs dentries inside func is not supported; it would
 * be a problem under a concurrent umount.
 */
int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
		  unsigned int flags)
{
	int err;
	struct au_wkinfo *wkinfo;

	atomic_inc(&au_sbi(sb)->si_nowait.nw_len);

	/*
	 * wkq_func() must free this wkinfo.
	 * It highly depends upon the implementation of workqueue.
	 */
	err = 0;
	wkinfo = kmalloc(sizeof(*wkinfo), GFP_NOFS);
	if (wkinfo) {
		wkinfo->kobj = &au_sbi(sb)->si_kobj;
		wkinfo->flags = flags & ~AuWkq_WAIT;
		wkinfo->func = func;
		wkinfo->args = args;
		wkinfo->comp = NULL;
		au_wkq_lockdep_init(wkinfo);
		kobject_get(wkinfo->kobj);
		__module_get(THIS_MODULE); /* todo: ?? */

		au_wkq_run(wkinfo);
	} else {
		err = -ENOMEM;
		au_nwt_done(&au_sbi(sb)->si_nowait);
	}

	return err;
}

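/*
 * An illustrative sketch, not part of aufs (struct example_args,
 * example_free_job() and example_async_free() are hypothetical): a
 * fire-and-forget job.  As the error path above suggests, the nw_len counter
 * incremented by au_wkq_nowait() is expected to be balanced by the job itself
 * via au_nwt_done().
 */
#if 0
struct example_args {
	struct super_block *sb;
	void *p;
};

static void example_free_job(void *args)
{
	struct example_args *a = args;

	kfree(a->p);
	/* balance the nw_len increment done by au_wkq_nowait() */
	au_nwt_done(&au_sbi(a->sb)->si_nowait);
	kfree(a);
}

static int example_async_free(struct super_block *sb, void *p)
{
	struct example_args *a;

	a = kmalloc(sizeof(*a), GFP_NOFS);
	if (unlikely(!a))
		return -ENOMEM;
	a->sb = sb;
	a->p = p;
	return au_wkq_nowait(example_free_job, a, sb, /*flags*/0);
}
#endif
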
/* ---------------------------------------------------------------------- */

void au_nwt_init(struct au_nowait_tasks *nwt)
{
	atomic_set(&nwt->nw_len, 0);
	/* smp_mb(); */ /* atomic_set */
	init_waitqueue_head(&nwt->nw_wq);
}

void au_wkq_fin(void)
{
	destroy_workqueue(au_wkq);
}

int __init au_wkq_init(void)
{
	int err;

	err = 0;
	/* alloc_workqueue() returns NULL on failure, never an ERR_PTR */
	au_wkq = alloc_workqueue(AUFS_WKQ_NAME, 0, WQ_DFL_ACTIVE);
	if (unlikely(!au_wkq))
		err = -ENOMEM;

	return err;
}
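
/*
 * An illustrative sketch, not part of aufs (the real callers live in aufs's
 * module init/exit code): au_wkq_init() and au_wkq_fin() pair up at module
 * load and unload.
 */
#if 0
static int __init example_init(void)
{
	int err;

	err = au_wkq_init();
	if (unlikely(err))
		return err;
	/* ... other initialization; call au_wkq_fin() on a later failure ... */
	return 0;
}

static void __exit example_exit(void)
{
	au_wkq_fin();
}
#endif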