/*
 * Copyright (C) 2005-2017 Junjiro R. Okajima
 *
 * This program, aufs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * workqueue for asynchronous/super-io operations
 * todo: try new credential scheme
 */
23 #include <linux/module.h>
26 /* internal workqueue named AUFS_WKQ_NAME */
28 static struct workqueue_struct
*au_wkq
;
31 struct work_struct wk
;
34 unsigned int flags
; /* see wkq.h */
41 struct held_lock
**hlock
;
44 struct completion
*comp
;
/* ---------------------------------------------------------------------- */

/*
 * Aufs passes some operations to the workqueue such as the internal copyup.
 * This scheme looks rather unnatural for LOCKDEP debugging feature, since the
 * job run by workqueue depends upon the locks acquired in the other task.
 * Delegating a small operation to the workqueue, aufs passes its lockdep
 * information too. And the job in the workqueue restores the info in order to
 * pretend as if it acquired those locks. This is just to make LOCKDEP work
 * correctly and expectedly.
 */
58 #ifndef CONFIG_LOCKDEP
59 AuStubInt0(au_wkq_lockdep_alloc
, struct au_wkinfo
*wkinfo
);
60 AuStubVoid(au_wkq_lockdep_free
, struct au_wkinfo
*wkinfo
);
61 AuStubVoid(au_wkq_lockdep_pre
, struct au_wkinfo
*wkinfo
);
62 AuStubVoid(au_wkq_lockdep_post
, struct au_wkinfo
*wkinfo
);
63 AuStubVoid(au_wkq_lockdep_init
, struct au_wkinfo
*wkinfo
);
65 static void au_wkq_lockdep_init(struct au_wkinfo
*wkinfo
)
68 wkinfo
->dont_check
= 0;
75 static int au_wkq_lockdep_test(struct lock_class_key
*key
, const char *name
)
77 static DEFINE_SPINLOCK(spin
);
80 struct lock_class_key
*key
;
82 { .name
= "&sbinfo->si_rwsem" },
83 { .name
= "&finfo->fi_rwsem" },
84 { .name
= "&dinfo->di_rwsem" },
85 { .name
= "&iinfo->ii_rwsem" }
90 /* lockless read from 'set.' see below */
91 if (set
== ARRAY_SIZE(a
)) {
92 for (i
= 0; i
< ARRAY_SIZE(a
); i
++)
100 for (i
= 0; i
< ARRAY_SIZE(a
); i
++)
101 if (a
[i
].key
== key
) {
105 for (i
= 0; i
< ARRAY_SIZE(a
); i
++) {
107 if (unlikely(a
[i
].key
== key
)) { /* rare but possible */
113 if (strstr(a
[i
].name
, name
)) {
115 * the order of these three lines is important for the
116 * lockless read above.
121 /* AuDbg("%d, %s\n", set, name); */
134 static int au_wkq_lockdep_alloc(struct au_wkinfo
*wkinfo
)
137 struct task_struct
*curr
;
138 struct held_lock
**hl
, *held_locks
, *p
;
142 wkinfo
->dont_check
= lockdep_recursing(curr
);
143 if (wkinfo
->dont_check
)
145 n
= curr
->lockdep_depth
;
150 wkinfo
->hlock
= kmalloc_array(n
+ 1, sizeof(*wkinfo
->hlock
), GFP_NOFS
);
151 if (unlikely(!wkinfo
->hlock
))
156 if (0 && au_debug_test()) /* left for debugging */
157 lockdep_print_held_locks(curr
);
159 held_locks
= curr
->held_locks
;
163 if (au_wkq_lockdep_test(p
->instance
->key
, p
->instance
->name
))
172 static void au_wkq_lockdep_free(struct au_wkinfo
*wkinfo
)
174 kfree(wkinfo
->hlock
);
177 static void au_wkq_lockdep_pre(struct au_wkinfo
*wkinfo
)
179 struct held_lock
*p
, **hl
= wkinfo
->hlock
;
182 if (wkinfo
->dont_check
)
186 while ((p
= *hl
++)) { /* assignment */
187 subclass
= lockdep_hlock_class(p
)->subclass
;
188 /* AuDbg("%s, %d\n", p->instance->name, subclass); */
190 rwsem_acquire_read(p
->instance
, subclass
, 0,
191 /*p->acquire_ip*/_RET_IP_
);
193 rwsem_acquire(p
->instance
, subclass
, 0,
194 /*p->acquire_ip*/_RET_IP_
);
198 static void au_wkq_lockdep_post(struct au_wkinfo
*wkinfo
)
200 struct held_lock
*p
, **hl
= wkinfo
->hlock
;
202 if (wkinfo
->dont_check
)
206 while ((p
= *hl
++)) /* assignment */
207 rwsem_release(p
->instance
, 0, /*p->acquire_ip*/_RET_IP_
);
211 static void wkq_func(struct work_struct
*wk
)
213 struct au_wkinfo
*wkinfo
= container_of(wk
, struct au_wkinfo
, wk
);
215 AuDebugOn(!uid_eq(current_fsuid(), GLOBAL_ROOT_UID
));
216 AuDebugOn(rlimit(RLIMIT_FSIZE
) != RLIM_INFINITY
);
218 au_wkq_lockdep_pre(wkinfo
);
219 wkinfo
->func(wkinfo
->args
);
220 au_wkq_lockdep_post(wkinfo
);
221 if (au_ftest_wkq(wkinfo
->flags
, WAIT
))
222 complete(wkinfo
->comp
);
224 kobject_put(wkinfo
->kobj
);
225 module_put(THIS_MODULE
); /* todo: ?? */
231 * Since struct completion is large, try allocating it dynamically.
233 #if 1 /* defined(CONFIG_4KSTACKS) || defined(AuTest4KSTACKS) */
234 #define AuWkqCompDeclare(name) struct completion *comp = NULL
236 static int au_wkq_comp_alloc(struct au_wkinfo
*wkinfo
, struct completion
**comp
)
238 *comp
= kmalloc(sizeof(**comp
), GFP_NOFS
);
240 init_completion(*comp
);
241 wkinfo
->comp
= *comp
;
247 static void au_wkq_comp_free(struct completion
*comp
)
255 #define AuWkqCompDeclare(name) \
256 DECLARE_COMPLETION_ONSTACK(_ ## name); \
257 struct completion *comp = &_ ## name
259 static int au_wkq_comp_alloc(struct au_wkinfo
*wkinfo
, struct completion
**comp
)
261 wkinfo
->comp
= *comp
;
265 static void au_wkq_comp_free(struct completion
*comp __maybe_unused
)
269 #endif /* 4KSTACKS */
271 static void au_wkq_run(struct au_wkinfo
*wkinfo
)
273 if (au_ftest_wkq(wkinfo
->flags
, NEST
)) {
275 AuWarn1("wkq from wkq, unless silly-rename on NFS,"
276 " due to a dead dir by UDBA?\n");
277 AuDebugOn(au_ftest_wkq(wkinfo
->flags
, WAIT
));
280 au_dbg_verify_kthread();
282 if (au_ftest_wkq(wkinfo
->flags
, WAIT
)) {
283 INIT_WORK_ONSTACK(&wkinfo
->wk
, wkq_func
);
284 queue_work(au_wkq
, &wkinfo
->wk
);
286 INIT_WORK(&wkinfo
->wk
, wkq_func
);
287 schedule_work(&wkinfo
->wk
);
292 * Be careful. It is easy to make deadlock happen.
293 * processA: lock, wkq and wait
294 * processB: wkq and wait, lock in wkq
297 int au_wkq_do_wait(unsigned int flags
, au_wkq_func_t func
, void *args
)
300 AuWkqCompDeclare(comp
);
301 struct au_wkinfo wkinfo
= {
307 err
= au_wkq_comp_alloc(&wkinfo
, &comp
);
310 err
= au_wkq_lockdep_alloc(&wkinfo
);
315 /* no timeout, no interrupt */
316 wait_for_completion(wkinfo
.comp
);
318 au_wkq_lockdep_free(&wkinfo
);
321 au_wkq_comp_free(comp
);
323 destroy_work_on_stack(&wkinfo
.wk
);
328 * Note: dget/dput() in func for aufs dentries are not supported. It will be a
329 * problem in a concurrent umounting.
331 int au_wkq_nowait(au_wkq_func_t func
, void *args
, struct super_block
*sb
,
335 struct au_wkinfo
*wkinfo
;
337 atomic_inc(&au_sbi(sb
)->si_nowait
.nw_len
);
340 * wkq_func() must free this wkinfo.
341 * it highly depends upon the implementation of workqueue.
344 wkinfo
= kmalloc(sizeof(*wkinfo
), GFP_NOFS
);
346 wkinfo
->kobj
= &au_sbi(sb
)->si_kobj
;
347 wkinfo
->flags
= flags
& ~AuWkq_WAIT
;
351 au_wkq_lockdep_init(wkinfo
);
352 kobject_get(wkinfo
->kobj
);
353 __module_get(THIS_MODULE
); /* todo: ?? */
358 au_nwt_done(&au_sbi(sb
)->si_nowait
);
/* ---------------------------------------------------------------------- */
366 void au_nwt_init(struct au_nowait_tasks
*nwt
)
368 atomic_set(&nwt
->nw_len
, 0);
369 /* smp_mb(); */ /* atomic_set */
370 init_waitqueue_head(&nwt
->nw_wq
);
373 void au_wkq_fin(void)
375 destroy_workqueue(au_wkq
);
378 int __init
au_wkq_init(void)
383 au_wkq
= alloc_workqueue(AUFS_WKQ_NAME
, 0, WQ_DFL_ACTIVE
);
385 err
= PTR_ERR(au_wkq
);