fs/aufs/wkq.c
/*
 * Copyright (C) 2005-2017 Junjiro R. Okajima
 *
 * This program, aufs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * workqueue for asynchronous/super-io operations
 * todo: try new credential scheme
 */

#include <linux/module.h>
#include "aufs.h"

/* internal workqueue named AUFS_WKQ_NAME */

static struct workqueue_struct *au_wkq;

struct au_wkinfo {
	struct work_struct wk;
	struct kobject *kobj;

	unsigned int flags; /* see wkq.h */

	au_wkq_func_t func;
	void *args;

	struct completion *comp;
};
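
/*
 * Lifecycle note (from the code below): with AuWkq_WAIT the wkinfo lives on
 * the caller's stack and the caller frees the completion; on the nowait path
 * it is kmalloc-ed in au_wkq_nowait() and freed by wkq_func() itself.
 */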

/* ---------------------------------------------------------------------- */

static void wkq_func(struct work_struct *wk)
{
	struct au_wkinfo *wkinfo = container_of(wk, struct au_wkinfo, wk);

	AuDebugOn(!uid_eq(current_fsuid(), GLOBAL_ROOT_UID));
	AuDebugOn(rlimit(RLIMIT_FSIZE) != RLIM_INFINITY);

	wkinfo->func(wkinfo->args);
	if (au_ftest_wkq(wkinfo->flags, WAIT))
		complete(wkinfo->comp);
	else {
		kobject_put(wkinfo->kobj);
		module_put(THIS_MODULE); /* todo: ?? */
		kfree(wkinfo);
	}
}

/*
 * Since struct completion is large, try allocating it dynamically.
 */
#if 1 /* defined(CONFIG_4KSTACKS) || defined(AuTest4KSTACKS) */
#define AuWkqCompDeclare(name)	struct completion *comp = NULL

static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
{
	*comp = kmalloc(sizeof(**comp), GFP_NOFS);
	if (*comp) {
		init_completion(*comp);
		wkinfo->comp = *comp;
		return 0;
	}
	return -ENOMEM;
}

static void au_wkq_comp_free(struct completion *comp)
{
	kfree(comp);
}

#else

/* no braces */
#define AuWkqCompDeclare(name) \
	DECLARE_COMPLETION_ONSTACK(_ ## name); \
	struct completion *comp = &_ ## name

static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp)
{
	wkinfo->comp = *comp;
	return 0;
}

static void au_wkq_comp_free(struct completion *comp __maybe_unused)
{
	/* empty */
}
#endif /* 4KSTACKS */

static void au_wkq_run(struct au_wkinfo *wkinfo)
{
	if (au_ftest_wkq(wkinfo->flags, NEST)) {
		if (au_wkq_test()) {
			AuWarn1("wkq from wkq, unless silly-rename on NFS,"
				" due to a dead dir by UDBA?\n");
			AuDebugOn(au_ftest_wkq(wkinfo->flags, WAIT));
		}
	} else
		au_dbg_verify_kthread();

	if (au_ftest_wkq(wkinfo->flags, WAIT)) {
		/* the caller waits; wkinfo lives on its stack */
		INIT_WORK_ONSTACK(&wkinfo->wk, wkq_func);
		queue_work(au_wkq, &wkinfo->wk);
	} else {
		/* fire-and-forget via the system workqueue */
		INIT_WORK(&wkinfo->wk, wkq_func);
		schedule_work(&wkinfo->wk);
	}
}

/*
 * Be careful; it is easy to cause a deadlock.
 * processA: lock, wkq and wait
 * processB: wkq and wait, lock in wkq
 * --> deadlock
 */
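/*
 * Concretely: if processA holds a mutex and then calls au_wkq_do_wait(),
 * while processB has already queued (and is waiting for) a function that
 * takes the same mutex, processB's function blocks on the mutex, processA's
 * function never gets to run on the busy workqueue, and neither side can
 * make progress.
 */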
int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args)
{
	int err;
	AuWkqCompDeclare(comp);
	struct au_wkinfo wkinfo = {
		.flags	= flags,
		.func	= func,
		.args	= args
	};

	err = au_wkq_comp_alloc(&wkinfo, &comp);
	if (!err) {
		au_wkq_run(&wkinfo);
		/* no timeout, no interrupt */
		wait_for_completion(wkinfo.comp);
		au_wkq_comp_free(comp);
		destroy_work_on_stack(&wkinfo.wk);
	}

	return err;
}
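
/*
 * Usage sketch only (not built; the names below are invented for
 * illustration): callers pack their arguments into an on-stack struct, the
 * worker runs with root credentials (see the AuDebugOn() checks in
 * wkq_func()) and hands its result back through that struct.
 */
#if 0
struct my_args {
	int err;
	struct inode *h_inode;
};

static void my_worker(void *args)
{
	struct my_args *a = args;

	/* runs as GLOBAL_ROOT_UID inside the workqueue */
	a->err = 0; /* do the privileged work on a->h_inode here */
}

static int my_caller(struct inode *h_inode)
{
	int err;
	struct my_args a = {
		.err		= 0,
		.h_inode	= h_inode
	};

	err = au_wkq_do_wait(AuWkq_WAIT, my_worker, &a);
	if (!err)
		err = a.err;
	return err;
}
#endif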

/*
 * Note: dget/dput() on aufs dentries inside func is not supported, since it
 * would be a problem during a concurrent unmount.
 */
155int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb,
156 unsigned int flags)
157{
158 int err;
159 struct au_wkinfo *wkinfo;
160
161 atomic_inc(&au_sbi(sb)->si_nowait.nw_len);
162
163 /*
164 * wkq_func() must free this wkinfo.
165 * it highly depends upon the implementation of workqueue.
166 */
167 err = 0;
168 wkinfo = kmalloc(sizeof(*wkinfo), GFP_NOFS);
169 if (wkinfo) {
170 wkinfo->kobj = &au_sbi(sb)->si_kobj;
171 wkinfo->flags = flags & ~AuWkq_WAIT;
172 wkinfo->func = func;
173 wkinfo->args = args;
174 wkinfo->comp = NULL;
175 kobject_get(wkinfo->kobj);
176 __module_get(THIS_MODULE); /* todo: ?? */
177
178 au_wkq_run(wkinfo);
179 } else {
180 err = -ENOMEM;
181 au_nwt_done(&au_sbi(sb)->si_nowait);
182 }
183
184 return err;
185}
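
/*
 * Fire-and-forget sketch only (not built; the names below are invented):
 * since au_wkq_nowait() bumps si_nowait.nw_len and drops it itself only on
 * the error path, the queued function is expected to free its own arguments
 * and call au_nwt_done() when it finishes.
 */
#if 0
struct my_nowait_args {
	struct super_block *sb;
	/* ... whatever else the worker needs ... */
};

static void my_nowait_worker(void *args)
{
	struct my_nowait_args *a = args;
	struct super_block *sb = a->sb;

	/* do the asynchronous work here */
	kfree(a);
	au_nwt_done(&au_sbi(sb)->si_nowait);
}

static int my_dispatch(struct super_block *sb)
{
	int err;
	struct my_nowait_args *a;

	a = kmalloc(sizeof(*a), GFP_NOFS);
	if (!a)
		return -ENOMEM;
	a->sb = sb;
	err = au_wkq_nowait(my_nowait_worker, a, sb, /*flags*/0);
	if (err)
		kfree(a); /* never queued; au_wkq_nowait() did au_nwt_done() */
	return err;
}
#endif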

/* ---------------------------------------------------------------------- */

void au_nwt_init(struct au_nowait_tasks *nwt)
{
	atomic_set(&nwt->nw_len, 0);
	/* smp_mb(); */ /* atomic_set */
	init_waitqueue_head(&nwt->nw_wq);
}
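
/*
 * For reference, the consumers of nw_len/nw_wq are not in this file; the
 * decrement/wake and flush/wait helpers presumably live in wkq.h and look
 * roughly like the sketch below (an assumption, not the verbatim
 * implementation):
 *
 *	static inline void au_nwt_done(struct au_nowait_tasks *nwt)
 *	{
 *		if (atomic_dec_and_test(&nwt->nw_len))
 *			wake_up_all(&nwt->nw_wq);
 *	}
 *
 *	static inline int au_nwt_flush(struct au_nowait_tasks *nwt)
 *	{
 *		wait_event(nwt->nw_wq, !atomic_read(&nwt->nw_len));
 *		return 0;
 *	}
 */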

void au_wkq_fin(void)
{
	destroy_workqueue(au_wkq);
}

int __init au_wkq_init(void)
{
	int err;

	err = 0;
	au_wkq = alloc_workqueue(AUFS_WKQ_NAME, 0, WQ_DFL_ACTIVE);
	if (IS_ERR(au_wkq))
		err = PTR_ERR(au_wkq);
	else if (!au_wkq)
		err = -ENOMEM;

	return err;
}