]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - kernel/async.c
workqueue: implement current_is_async()
[mirror_ubuntu-zesty-kernel.git] / kernel / async.c
CommitLineData
22a9d645
AV
1/*
2 * async.c: Asynchronous function calls for boot performance
3 *
4 * (C) Copyright 2009 Intel Corporation
5 * Author: Arjan van de Ven <arjan@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12
13
14/*
15
16Goals and Theory of Operation
17
18The primary goal of this feature is to reduce the kernel boot time,
19by doing various independent hardware delays and discovery operations
20decoupled and not strictly serialized.
21
22More specifically, the asynchronous function call concept allows
23certain operations (primarily during system boot) to happen
24asynchronously, out of order, while these operations still
25have their externally visible parts happen sequentially and in-order.
26(not unlike how out-of-order CPUs retire their instructions in order)
27
28Key to the asynchronous function call implementation is the concept of
29a "sequence cookie" (which, although it has an abstracted type, can be
30thought of as a monotonically incrementing number).
31
32The async core will assign each scheduled event such a sequence cookie and
33pass this to the called functions.
34
35The asynchronously called function should before doing a globally visible
36operation, such as registering device numbers, call the
37async_synchronize_cookie() function and pass in its own cookie. The
38async_synchronize_cookie() function will make sure that all asynchronous
39operations that were scheduled prior to the operation corresponding with the
40cookie have completed.
41
Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This is to maintain strict ordering between the
asynchronous and synchronous parts of the kernel.
48
49*/
50
51#include <linux/async.h>
84c15027
PM
52#include <linux/atomic.h>
53#include <linux/ktime.h>
9984de1a 54#include <linux/export.h>
22a9d645
AV
55#include <linux/wait.h>
56#include <linux/sched.h>
5a0e3ad6 57#include <linux/slab.h>
083b804c 58#include <linux/workqueue.h>
22a9d645 59
84b233ad
TH
60#include "workqueue_internal.h"
61
22a9d645
AV
/* next cookie to hand out; also doubles as the "infinity" value */
static async_cookie_t next_cookie = 1;

/* cap on outstanding entries before scheduling falls back to synchronous */
#define MAX_WORK	32768

static LIST_HEAD(async_pending);	/* not-yet-run entries, all domains */
static ASYNC_DOMAIN(async_running);	/* default domain for async_schedule() */
static LIST_HEAD(async_domains);	/* registered domains with work in flight */
static DEFINE_SPINLOCK(async_lock);	/* protects all of the lists above */
static DEFINE_MUTEX(async_register_mutex); /* serializes full-sync vs unregister */

/* one scheduled asynchronous function call */
struct async_entry {
	struct list_head	list;	/* link on async_pending or a domain list */
	struct work_struct	work;	/* executes async_run_entry_fn() */
	async_cookie_t		cookie;	/* ordering checkpoint for this call */
	async_func_ptr		*func;	/* function to invoke asynchronously */
	void			*data;	/* argument passed to @func */
	struct async_domain	*running; /* domain this entry belongs to */
};

/* woken whenever an entry completes; waiters recheck lowest_in_progress() */
static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;		/* number of allocated async_entry's */
22a9d645 84
22a9d645
AV
85
86/*
87 * MUST be called with the lock held!
88 */
2955b47d 89static async_cookie_t __lowest_in_progress(struct async_domain *running)
22a9d645
AV
90{
91 struct async_entry *entry;
d5a877e8 92
2955b47d
DW
93 if (!list_empty(&running->domain)) {
94 entry = list_first_entry(&running->domain, typeof(*entry), list);
3af968e0 95 return entry->cookie;
22a9d645
AV
96 }
97
3af968e0
LT
98 list_for_each_entry(entry, &async_pending, list)
99 if (entry->running == running)
100 return entry->cookie;
d5a877e8 101
3af968e0 102 return next_cookie; /* "infinity" value */
22a9d645 103}
37a76bd4 104
2955b47d 105static async_cookie_t lowest_in_progress(struct async_domain *running)
37a76bd4
AV
106{
107 unsigned long flags;
108 async_cookie_t ret;
109
110 spin_lock_irqsave(&async_lock, flags);
111 ret = __lowest_in_progress(running);
112 spin_unlock_irqrestore(&async_lock, flags);
113 return ret;
114}
083b804c 115
22a9d645
AV
/*
 * pick the first pending entry and run it
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	/* calltime is only written/read when boot-time initcall_debug is on */
	ktime_t uninitialized_var(calltime), delta, rettime;
	struct async_domain *running = entry->running;

	/* 1) move self to the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_move_tail(&entry->list, &running->domain);
	spin_unlock_irqrestore(&async_lock, flags);

	/* 2) run (and print duration) */
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
			(long long)entry->cookie,
			entry->func, task_pid_nr(current));
		calltime = ktime_get();
	}
	entry->func(entry->data, entry->cookie);
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		/* >> 10 approximates ns-to-usec (divide by 1024) */
		printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
			(long long)entry->cookie,
			entry->func,
			(long long)ktime_to_ns(delta) >> 10);
	}

	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	/* last entry of a registered domain drops it from async_domains */
	if (running->registered && --running->count == 0)
		list_del_init(&running->node);

	/* 4) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 5) wake up any waiters */
	wake_up(&async_done);
}
164
/*
 * Queue @ptr/@data for asynchronous execution in domain @running and
 * return the assigned cookie.
 *
 * Falls back to calling @ptr synchronously when the entry cannot be
 * allocated or when more than MAX_WORK entries are already outstanding,
 * so the call is always complete by the time a later synchronization on
 * the returned cookie finishes.
 */
static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory.. run synchronously */
		ptr(data, newcookie);
		return newcookie;
	}
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = ptr;
	entry->data = data;
	entry->running = running;

	/*
	 * Cookie assignment and list insertion happen under one lock hold
	 * so async_pending stays sorted by cookie.
	 */
	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	/* first entry of a registered domain puts it on async_domains */
	if (running->registered && running->count++ == 0)
		list_add_tail(&running->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work(system_unbound_wq, &entry->work);

	return newcookie;
}
206
f30d5b30
CH
207/**
208 * async_schedule - schedule a function for asynchronous execution
209 * @ptr: function to execute asynchronously
210 * @data: data pointer to pass to the function
211 *
212 * Returns an async_cookie_t that may be used for checkpointing later.
213 * Note: This function may be called from atomic or non-atomic contexts.
214 */
22a9d645
AV
215async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
216{
7a89bbc7 217 return __async_schedule(ptr, data, &async_running);
22a9d645
AV
218}
219EXPORT_SYMBOL_GPL(async_schedule);
220
f30d5b30 221/**
766ccb9e 222 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
f30d5b30
CH
223 * @ptr: function to execute asynchronously
224 * @data: data pointer to pass to the function
766ccb9e 225 * @running: running list for the domain
f30d5b30
CH
226 *
227 * Returns an async_cookie_t that may be used for checkpointing later.
766ccb9e
CH
228 * @running may be used in the async_synchronize_*_domain() functions
229 * to wait within a certain synchronization domain rather than globally.
230 * A synchronization domain is specified via the running queue @running to use.
f30d5b30
CH
231 * Note: This function may be called from atomic or non-atomic contexts.
232 */
766ccb9e 233async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
2955b47d 234 struct async_domain *running)
22a9d645
AV
235{
236 return __async_schedule(ptr, data, running);
237}
766ccb9e 238EXPORT_SYMBOL_GPL(async_schedule_domain);
22a9d645 239
f30d5b30
CH
/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	/* serialize against async_unregister_domain() */
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		/* peek at the first registered domain with work, if any */
		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains, typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		/*
		 * next_cookie is the "infinity" value: wait for everything
		 * scheduled in this domain so far.  A NULL domain makes the
		 * call a no-op; the loop then re-checks async_domains.
		 */
		async_synchronize_cookie_domain(next_cookie, domain);
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
261
a4683487
DW
/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
 */
void async_unregister_domain(struct async_domain *domain)
{
	mutex_lock(&async_register_mutex);
	spin_lock_irq(&async_lock);
	/* the domain must be idle: off async_domains, with no entries left */
	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
		!list_empty(&domain->domain));
	/* clearing this keeps __async_schedule() from re-linking the node */
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);
282
f30d5b30 283/**
766ccb9e 284 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
2955b47d 285 * @domain: running list to synchronize on
f30d5b30 286 *
766ccb9e 287 * This function waits until all asynchronous function calls for the
2955b47d 288 * synchronization domain specified by the running list @domain have been done.
f30d5b30 289 */
2955b47d 290void async_synchronize_full_domain(struct async_domain *domain)
22a9d645 291{
2955b47d 292 async_synchronize_cookie_domain(next_cookie, domain);
22a9d645 293}
766ccb9e 294EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
22a9d645 295
/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
{
	/* starttime is only touched when boot-time initcall_debug is on */
	ktime_t uninitialized_var(starttime), delta, endtime;

	/* a NULL domain means there is nothing to wait for */
	if (!running)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
	}

	/* woken by async_run_entry_fn() each time an entry completes */
	wait_event(async_done, lowest_in_progress(running) >= cookie);

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		endtime = ktime_get();
		delta = ktime_sub(endtime, starttime);

		/* >> 10 approximates ns-to-usec (divide by 1024) */
		printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
			task_pid_nr(current),
			(long long)ktime_to_ns(delta) >> 10);
	}
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);
22a9d645 329
f30d5b30
CH
330/**
331 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
332 * @cookie: async_cookie_t to use as checkpoint
333 *
334 * This function waits until all asynchronous function calls prior to @cookie
335 * have been done.
336 */
22a9d645
AV
337void async_synchronize_cookie(async_cookie_t cookie)
338{
766ccb9e 339 async_synchronize_cookie_domain(cookie, &async_running);
22a9d645
AV
340}
341EXPORT_SYMBOL_GPL(async_synchronize_cookie);
84b233ad
TH
342
343/**
344 * current_is_async - is %current an async worker task?
345 *
346 * Returns %true if %current is an async worker task.
347 */
348bool current_is_async(void)
349{
350 struct worker *worker = current_wq_worker();
351
352 return worker && worker->current_func == async_run_entry_fn;
353}