/*
 * drivers/base/power/wakeup.c - System wakeup events framework
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/sched.h>
12 #include <linux/capability.h>
13 #include <linux/suspend.h>
20 * If set, the suspend/hibernate code will abort transitions to a sleep state
21 * if wakeup events are registered during or immediately before the transition.
23 bool events_check_enabled
;
25 /* The counter of registered wakeup events. */
26 static atomic_t event_count
= ATOMIC_INIT(0);
27 /* A preserved old value of event_count. */
28 static unsigned int saved_count
;
29 /* The counter of wakeup events being processed. */
30 static atomic_t events_in_progress
= ATOMIC_INIT(0);
32 static DEFINE_SPINLOCK(events_lock
);
34 static void pm_wakeup_timer_fn(unsigned long data
);
36 static LIST_HEAD(wakeup_sources
);
39 * wakeup_source_create - Create a struct wakeup_source object.
40 * @name: Name of the new wakeup source.
42 struct wakeup_source
*wakeup_source_create(const char *name
)
44 struct wakeup_source
*ws
;
46 ws
= kzalloc(sizeof(*ws
), GFP_KERNEL
);
50 spin_lock_init(&ws
->lock
);
52 ws
->name
= kstrdup(name
, GFP_KERNEL
);
56 EXPORT_SYMBOL_GPL(wakeup_source_create
);
59 * wakeup_source_destroy - Destroy a struct wakeup_source object.
60 * @ws: Wakeup source to destroy.
62 void wakeup_source_destroy(struct wakeup_source
*ws
)
67 spin_lock_irq(&ws
->lock
);
69 spin_unlock_irq(&ws
->lock
);
71 schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT
));
73 spin_lock_irq(&ws
->lock
);
75 spin_unlock_irq(&ws
->lock
);
80 EXPORT_SYMBOL_GPL(wakeup_source_destroy
);
83 * wakeup_source_add - Add given object to the list of wakeup sources.
84 * @ws: Wakeup source object to add to the list.
86 void wakeup_source_add(struct wakeup_source
*ws
)
91 setup_timer(&ws
->timer
, pm_wakeup_timer_fn
, (unsigned long)ws
);
94 spin_lock_irq(&events_lock
);
95 list_add_rcu(&ws
->entry
, &wakeup_sources
);
96 spin_unlock_irq(&events_lock
);
99 EXPORT_SYMBOL_GPL(wakeup_source_add
);
102 * wakeup_source_remove - Remove given object from the wakeup sources list.
103 * @ws: Wakeup source object to remove from the list.
105 void wakeup_source_remove(struct wakeup_source
*ws
)
110 spin_lock_irq(&events_lock
);
111 list_del_rcu(&ws
->entry
);
112 spin_unlock_irq(&events_lock
);
115 EXPORT_SYMBOL_GPL(wakeup_source_remove
);
/**
 * wakeup_source_register - Create wakeup source and add it to the list.
 * @name: Name of the wakeup source to register.
 *
 * Returns the new wakeup source, or NULL if its allocation failed.
 */
struct wakeup_source *wakeup_source_register(const char *name)
{
	struct wakeup_source *ws;

	ws = wakeup_source_create(name);
	if (ws)
		wakeup_source_add(ws);

	return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);
/**
 * wakeup_source_unregister - Remove wakeup source from the list and remove it.
 * @ws: Wakeup source object to unregister.
 *
 * Counterpart of wakeup_source_register().  May sleep (via
 * wakeup_source_destroy()).
 */
void wakeup_source_unregister(struct wakeup_source *ws)
{
	wakeup_source_remove(ws);
	wakeup_source_destroy(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);
145 * device_wakeup_attach - Attach a wakeup source object to a device object.
146 * @dev: Device to handle.
147 * @ws: Wakeup source object to attach to @dev.
149 * This causes @dev to be treated as a wakeup device.
151 static int device_wakeup_attach(struct device
*dev
, struct wakeup_source
*ws
)
153 spin_lock_irq(&dev
->power
.lock
);
154 if (dev
->power
.wakeup
) {
155 spin_unlock_irq(&dev
->power
.lock
);
158 dev
->power
.wakeup
= ws
;
159 spin_unlock_irq(&dev
->power
.lock
);
164 * device_wakeup_enable - Enable given device to be a wakeup source.
165 * @dev: Device to handle.
167 * Create a wakeup source object, register it and attach it to @dev.
169 int device_wakeup_enable(struct device
*dev
)
171 struct wakeup_source
*ws
;
174 if (!dev
|| !dev
->power
.can_wakeup
)
177 ws
= wakeup_source_register(dev_name(dev
));
181 ret
= device_wakeup_attach(dev
, ws
);
183 wakeup_source_unregister(ws
);
187 EXPORT_SYMBOL_GPL(device_wakeup_enable
);
190 * device_wakeup_detach - Detach a device's wakeup source object from it.
191 * @dev: Device to detach the wakeup source object from.
193 * After it returns, @dev will not be treated as a wakeup device any more.
195 static struct wakeup_source
*device_wakeup_detach(struct device
*dev
)
197 struct wakeup_source
*ws
;
199 spin_lock_irq(&dev
->power
.lock
);
200 ws
= dev
->power
.wakeup
;
201 dev
->power
.wakeup
= NULL
;
202 spin_unlock_irq(&dev
->power
.lock
);
207 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
208 * @dev: Device to handle.
210 * Detach the @dev's wakeup source object from it, unregister this wakeup source
211 * object and destroy it.
213 int device_wakeup_disable(struct device
*dev
)
215 struct wakeup_source
*ws
;
217 if (!dev
|| !dev
->power
.can_wakeup
)
220 ws
= device_wakeup_detach(dev
);
222 wakeup_source_unregister(ws
);
226 EXPORT_SYMBOL_GPL(device_wakeup_disable
);
229 * device_init_wakeup - Device wakeup initialization.
230 * @dev: Device to handle.
231 * @enable: Whether or not to enable @dev as a wakeup device.
233 * By default, most devices should leave wakeup disabled. The exceptions are
234 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
235 * possibly network interfaces, etc.
237 int device_init_wakeup(struct device
*dev
, bool enable
)
242 device_set_wakeup_capable(dev
, true);
243 ret
= device_wakeup_enable(dev
);
245 device_set_wakeup_capable(dev
, false);
250 EXPORT_SYMBOL_GPL(device_init_wakeup
);
253 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
254 * @dev: Device to handle.
256 int device_set_wakeup_enable(struct device
*dev
, bool enable
)
258 if (!dev
|| !dev
->power
.can_wakeup
)
261 return enable
? device_wakeup_enable(dev
) : device_wakeup_disable(dev
);
263 EXPORT_SYMBOL_GPL(device_set_wakeup_enable
);
/*
 * The functions below use the observation that each wakeup event starts a
 * period in which the system should not be suspended.  The moment this period
 * will end depends on how the wakeup event is going to be processed after
 * being detected and all of the possible cases can be divided into two
 * distinct groups.
 *
 * First, a wakeup event may be detected by the same functional unit that will
 * carry out the entire processing of it and possibly will pass it to user
 * space for further processing.  In that case the functional unit that has
 * detected the event may later "close" the "no suspend" period associated
 * with it directly as soon as it has been dealt with.  The pair of
 * pm_stay_awake() and pm_relax(), balanced with each other, is supposed to be
 * used in such situations.
 *
 * Second, a wakeup event may be detected by one functional unit and processed
 * by another one.  In that case the unit that has detected it cannot really
 * "close" the "no suspend" period associated with it, unless it knows in
 * advance what's going to happen to the event during processing.  This
 * knowledge, however, may not be available to it, so it can simply specify
 * time to wait before the system can be suspended and pass it as the second
 * argument of pm_wakeup_event().
 *
 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
 * "no suspend" period will be ended either by the pm_relax(), or by the timer
 * function executed when the timer expires, whichever comes first.
 */
294 * wakup_source_activate - Mark given wakeup source as active.
295 * @ws: Wakeup source to handle.
297 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
298 * core of the event by incrementing the counter of of wakeup events being
301 static void wakeup_source_activate(struct wakeup_source
*ws
)
305 ws
->timer_expires
= jiffies
;
306 ws
->last_time
= ktime_get();
308 atomic_inc(&events_in_progress
);
312 * __pm_stay_awake - Notify the PM core of a wakeup event.
313 * @ws: Wakeup source object associated with the source of the event.
315 * It is safe to call this function from interrupt context.
317 void __pm_stay_awake(struct wakeup_source
*ws
)
324 spin_lock_irqsave(&ws
->lock
, flags
);
327 wakeup_source_activate(ws
);
328 spin_unlock_irqrestore(&ws
->lock
, flags
);
330 EXPORT_SYMBOL_GPL(__pm_stay_awake
);
333 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
334 * @dev: Device the wakeup event is related to.
336 * Notify the PM core of a wakeup event (signaled by @dev) by calling
337 * __pm_stay_awake for the @dev's wakeup source object.
339 * Call this function after detecting of a wakeup event if pm_relax() is going
340 * to be called directly after processing the event (and possibly passing it to
341 * user space for further processing).
343 void pm_stay_awake(struct device
*dev
)
350 spin_lock_irqsave(&dev
->power
.lock
, flags
);
351 __pm_stay_awake(dev
->power
.wakeup
);
352 spin_unlock_irqrestore(&dev
->power
.lock
, flags
);
354 EXPORT_SYMBOL_GPL(pm_stay_awake
);
357 * wakup_source_deactivate - Mark given wakeup source as inactive.
358 * @ws: Wakeup source to handle.
360 * Update the @ws' statistics and notify the PM core that the wakeup source has
361 * become inactive by decrementing the counter of wakeup events being processed
362 * and incrementing the counter of registered wakeup events.
364 static void wakeup_source_deactivate(struct wakeup_source
*ws
)
371 * __pm_relax() may be called directly or from a timer function.
372 * If it is called directly right after the timer function has been
373 * started, but before the timer function calls __pm_relax(), it is
374 * possible that __pm_stay_awake() will be called in the meantime and
375 * will set ws->active. Then, ws->active may be cleared immediately
376 * by the __pm_relax() called from the timer function, but in such a
377 * case ws->relax_count will be different from ws->active_count.
379 if (ws
->relax_count
!= ws
->active_count
) {
387 duration
= ktime_sub(now
, ws
->last_time
);
388 ws
->total_time
= ktime_add(ws
->total_time
, duration
);
389 if (ktime_to_ns(duration
) > ktime_to_ns(ws
->max_time
))
390 ws
->max_time
= duration
;
392 del_timer(&ws
->timer
);
395 * event_count has to be incremented before events_in_progress is
396 * modified, so that the callers of pm_check_wakeup_events() and
397 * pm_save_wakeup_count() don't see the old value of event_count and
398 * events_in_progress equal to zero at the same time.
400 atomic_inc(&event_count
);
401 smp_mb__before_atomic_dec();
402 atomic_dec(&events_in_progress
);
406 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
407 * @ws: Wakeup source object associated with the source of the event.
409 * Call this function for wakeup events whose processing started with calling
412 * It is safe to call it from interrupt context.
414 void __pm_relax(struct wakeup_source
*ws
)
421 spin_lock_irqsave(&ws
->lock
, flags
);
423 wakeup_source_deactivate(ws
);
424 spin_unlock_irqrestore(&ws
->lock
, flags
);
426 EXPORT_SYMBOL_GPL(__pm_relax
);
429 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
430 * @dev: Device that signaled the event.
432 * Execute __pm_relax() for the @dev's wakeup source object.
434 void pm_relax(struct device
*dev
)
441 spin_lock_irqsave(&dev
->power
.lock
, flags
);
442 __pm_relax(dev
->power
.wakeup
);
443 spin_unlock_irqrestore(&dev
->power
.lock
, flags
);
445 EXPORT_SYMBOL_GPL(pm_relax
);
/**
 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 * @data: Address of the wakeup source object associated with the event source.
 *
 * Call __pm_relax() for the wakeup source whose address is stored in @data.
 */
static void pm_wakeup_timer_fn(unsigned long data)
{
	__pm_relax((struct wakeup_source *)data);
}
459 * __pm_wakeup_event - Notify the PM core of a wakeup event.
460 * @ws: Wakeup source object associated with the event source.
461 * @msec: Anticipated event processing time (in milliseconds).
463 * Notify the PM core of a wakeup event whose source is @ws that will take
464 * approximately @msec milliseconds to be processed by the kernel. If @ws is
465 * not active, activate it. If @msec is nonzero, set up the @ws' timer to
466 * execute pm_wakeup_timer_fn() in future.
468 * It is safe to call this function from interrupt context.
470 void __pm_wakeup_event(struct wakeup_source
*ws
, unsigned int msec
)
473 unsigned long expires
;
478 spin_lock_irqsave(&ws
->lock
, flags
);
482 wakeup_source_activate(ws
);
485 wakeup_source_deactivate(ws
);
489 expires
= jiffies
+ msecs_to_jiffies(msec
);
493 if (time_after(expires
, ws
->timer_expires
)) {
494 mod_timer(&ws
->timer
, expires
);
495 ws
->timer_expires
= expires
;
499 spin_unlock_irqrestore(&ws
->lock
, flags
);
501 EXPORT_SYMBOL_GPL(__pm_wakeup_event
);
505 * pm_wakeup_event - Notify the PM core of a wakeup event.
506 * @dev: Device the wakeup event is related to.
507 * @msec: Anticipated event processing time (in milliseconds).
509 * Call __pm_wakeup_event() for the @dev's wakeup source object.
511 void pm_wakeup_event(struct device
*dev
, unsigned int msec
)
518 spin_lock_irqsave(&dev
->power
.lock
, flags
);
519 __pm_wakeup_event(dev
->power
.wakeup
, msec
);
520 spin_unlock_irqrestore(&dev
->power
.lock
, flags
);
522 EXPORT_SYMBOL_GPL(pm_wakeup_event
);
525 * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources.
527 static void pm_wakeup_update_hit_counts(void)
530 struct wakeup_source
*ws
;
533 list_for_each_entry_rcu(ws
, &wakeup_sources
, entry
) {
534 spin_lock_irqsave(&ws
->lock
, flags
);
537 spin_unlock_irqrestore(&ws
->lock
, flags
);
543 * pm_check_wakeup_events - Check for new wakeup events.
545 * Compare the current number of registered wakeup events with its preserved
546 * value from the past to check if new wakeup events have been registered since
547 * the old value was stored. Check if the current number of wakeup events being
550 bool pm_check_wakeup_events(void)
555 spin_lock_irqsave(&events_lock
, flags
);
556 if (events_check_enabled
) {
557 ret
= ((unsigned int)atomic_read(&event_count
) == saved_count
)
558 && !atomic_read(&events_in_progress
);
559 events_check_enabled
= ret
;
561 spin_unlock_irqrestore(&events_lock
, flags
);
563 pm_wakeup_update_hit_counts();
568 * pm_get_wakeup_count - Read the number of registered wakeup events.
569 * @count: Address to store the value at.
571 * Store the number of registered wakeup events at the address in @count. Block
572 * if the current number of wakeup events being processed is nonzero.
574 * Return false if the wait for the number of wakeup events being processed to
575 * drop down to zero has been interrupted by a signal (and the current number
576 * of wakeup events being processed is still nonzero). Otherwise return true.
578 bool pm_get_wakeup_count(unsigned int *count
)
582 if (capable(CAP_SYS_ADMIN
))
583 events_check_enabled
= false;
585 while (atomic_read(&events_in_progress
) && !signal_pending(current
)) {
586 pm_wakeup_update_hit_counts();
587 schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT
));
590 ret
= !atomic_read(&events_in_progress
);
591 *count
= atomic_read(&event_count
);
596 * pm_save_wakeup_count - Save the current number of registered wakeup events.
597 * @count: Value to compare with the current number of registered wakeup events.
599 * If @count is equal to the current number of registered wakeup events and the
600 * current number of wakeup events being processed is zero, store @count as the
601 * old number of registered wakeup events to be used by pm_check_wakeup_events()
602 * and return true. Otherwise return false.
604 bool pm_save_wakeup_count(unsigned int count
)
608 spin_lock_irq(&events_lock
);
609 if (count
== (unsigned int)atomic_read(&event_count
)
610 && !atomic_read(&events_in_progress
)) {
612 events_check_enabled
= true;
615 spin_unlock_irq(&events_lock
);
617 pm_wakeup_update_hit_counts();