/*
 * drivers/base/power/wakeup.c - System wakeup events framework
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/suspend.h>
#include <linux/pm.h>

/*
 * If set, the suspend/hibernate code will abort transitions to a sleep state
 * if wakeup events are registered during or immediately before the transition.
 */
bool events_check_enabled;

/* The counter of registered wakeup events. */
static unsigned long event_count;
/* A preserved old value of event_count. */
static unsigned long saved_event_count;
/* The counter of wakeup events being processed. */
static unsigned long events_in_progress;

static DEFINE_SPINLOCK(events_lock);
/*
 * The functions below use the observation that each wakeup event starts a
 * period in which the system should not be suspended. The moment this period
 * ends depends on how the wakeup event is going to be processed after being
 * detected, and all of the possible cases can be divided into two distinct
 * groups.
 *
 * First, a wakeup event may be detected by the same functional unit that will
 * carry out the entire processing of it and possibly will pass it to user space
 * for further processing. In that case the functional unit that has detected
 * the event may later "close" the "no suspend" period associated with it
 * directly, as soon as the event has been dealt with. The pair of
 * pm_stay_awake() and pm_relax(), balanced with each other, is supposed to be
 * used in such situations.
 *
 * Second, a wakeup event may be detected by one functional unit and processed
 * by another one. In that case the unit that has detected it cannot really
 * "close" the "no suspend" period associated with it, unless it knows in
 * advance what's going to happen to the event during processing. This
 * knowledge, however, may not be available to it, so it can simply specify a
 * time to wait before the system can be suspended and pass it as the second
 * argument of pm_wakeup_event().
 */
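
/*
 * For illustration only (not part of the framework): a minimal sketch of the
 * two patterns above, assuming a hypothetical driver with an interrupt
 * handler for each case. None of these symbols exist in the kernel; they
 * only show how the calls are meant to be paired.
 *
 *	static void foo_work_fn(struct work_struct *work)
 *	{
 *		// ... finish handling the event ...
 *		pm_relax();		// close the "no suspend" period
 *	}
 *	static DECLARE_WORK(foo_work, foo_work_fn);
 *
 *	// First pattern: the detecting unit processes the event itself.
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		pm_stay_awake(data);	// open the "no suspend" period
 *		schedule_work(&foo_work);
 *		return IRQ_HANDLED;
 *	}
 *
 *	// Second pattern: another unit consumes the event; just give it an
 *	// estimated 100 ms before the system may be suspended.
 *	static irqreturn_t bar_irq(int irq, void *data)
 *	{
 *		pm_wakeup_event(data, 100);
 *		return IRQ_HANDLED;
 *	}
 */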

/**
 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 * @dev: Device the wakeup event is related to.
 *
 * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the
 * counter of wakeup events being processed. If @dev is not NULL, the counter
 * of wakeup events related to @dev is incremented too.
 *
 * Call this function after detecting a wakeup event if pm_relax() is going
 * to be called directly after processing the event (and possibly passing it to
 * user space for further processing).
 *
 * It is safe to call this function from interrupt context.
 */
void pm_stay_awake(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&events_lock, flags);
	if (dev)
		dev->power.wakeup_count++;

	events_in_progress++;
	spin_unlock_irqrestore(&events_lock, flags);
}

/**
 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 *
 * Notify the PM core that a wakeup event has been processed by decrementing
 * the counter of wakeup events being processed and incrementing the counter
 * of registered wakeup events.
 *
 * Call this function for wakeup events whose processing started with calling
 * pm_stay_awake().
 *
 * It is safe to call it from interrupt context.
 */
void pm_relax(void)
{
	unsigned long flags;

	spin_lock_irqsave(&events_lock, flags);
	if (events_in_progress) {
		events_in_progress--;
		event_count++;
	}
	spin_unlock_irqrestore(&events_lock, flags);
}

/**
 * pm_wakeup_work_fn - Deferred closing of a wakeup event.
 * @work: Work item carrying the delayed work object to free.
 *
 * Execute pm_relax() for a wakeup event detected in the past and free the
 * work item object used for queuing up the work.
 */
static void pm_wakeup_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	pm_relax();
	kfree(dwork);
}

/**
 * pm_wakeup_event - Notify the PM core of a wakeup event.
 * @dev: Device the wakeup event is related to.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Notify the PM core of a wakeup event (signaled by @dev) that will take
 * approximately @msec milliseconds to be processed by the kernel. Increment
 * the counter of wakeup events being processed and queue up a work item
 * that will execute pm_relax() for the event after @msec milliseconds. If @dev
 * is not NULL, the counter of wakeup events related to @dev is incremented too.
 * If @msec is zero (or the work item cannot be allocated), the event is
 * registered right away, without opening a "no suspend" period.
 *
 * It is safe to call this function from interrupt context.
 */
void pm_wakeup_event(struct device *dev, unsigned int msec)
{
	unsigned long flags;
	struct delayed_work *dwork;

	dwork = msec ? kzalloc(sizeof(*dwork), GFP_ATOMIC) : NULL;

	spin_lock_irqsave(&events_lock, flags);
	if (dev)
		dev->power.wakeup_count++;

	if (dwork) {
		INIT_DELAYED_WORK(dwork, pm_wakeup_work_fn);
		schedule_delayed_work(dwork, msecs_to_jiffies(msec));

		events_in_progress++;
	} else {
		event_count++;
	}
	spin_unlock_irqrestore(&events_lock, flags);
}
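
/*
 * Conceptually, a pm_wakeup_event(dev, msec) call with a nonzero @msec is
 * equivalent to the following sequence (a sketch of the semantics, not of
 * the actual control flow, which does its bookkeeping under a single lock
 * acquisition):
 *
 *	pm_stay_awake(dev);
 *	// ... and then, roughly msec milliseconds later, from a work item:
 *	pm_relax();
 */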

/**
 * pm_check_wakeup_events - Check for new wakeup events.
 *
 * Compare the current number of registered wakeup events with its preserved
 * value from the past to check if new wakeup events have been registered since
 * the old value was stored. Also check if the current number of wakeup events
 * being processed is zero. If the check fails, events_check_enabled is
 * cleared, so subsequent checks will pass until it is re-enabled by
 * pm_save_wakeup_count().
 */
bool pm_check_wakeup_events(void)
{
	unsigned long flags;
	bool ret = true;

	spin_lock_irqsave(&events_lock, flags);
	if (events_check_enabled) {
		ret = (event_count == saved_event_count) && !events_in_progress;
		events_check_enabled = ret;
	}
	spin_unlock_irqrestore(&events_lock, flags);
	return ret;
}
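
/*
 * A sketch of the intended use (the actual call sites live in the suspend
 * and hibernation code under kernel/power/ and may differ in detail): the
 * core performs this check as late as possible in the transition and backs
 * out if it fails.
 *
 *	if (pm_check_wakeup_events())
 *		error = suspend_ops->enter(state);
 *	else
 *		error = -EBUSY;	// a wakeup event arrived; abort the transition
 */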

/**
 * pm_get_wakeup_count - Read the number of registered wakeup events.
 * @count: Address to store the value at.
 *
 * Store the number of registered wakeup events at the address in @count. Block
 * if the current number of wakeup events being processed is nonzero.
 *
 * Return false if the wait for the number of wakeup events being processed to
 * drop down to zero has been interrupted by a signal (and the current number
 * of wakeup events being processed is still nonzero). Otherwise return true.
 */
bool pm_get_wakeup_count(unsigned long *count)
{
	bool ret;

	spin_lock_irq(&events_lock);
	if (capable(CAP_SYS_ADMIN))
		events_check_enabled = false;

	while (events_in_progress && !signal_pending(current)) {
		spin_unlock_irq(&events_lock);

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		spin_lock_irq(&events_lock);
	}
	*count = event_count;
	ret = !events_in_progress;
	spin_unlock_irq(&events_lock);
	return ret;
}
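
/*
 * A sketch of how a sysfs reader can be built on top of this, modeled on the
 * /sys/power/wakeup_count interface; the function name and attribute wiring
 * here are illustrative, not a verbatim copy of the kernel's show() method:
 *
 *	static ssize_t wakeup_count_show(struct kobject *kobj,
 *					 struct kobj_attribute *attr, char *buf)
 *	{
 *		unsigned long val;
 *
 *		return pm_get_wakeup_count(&val) ?
 *			sprintf(buf, "%lu\n", val) : -EINTR;
 *	}
 */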

/**
 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 * @count: Value to compare with the current number of registered wakeup events.
 *
 * If @count is equal to the current number of registered wakeup events and the
 * current number of wakeup events being processed is zero, store @count as the
 * old number of registered wakeup events to be used by pm_check_wakeup_events()
 * and return true. Otherwise return false.
 */
bool pm_save_wakeup_count(unsigned long count)
{
	bool ret = false;

	spin_lock_irq(&events_lock);
	if (count == event_count && !events_in_progress) {
		saved_event_count = count;
		events_check_enabled = true;
		ret = true;
	}
	spin_unlock_irq(&events_lock);
	return ret;
}
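
/*
 * A sketch of the matching writer: user space reads the count, waits until
 * its own wakeup sources are idle, writes the value back, and then initiates
 * suspend; the transition is aborted if the counts no longer match.
 * Illustrative only, not a verbatim copy of the kernel's store() method:
 *
 *	static ssize_t wakeup_count_store(struct kobject *kobj,
 *					  struct kobj_attribute *attr,
 *					  const char *buf, size_t n)
 *	{
 *		unsigned long val;
 *
 *		if (sscanf(buf, "%lu", &val) == 1 && pm_save_wakeup_count(val))
 *			return n;
 *
 *		return -EINVAL;
 *	}
 */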