#ifndef _LINUX_SUSPEND_H
#define _LINUX_SUSPEND_H

#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/mm.h>
#include <asm/errno.h>

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
extern void pm_set_vt_switch(int);
extern int pm_prepare_console(void);
extern void pm_restore_console(void);
#else
static inline void pm_set_vt_switch(int do_switch)
{
}

static inline int pm_prepare_console(void)
{
	return 0;
}

static inline void pm_restore_console(void)
{
}
#endif

typedef int __bitwise suspend_state_t;

#define PM_SUSPEND_ON		((__force suspend_state_t) 0)
#define PM_SUSPEND_STANDBY	((__force suspend_state_t) 1)
#define PM_SUSPEND_MEM		((__force suspend_state_t) 3)
#define PM_SUSPEND_MAX		((__force suspend_state_t) 4)

enum suspend_stat_step {
	SUSPEND_FREEZE = 1,
	SUSPEND_PREPARE,
	SUSPEND_SUSPEND,
	SUSPEND_SUSPEND_NOIRQ,
	SUSPEND_RESUME_NOIRQ,
	SUSPEND_RESUME
};

struct suspend_stats {
	int	success;
	int	fail;
	int	failed_freeze;
	int	failed_prepare;
	int	failed_suspend;
	int	failed_suspend_noirq;
	int	failed_resume;
	int	failed_resume_noirq;
#define	REC_FAILED_NUM	2
	int	last_failed_dev;
	char	failed_devs[REC_FAILED_NUM][40];
	int	last_failed_errno;
	int	errno[REC_FAILED_NUM];
	int	last_failed_step;
	enum suspend_stat_step	failed_steps[REC_FAILED_NUM];
};

extern struct suspend_stats suspend_stats;

/*
 * The dpm_save_failed_*() helpers record the most recent failures in a small
 * ring of REC_FAILED_NUM entries, so suspend_stats can report the last few
 * failing devices, error codes and suspend steps.
 */
static inline void dpm_save_failed_dev(const char *name)
{
	strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
		name,
		sizeof(suspend_stats.failed_devs[0]));
	suspend_stats.last_failed_dev++;
	suspend_stats.last_failed_dev %= REC_FAILED_NUM;
}

static inline void dpm_save_failed_errno(int err)
{
	suspend_stats.errno[suspend_stats.last_failed_errno] = err;
	suspend_stats.last_failed_errno++;
	suspend_stats.last_failed_errno %= REC_FAILED_NUM;
}

static inline void dpm_save_failed_step(enum suspend_stat_step step)
{
	suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
	suspend_stats.last_failed_step++;
	suspend_stats.last_failed_step %= REC_FAILED_NUM;
}
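
/*
 * Illustrative sketch (not part of this header): how a caller might use the
 * helpers above when a device fails to suspend.  The function and parameter
 * names below are hypothetical.
 */
#if 0
static void example_record_suspend_failure(const char *devname, int error)
{
	suspend_stats.failed_suspend++;		/* count the failure */
	dpm_save_failed_dev(devname);		/* remember the device name */
	dpm_save_failed_errno(error);		/* remember the error code */
	dpm_save_failed_step(SUSPEND_SUSPEND);	/* remember the failing step */
}
#endif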

/**
 * struct platform_suspend_ops - Callbacks for managing platform dependent
 *	system sleep states.
 *
 * @valid: Callback to determine if a given system sleep state is supported by
 *	the platform.
 *	Valid (i.e. supported) states are advertised in /sys/power/state.  Note
 *	that it still may be impossible to enter a given system sleep state if
 *	the conditions aren't right.
 *	The %suspend_valid_only_mem function is available to be assigned to
 *	this callback if the platform only supports mem sleep.
 *
 * @begin: Initialise a transition to the given system sleep state.
 *	@begin() is executed right prior to suspending devices.  The information
 *	conveyed to the platform code by @begin() should be disregarded by it as
 *	soon as @end() is executed.  If @begin() fails (i.e. returns nonzero),
 *	@prepare(), @enter() and @finish() will not be called by the PM core.
 *	This callback is optional.  However, if it is implemented, the argument
 *	passed to @enter() is redundant and should be ignored.
 *
 * @prepare: Prepare the platform for entering the system sleep state indicated
 *	by @begin().
 *	@prepare() is called right after devices have been suspended (i.e. the
 *	appropriate .suspend() method has been executed for each device) and
 *	before device drivers' late suspend callbacks are executed.  It returns
 *	0 on success or a negative error code otherwise, in which case the
 *	system cannot enter the desired sleep state (@prepare_late(), @enter()
 *	and @wake() will not be called in that case).
 *
 * @prepare_late: Finish preparing the platform for entering the system sleep
 *	state indicated by @begin().
 *	@prepare_late() is called before disabling nonboot CPUs and after
 *	device drivers' late suspend callbacks have been executed.  It returns
 *	0 on success or a negative error code otherwise, in which case the
 *	system cannot enter the desired sleep state (@enter() will not be
 *	executed).
 *
 * @enter: Enter the system sleep state indicated by @begin() or represented by
 *	the argument if @begin() is not implemented.
 *	This callback is mandatory.  It returns 0 on success or a negative
 *	error code otherwise, in which case the system cannot enter the desired
 *	sleep state.
 *
 * @wake: Called when the system has just left a sleep state, right after
 *	the nonboot CPUs have been enabled and before device drivers' early
 *	resume callbacks are executed.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @prepare_late().  If implemented, it is always called
 *	after @prepare_late() and @enter(), even if one of them fails.
 *
 * @finish: Finish wake-up of the platform.
 *	@finish() is called right prior to calling device drivers' regular
 *	resume callbacks.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @prepare().  If implemented, it is always called after
 *	@enter() and @wake(), even if any of them fails.  It is also executed
 *	after a failing @prepare().
 *
 * @suspend_again: Returns whether the system should suspend again (true) or
 *	not (false).  If the platform wants to poll sensors or execute some
 *	code while suspended without invoking user space and most devices,
 *	the @suspend_again callback is the place to do it, assuming that a
 *	periodic wakeup or alarm wakeup has already been set up.  This allows
 *	some code to run while the system stays suspended from the point of
 *	view of user space and devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state or
 *	the transition to the sleep state has been aborted.
 *	This callback is optional, but should be implemented by the platforms
 *	that implement @begin().  Accordingly, platforms implementing @begin()
 *	should also provide an @end() which cleans up transitions aborted
 *	before @enter().
 *
 * @recover: Recover the platform from a suspend failure.
 *	Called by the PM core if the suspending of devices fails.
 *	This callback is optional and should only be implemented by platforms
 *	which require special recovery actions in that situation.
 */
struct platform_suspend_ops {
	int (*valid)(suspend_state_t state);
	int (*begin)(suspend_state_t state);
	int (*prepare)(void);
	int (*prepare_late)(void);
	int (*enter)(suspend_state_t state);
	void (*wake)(void);
	void (*finish)(void);
	bool (*suspend_again)(void);
	void (*end)(void);
	void (*recover)(void);
};

#ifdef CONFIG_SUSPEND
/**
 * suspend_set_ops - set platform dependent suspend operations
 * @ops: The new suspend operations to set.
 */
extern void suspend_set_ops(const struct platform_suspend_ops *ops);
extern int suspend_valid_only_mem(suspend_state_t state);

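/*
 * Illustrative sketch (not part of this header): a platform that only
 * supports mem sleep might register its operations like this from board or
 * SoC code.  The foo_* names below are hypothetical.
 */
#if 0
static int foo_suspend_enter(suspend_state_t state)
{
	/* Program the hardware to enter the selected sleep state. */
	return foo_pm_do_sleep();
}

static const struct platform_suspend_ops foo_suspend_ops = {
	.valid	= suspend_valid_only_mem,
	.enter	= foo_suspend_enter,
};

static int __init foo_pm_init(void)
{
	suspend_set_ops(&foo_suspend_ops);
	return 0;
}
arch_initcall(foo_pm_init);
#endif
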
/**
 * arch_suspend_disable_irqs - disable IRQs for suspend
 *
 * Disables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_disable_irqs(void);

/**
 * arch_suspend_enable_irqs - enable IRQs after suspend
 *
 * Enables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_enable_irqs(void);

extern int pm_suspend(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem	NULL

static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
#endif /* !CONFIG_SUSPEND */

/* struct pbe is used for creating lists of pages that should be restored
 * atomically during the resume from disk, because the page frames they have
 * occupied before the suspend are in use.
 */
struct pbe {
	void *address;		/* address of the copy */
	void *orig_address;	/* original address of a page */
	struct pbe *next;
};

/* mm/page_alloc.c */
extern void mark_free_pages(struct zone *zone);

/**
 * struct platform_hibernation_ops - hibernation platform support
 *
 * The methods in this structure allow a platform to carry out special
 * operations required by it during a hibernation transition.
 *
 * All the methods below, except for @recover(), must be implemented.
 *
 * @begin: Tell the platform driver that we're starting hibernation.
 *	Called right after shrinking memory and before freezing devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state.
 *
 * @pre_snapshot: Prepare the platform for creating the hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @finish: Restore the previous state of the platform after the hibernation
 *	image has been created *or* put the platform into the normal operation
 *	mode after the hibernation (the same method is executed in both cases).
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @prepare: Prepare the platform for entering the low power state.
 *	Called right after the hibernation image has been saved and before
 *	devices are prepared for entering the low power state.
 *
 * @enter: Put the system into the low power state after the hibernation image
 *	has been saved to disk.
 *	Called after the nonboot CPUs have been disabled and all of the low
 *	level devices have been shut down (runs with IRQs off).
 *
 * @leave: Perform the first stage of the cleanup after the system sleep state
 *	indicated by @begin() has been left.
 *	Called right after the control has been passed from the boot kernel to
 *	the image kernel, before the nonboot CPUs are enabled and before devices
 *	are resumed.  Executed with interrupts disabled.
 *
 * @pre_restore: Prepare the system for the restoration from a hibernation
 *	image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @restore_cleanup: Clean up after a failing image restoration.
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @recover: Recover the platform from a failure to suspend devices.
 *	Called by the PM core if the suspending of devices during hibernation
 *	fails.  This callback is optional and should only be implemented by
 *	platforms which require special recovery actions in that situation.
 */
struct platform_hibernation_ops {
	int (*begin)(void);
	void (*end)(void);
	int (*pre_snapshot)(void);
	void (*finish)(void);
	int (*prepare)(void);
	int (*enter)(void);
	void (*leave)(void);
	int (*pre_restore)(void);
	void (*restore_cleanup)(void);
	void (*recover)(void);
};

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
static inline void __init register_nosave_region(unsigned long b, unsigned long e)
{
	__register_nosave_region(b, e, 0);
}
static inline void __init register_nosave_region_late(unsigned long b, unsigned long e)
{
	__register_nosave_region(b, e, 1);
}
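
/*
 * Illustrative sketch (not part of this header): platform code can exclude a
 * page frame range (for example firmware-owned memory) from the hibernation
 * image by registering it as a nosave region during early init.  The
 * FOO_RESERVED_* symbols and the initcall placement below are hypothetical.
 */
#if 0
static int __init foo_reserve_nosave(void)
{
	/* Page frame numbers of the region that must not be saved/restored. */
	register_nosave_region(PFN_DOWN(FOO_RESERVED_START),
			       PFN_UP(FOO_RESERVED_END));
	return 0;
}
core_initcall(foo_reserve_nosave);
#endif
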
extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
extern unsigned long get_safe_page(gfp_t gfp_mask);

extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
extern int hibernate(void);
extern bool system_entering_hibernation(void);
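
/*
 * Illustrative sketch (not part of this header): platform code can hook into
 * hibernation by registering its callbacks.  Per the kernel-doc above, all
 * callbacks except @recover must be provided.  The bar_* callbacks below are
 * hypothetical and assumed to be defined elsewhere.
 */
#if 0
static const struct platform_hibernation_ops bar_hibernation_ops = {
	.begin		 = bar_hibernation_begin,
	.end		 = bar_hibernation_end,
	.pre_snapshot	 = bar_pre_snapshot,
	.finish		 = bar_finish,
	.prepare	 = bar_prepare,
	.enter		 = bar_enter,
	.leave		 = bar_leave,
	.pre_restore	 = bar_pre_restore,
	.restore_cleanup = bar_restore_cleanup,
};

static int __init bar_hibernation_init(void)
{
	/* Make platform-assisted hibernation available to the PM core. */
	hibernation_set_ops(&bar_hibernation_ops);
	return 0;
}
core_initcall(bar_hibernation_init);
#endif
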
#else /* CONFIG_HIBERNATION */
static inline void register_nosave_region(unsigned long b, unsigned long e) {}
static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}

static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
static inline bool system_entering_hibernation(void) { return false; }
#endif /* CONFIG_HIBERNATION */

/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE		0x0006 /* Restore failed */

#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
void restore_processor_state(void);

/* kernel/power/main.c */
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);

#define pm_notifier(fn, pri) {					\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	register_pm_notifier(&fn##_nb);				\
}
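
/*
 * Illustrative sketch (not part of this header): a subsystem can be told
 * about suspend/hibernation transitions by registering a notifier with the
 * pm_notifier() macro above.  The baz_* names below are hypothetical.
 */
#if 0
static int baz_pm_callback(struct notifier_block *nb,
			   unsigned long action, void *data)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		baz_quiesce();		/* stop activity before the transition */
		break;
	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		baz_resume_activity();	/* restart after the transition */
		break;
	}
	return NOTIFY_OK;
}

static int __init baz_pm_init(void)
{
	pm_notifier(baz_pm_callback, 0);	/* registers a static notifier block */
	return 0;
}
late_initcall(baz_pm_init);
#endif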

/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;

extern bool pm_wakeup_pending(void);
extern bool pm_get_wakeup_count(unsigned int *count);
extern bool pm_save_wakeup_count(unsigned int count);
#else /* !CONFIG_PM_SLEEP */

static inline int register_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int unregister_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

#define pm_notifier(fn, pri)	do { (void)(fn); } while (0)

static inline bool pm_wakeup_pending(void) { return false; }
#endif /* !CONFIG_PM_SLEEP */

extern struct mutex pm_mutex;

#ifndef CONFIG_HIBERNATE_CALLBACKS
static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}

#else

/* Let some subsystems like memory hotadd exclude hibernation */

static inline void lock_system_sleep(void)
{
	mutex_lock(&pm_mutex);
}

static inline void unlock_system_sleep(void)
{
	mutex_unlock(&pm_mutex);
}
#endif
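
/*
 * Illustrative sketch (not part of this header): code that must not race with
 * hibernation, such as a memory hot-add path, can bracket its critical
 * section with the helpers above.  foo_add_memory_block() is hypothetical.
 */
#if 0
static int foo_hotadd(void)
{
	int ret;

	lock_system_sleep();	/* keep hibernation from starting */
	ret = foo_add_memory_block();
	unlock_system_sleep();
	return ret;
}
#endif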

#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
/*
 * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture
 * to save/restore additional information to/from the array of page
 * frame numbers in the hibernation image. For s390 this is used to
 * save and restore the storage key for each page that is included
 * in the hibernation image.
 */
unsigned long page_key_additional_pages(unsigned long pages);
int page_key_alloc(unsigned long pages);
void page_key_free(void);
void page_key_read(unsigned long *pfn);
void page_key_memorize(unsigned long *pfn);
void page_key_write(void *address);

#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */

static inline unsigned long page_key_additional_pages(unsigned long pages)
{
	return 0;
}

static inline int page_key_alloc(unsigned long pages)
{
	return 0;
}

static inline void page_key_free(void) {}
static inline void page_key_read(unsigned long *pfn) {}
static inline void page_key_memorize(unsigned long *pfn) {}
static inline void page_key_write(void *address) {}

#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */

#endif /* _LINUX_SUSPEND_H */