include/linux/freezer.h
/* SPDX-License-Identifier: GPL-2.0 */
/* Freezer declarations */

#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED

#include <linux/debug_locks.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_FREEZER
extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
extern bool pm_freezing;		/* PM freezing in effect */
extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */

/*
 * Timeout for stopping processes
 */
extern unsigned int freeze_timeout_msecs;

/*
 * Check if a process has been frozen
 */
static inline bool frozen(struct task_struct *p)
{
        return p->flags & PF_FROZEN;
}

extern bool freezing_slow_path(struct task_struct *p);

/*
 * Check if there is a request to freeze a process
 */
static inline bool freezing(struct task_struct *p)
{
        if (likely(!atomic_read(&system_freezing_cnt)))
                return false;
        return freezing_slow_path(p);
}

/* Takes and releases task alloc lock using task_lock() */
extern void __thaw_task(struct task_struct *t);

extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);

/*
 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
 * If try_to_freeze causes a lockdep warning it means the caller may deadlock
 */
static inline bool try_to_freeze_unsafe(void)
{
        might_sleep();
        if (likely(!freezing(current)))
                return false;
        return __refrigerator(false);
}

static inline bool try_to_freeze(void)
{
        if (!(current->flags & PF_NOFREEZE))
                debug_check_no_locks_held();
        return try_to_freeze_unsafe();
}

extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);

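/*
 * Example (illustrative sketch, not part of the original header): the usual
 * consumer of the API above is a freezable kernel thread that calls
 * set_freezable() once and then try_to_freeze() in its main loop, so it parks
 * in the refrigerator whenever freeze_processes() runs.  The function name is
 * hypothetical and kthread_should_stop() assumes <linux/kthread.h>.
 */
static int example_freezable_thread(void *data)
{
        set_freezable();                /* clear PF_NOFREEZE: opt in to freezing */
        while (!kthread_should_stop()) {
                try_to_freeze();        /* block here while tasks are frozen */
                /* ... do one unit of work ... */
        }
        return 0;
}
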
#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
static inline bool cgroup_freezing(struct task_struct *task)
{
        return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */

/*
 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
 * calls wait_for_completion(&vfork) and reset right after it returns from this
 * function. Next, the parent should call try_to_freeze() to freeze itself
 * appropriately in case the child has exited before the freezing of tasks is
 * complete. However, we don't want kernel threads to be frozen in unexpected
 * places, so we allow them to block freeze_processes() instead or to set
 * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
 * parent won't really block freeze_processes(), since ____call_usermodehelper()
 * (the child) does a little before exec/exit and it can't be frozen before
 * waking up the parent.
 */


/**
 * freezer_do_not_count - tell freezer to ignore %current
 *
 * Tell freezers to ignore the current task when determining whether the
 * target frozen state is reached. IOW, the current task will be
 * considered frozen enough by freezers.
 *
 * The caller shouldn't do anything which isn't allowed for a frozen task
 * until freezer_count() is called. Usually, a freezer[_do_not]_count() pair
 * wraps a scheduling operation and nothing much else.
 */
static inline void freezer_do_not_count(void)
{
        current->flags |= PF_FREEZER_SKIP;
}

/**
 * freezer_count - tell freezer to stop ignoring %current
 *
 * Undo freezer_do_not_count(). It tells freezers that %current should be
 * considered again and tries to freeze if freezing condition is already in
 * effect.
 */
static inline void freezer_count(void)
{
        current->flags &= ~PF_FREEZER_SKIP;
        /*
         * If freezing is in progress, the following paired with smp_mb()
         * in freezer_should_skip() ensures that either we see %true
         * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
         */
        smp_mb();
        try_to_freeze();
}
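
/*
 * Example (illustrative sketch, not part of the original header): the pairing
 * described above, wrapped around a potentially long sleep such as the vfork
 * wait mentioned earlier.  The helper name and completion pointer are
 * hypothetical; wait_for_completion() assumes <linux/completion.h>.  Note that
 * freezer_count() ends with try_to_freeze(), so the caller may be frozen
 * immediately after waking up.
 */
static void example_wait_for_child(struct completion *vfork_done)
{
        freezer_do_not_count();         /* freezers treat us as frozen enough */
        wait_for_completion(vfork_done);
        freezer_count();                /* count again; freeze if still required */
}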

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezer_count_unsafe(void)
{
        current->flags &= ~PF_FREEZER_SKIP;
        smp_mb();
        try_to_freeze_unsafe();
}

/**
 * freezer_should_skip - whether to skip a task when determining frozen
 *                       state is reached
 * @p: task in question
 *
 * This function is used by freezers after establishing %true freezing() to
 * test whether a task should be skipped when determining the target frozen
 * state is reached. IOW, if this function returns %true, @p is considered
 * frozen enough.
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
        /*
         * The following smp_mb() paired with the one in freezer_count()
         * ensures that either freezer_count() sees %true freezing() or we
         * see cleared %PF_FREEZER_SKIP and return %false. This makes it
         * impossible for a task to slip frozen state testing after
         * clearing %PF_FREEZER_SKIP.
         */
        smp_mb();
        return p->flags & PF_FREEZER_SKIP;
}
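
/*
 * Example (illustrative sketch, not part of the original header): a rough
 * outline of how a freezer could combine freezing(), frozen() and
 * freezer_should_skip() to decide whether the target frozen state has been
 * reached.  It is loosely modelled on the loop in kernel/power/process.c and
 * is not the real implementation; for_each_process_thread() and tasklist_lock
 * assume additional scheduler headers (<linux/sched/signal.h>,
 * <linux/sched/task.h>).
 */
static bool example_all_tasks_frozen(void)
{
        struct task_struct *g, *p;
        bool all_frozen = true;

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                if (p == current || !freezing(p) || frozen(p))
                        continue;
                if (!freezer_should_skip(p))    /* PF_FREEZER_SKIP counts as frozen */
                        all_frozen = false;
        }
        read_unlock(&tasklist_lock);
        return all_frozen;
}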

/*
 * These functions are intended to be used whenever you want to allow a
 * sleeping task to be frozen. Note that none of them return any clear
 * indication of whether a freeze event happened while inside them.
 */

/* Like schedule(), but should not block the freezer. */
static inline void freezable_schedule(void)
{
        freezer_do_not_count();
        schedule();
        freezer_count();
}
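
/*
 * Example (illustrative sketch, not part of the original header): an
 * open-coded wait loop that stays freezer-friendly by sleeping through
 * freezable_schedule().  The wait queue and flag are hypothetical;
 * signal_pending() assumes <linux/sched/signal.h>, and wait_event_freezable()
 * in <linux/wait.h> packages a similar pattern.
 */
static void example_wait_for_flag(wait_queue_head_t *wq, bool *flag)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
                if (*flag || signal_pending(current))
                        break;
                freezable_schedule();   /* sleep without blocking the freezer */
        }
        finish_wait(wq, &wait);
}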

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezable_schedule_unsafe(void)
{
        freezer_do_not_count();
        schedule();
        freezer_count_unsafe();
}

/*
 * Like schedule_timeout(), but should not block the freezer. Do not
 * call this with locks held.
 */
static inline long freezable_schedule_timeout(long timeout)
{
        long __retval;
        freezer_do_not_count();
        __retval = schedule_timeout(timeout);
        freezer_count();
        return __retval;
}
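
/*
 * Example (illustrative sketch, not part of the original header): a
 * freezer-friendly variant of msleep().  The helper name is hypothetical;
 * msecs_to_jiffies() assumes <linux/jiffies.h>.  The task sleeps for the
 * requested time but still counts as frozen while a system freeze is in
 * progress.
 */
static long example_freezable_msleep(unsigned int msecs)
{
        set_current_state(TASK_UNINTERRUPTIBLE);
        return freezable_schedule_timeout(msecs_to_jiffies(msecs));    /* remaining jiffies */
}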

/*
 * Like schedule_timeout_interruptible(), but should not block the freezer. Do not
 * call this with locks held.
 */
static inline long freezable_schedule_timeout_interruptible(long timeout)
{
        long __retval;
        freezer_do_not_count();
        __retval = schedule_timeout_interruptible(timeout);
        freezer_count();
        return __retval;
}

/* Like schedule_timeout_killable(), but should not block the freezer. */
static inline long freezable_schedule_timeout_killable(long timeout)
{
        long __retval;
        freezer_do_not_count();
        __retval = schedule_timeout_killable(timeout);
        freezer_count();
        return __retval;
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
{
        long __retval;
        freezer_do_not_count();
        __retval = schedule_timeout_killable(timeout);
        freezer_count_unsafe();
        return __retval;
}

/*
 * Like schedule_hrtimeout_range(), but should not block the freezer. Do not
 * call this with locks held.
 */
static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
                u64 delta, const enum hrtimer_mode mode)
{
        int __retval;
        freezer_do_not_count();
        __retval = schedule_hrtimeout_range(expires, delta, mode);
        freezer_count();
        return __retval;
}

/*
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define wait_event_freezekillable_unsafe(wq, condition)		\
({									\
        int __retval;							\
        freezer_do_not_count();						\
        __retval = wait_event_killable(wq, (condition));		\
        freezer_count_unsafe();						\
        __retval;							\
})

#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}

static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }

static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}

#define freezable_schedule()  schedule()

#define freezable_schedule_unsafe()  schedule()

#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)

#define freezable_schedule_timeout_interruptible(timeout)		\
        schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_killable(timeout)			\
        schedule_timeout_killable(timeout)

#define freezable_schedule_timeout_killable_unsafe(timeout)		\
        schedule_timeout_killable(timeout)

#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
        schedule_hrtimeout_range(expires, delta, mode)

#define wait_event_freezekillable_unsafe(wq, condition)		\
        wait_event_killable(wq, condition)

#endif /* !CONFIG_FREEZER */

#endif /* FREEZER_H_INCLUDED */