/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers.  It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);
/* #define DEBUG */

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @tsk: task struct of the task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct task_struct *start = tsk;

	do {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current's, otherwise it may be
			 * needlessly killed.
			 */
			if (mempolicy_nodemask_intersects(tsk, mask))
				return true;
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			if (cpuset_mems_allowed_intersects(current, tsk))
				return true;
		}
	} while_each_thread(start, tsk);

	return false;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

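/*
 * Illustrative example of the check above (hypothetical tasks, not taken
 * from the code itself): during a mempolicy-constrained oom with
 * @mask = {0,1}, a task whose MPOL_BIND policy allows only node 2 has no
 * intersection and is skipped; during a cpuset-constrained oom
 * (@mask == NULL), a task is eligible only if its cpuset's mems_allowed
 * overlaps current's.
 */
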
/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
static struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t = p;

	do {
		task_lock(t);
		if (likely(t->mm))
			return t;
		task_unlock(t);
	} while_each_thread(p, t);

	return NULL;
}

/* return true if the task is not adequate as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem,
				const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When called from mem_cgroup_out_of_memory() and p is not a member of the group */
	if (mem && !task_in_mem_cgroup(p, mem))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task to score
 * @mem: target mem_cgroup, if the oom is memcg-constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function.  The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill; this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */
unsigned long badness(struct task_struct *p, struct mem_cgroup *mem,
		      const nodemask_t *nodemask, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time;
	struct task_struct *child;
	struct task_struct *c, *t;
	int oom_adj = p->signal->oom_adj;
	struct task_cputime task_time;
	unsigned long utime;
	unsigned long stime;

	if (oom_unkillable_task(p, mem, nodemask))
		return 0;
	if (oom_adj == OOM_DISABLE)
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = p->mm->total_vm;
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_OOM_ORIGIN)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice.  We add half the vmsize of the children if they
	 * have their own mm.  This prevents forking servers from flooding
	 * the machine with an endless amount of children.  In case a single
	 * child is eating the vast majority of memory, adding only half
	 * to the parent will make the child our kill candidate of choice.
	 */
	t = p;
	do {
		list_for_each_entry(c, &t->children, sibling) {
			child = find_lock_task_mm(c);
			if (child) {
				if (child->mm != p->mm)
					points += child->mm->total_vm/2 + 1;
				task_unlock(child);
			}
		}
	} while_each_thread(p, t);

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds.  There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	thread_group_cputime(p, &task_time);
	utime = cputime_to_jiffies(task_time.utime);
	stime = cputime_to_jiffies(task_time.stime);
	cpu_time = (utime + stime) >> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	if (cpu_time)
		points /= int_sqrt(cpu_time);
	if (run_time)
		points /= int_sqrt(int_sqrt(run_time));

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN) ||
	    has_capability_noaudit(p, CAP_SYS_RESOURCE))
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (has_capability_noaudit(p, CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * Adjust the score by oom_adj.
	 */
	if (oom_adj) {
		if (oom_adj > 0) {
			if (!points)
				points = 1;
			points <<= oom_adj;
		} else
			points >>= -(oom_adj);
	}

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
		p->pid, p->comm, points);
#endif
	return points;
}
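
/*
 * Rough worked example of the heuristic above (illustrative numbers only):
 * with HZ = 1000, SHIFT_HZ is 10, so cpu_time is (utime + stime) >> 13,
 * i.e. about one unit per 8 seconds of CPU time.  A task with
 * total_vm = 100000 pages and ~400 seconds of CPU time (cpu_time ~ 48,
 * int_sqrt(48) = 6) starts near 100000 / 6 ~ 16666 points; a positive
 * nice value doubles that, and each positive oom_adj unit doubles it
 * again, while each negative unit halves it.
 */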

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);

	/*
	 * Reach here only when __GFP_NOFAIL is used.  So, we should avoid
	 * killing current; we have to kill a random task in this case.
	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way
	 * to handle it yet.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * The nodemask here is a nodemask passed to alloc_pages().  Cpusets
	 * do not use this nodemask for their hardwall/softwall/hierarchy
	 * feature; mempolicy is currently the only user of the nodemask
	 * here.  Check whether the mempolicy's nodemask contains all of
	 * N_HIGH_MEMORY.
	 */
	if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask))
		return CONSTRAINT_MEMORY_POLICY;

	/* Check whether this allocation failure is caused by the cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			high_zoneidx, nodemask)
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			return CONSTRAINT_CPUSET;

	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask)
{
	return CONSTRAINT_NONE;
}
#endif
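
/*
 * Example of the resulting classification (illustrative): on a two-node
 * system (N_HIGH_MEMORY = {0,1}), an allocation under an MPOL_BIND policy
 * restricted to node 1 passes nodemask = {1}, which is not a superset of
 * {0,1}, so the oom is CONSTRAINT_MEMORY_POLICY; an allocation that fails
 * only because of a cpuset's mems restriction is CONSTRAINT_CPUSET; an
 * unconstrained failure is CONSTRAINT_NONE.
 */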

/*
 * Simple selection loop.  We choose the process with the highest
 * number of 'points'.  We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned long *ppoints,
		struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	for_each_process(p) {
		unsigned long points;

		if (oom_unkillable_task(p, mem, nodemask))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed.  Don't allow any other task access to the
		 * memory reserves.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory.  Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if ((p->flags & PF_EXITING) && p->mm) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		points = badness(p, mem, nodemask, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	}

	return chosen;
}
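
/*
 * Note on the return convention above: ERR_PTR(-1UL) tells the caller to
 * abort the scan entirely because an eligible task is already exiting or
 * already has access to memory reserves (TIF_MEMDIE); NULL means no
 * killable task was found at all.
 */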

/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: current's memory controller, if constrained
 *
 * Dumps the current memory state of all system tasks, excluding kernel
 * threads.  State information includes each task's pid, uid, tgid, vm size,
 * rss, cpu, oom_adj score, and name.
 *
 * If @mem is non-NULL, only tasks that are a member of the mem_cgroup are
 * shown.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem)
{
	struct task_struct *p;
	struct task_struct *task;

	printk(KERN_INFO "[ pid ]   uid  tgid total_vm      rss cpu oom_adj "
	       "name\n");
	for_each_process(p) {
		if (p->flags & PF_KTHREAD)
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * All of p's threads have already detached their
			 * mm's.  There's no need to report them; they can't
			 * be oom killed anyway.
			 */
			continue;
		}

		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3u %3d %s\n",
		       task->pid, __task_cred(task)->uid, task->tgid,
		       task->mm->total_vm, get_mm_rss(task->mm),
		       task_cpu(task), task->signal->oom_adj, task->comm);
		task_unlock(task);
	}
}

static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
			struct mem_cgroup *mem)
{
	task_lock(current);
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_adj=%d\n",
		current->comm, gfp_mask, order, current->signal->oom_adj);
	cpuset_print_task_mems_allowed(current);
	task_unlock(current);
	dump_stack();
	mem_cgroup_print_oom_info(mem, p);
	show_mem();
	if (sysctl_oom_dump_tasks)
		dump_tasks(mem);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
static int oom_kill_task(struct task_struct *p)
{
	p = find_lock_task_mm(p);
	if (!p)
		/* nothing was locked on failure, so there is nothing to unlock */
		return 1;

	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
		task_pid_nr(p), p->comm, K(p->mm->total_vm),
		K(get_mm_counter(p->mm, MM_ANONPAGES)),
		K(get_mm_counter(p->mm, MM_FILEPAGES)));
	task_unlock(p);

	p->rt.time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);
	force_sig(SIGKILL, p);
	return 0;
}
#undef K
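
/*
 * For reference, the K() macro above converts pages to kilobytes for the
 * kill message: with 4 KB pages (PAGE_SHIFT = 12), K(x) is x << 2, so
 * e.g. K(25000) = 100000 kB.
 */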

static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned long points, struct mem_cgroup *mem,
			    nodemask_t *nodemask, const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t = p;
	unsigned long victim_points = 0;
	struct timespec uptime;

	if (printk_ratelimit())
		dump_header(p, gfp_mask, order, mem);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		set_tsk_thread_flag(p, TIF_MEMDIE);
		return 0;
	}

	task_lock(p);
	pr_err("%s: Kill process %d (%s) score %lu or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);
	task_unlock(p);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	do_posix_clock_monotonic_gettime(&uptime);
	do {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned long child_points;

			/* badness() returns 0 if the thread is unkillable */
			child_points = badness(child, mem, nodemask,
					       uptime.tv_sec);
			if (child_points > victim_points) {
				victim = child;
				victim_points = child_points;
			}
		}
	} while_each_thread(p, t);

	return oom_kill_task(victim);
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
				int order)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	read_lock(&tasklist_lock);
	dump_header(NULL, gfp_mask, order, NULL);
	read_unlock(&tasklist_lock);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
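
/*
 * Usage sketch (illustrative): "echo 1 > /proc/sys/vm/panic_on_oom" panics
 * the machine only for unconstrained (CONSTRAINT_NONE) ooms, while
 * "echo 2 > /proc/sys/vm/panic_on_oom" panics even when the oom is confined
 * to a cpuset, mempolicy, or memcg.
 */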

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long points = 0;
	struct task_struct *p;

	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0);
	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, mem, NULL);
	if (!p || PTR_ERR(p) == -1UL)
		goto out;

	if (oom_kill_process(p, gfp_mask, 0, points, mem, NULL,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
}
#endif

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zonelist_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}

/*
 * Try to acquire the oom killer lock for all system zones.  Returns zero if a
 * parallel oom killing is taking place, otherwise locks all zones and returns
 * non-zero.
 */
static int try_set_system_oom(void)
{
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	for_each_populated_zone(zone)
		zone_set_flag(zone, ZONE_OOM_LOCKED);
out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation
 * attempts or page faults may now recall the oom killer, if necessary.
 */
static void clear_system_oom(void)
{
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_populated_zone(zone)
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	spin_unlock(&zone_scan_lock);
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR trying to be smart about which process to kill.  Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask)
{
	struct task_struct *p;
	unsigned long freed = 0;
	unsigned long points;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	/*
	 * If current has a pending SIGKILL, then automatically select it.  The
	 * goal is to allow it to allocate so that it may quickly exit and free
	 * its memory.
	 */
	if (fatal_signal_pending(current)) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	if (zonelist)
		constraint = constrained_alloc(zonelist, gfp_mask, nodemask);
	check_panic_on_oom(constraint, gfp_mask, order);

	read_lock(&tasklist_lock);
	if (sysctl_oom_kill_allocating_task &&
	    !oom_unkillable_task(current, NULL, nodemask) &&
	    (current->signal->oom_adj != OOM_DISABLE)) {
		/*
		 * oom_kill_process() needs tasklist_lock held.  If it returns
		 * non-zero, current could not be killed so we must fall back
		 * to the tasklist scan.
		 */
		if (!oom_kill_process(current, gfp_mask, order, 0, NULL,
				nodemask,
				"Out of memory (oom_kill_allocating_task)")) {
			read_unlock(&tasklist_lock);
			return;
		}
	}

retry:
	p = select_bad_process(&points, NULL,
			constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
								 NULL);
	if (PTR_ERR(p) == -1UL) {
		read_unlock(&tasklist_lock);
		return;
	}

	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p) {
		dump_header(NULL, gfp_mask, order, NULL);
		read_unlock(&tasklist_lock);
		panic("Out of memory and no killable processes...\n");
	}

	if (oom_kill_process(p, gfp_mask, order, points, NULL, nodemask,
			     "Out of memory"))
		goto retry;
	read_unlock(&tasklist_lock);

	/*
	 * Give "p" a good chance of killing itself before we
	 * retry the allocation, unless "p" is current.
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If a populated zone has ZONE_OOM_LOCKED set, a
 * parallel oom killing is already in progress so do nothing.  If a task is
 * found with TIF_MEMDIE set, it has been killed so do nothing and allow it
 * to exit.
 */
void pagefault_out_of_memory(void)
{
	if (try_set_system_oom()) {
		out_of_memory(NULL, 0, 0, NULL);
		clear_system_oom();
	}
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}