/*
 * linux/mm/oom_kill.c
 *
 * Copyright (C) 1998,2000 Rik van Riel
 * Thanks go out to Claus Fischer for some serious inspiration and
 * for goading me into coding this file...
 *
 * The routines in this file are used to kill a process when
 * we're seriously out of memory. This gets called from __alloc_pages()
 * in mm/page_alloc.c when we really run out of memory.
 *
 * Since we won't call these routines often (on a well-configured
 * machine) this file will double as a 'coding guide' and a signpost
 * for newbie kernel hackers. It features several pointers to major
 * kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/security.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks;
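/*
 * The three sysctls above are exposed via /proc/sys/vm/ as panic_on_oom,
 * oom_kill_allocating_task and oom_dump_tasks (registered in
 * kernel/sysctl.c).
 */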
static DEFINE_SPINLOCK(zone_scan_mutex);
/* #define DEBUG */

/**
 * badness - calculate a numeric value for how bad this task has been
 * @p: task struct of the task we are rating
 * @uptime: current uptime in seconds
 *
 * The formula used is relatively simple and documented inline in the
 * function. The main rationale is that we want to select a good task
 * to kill when we run out of memory.
 *
 * Good in this context means that:
 * 1) we lose the minimum amount of work done
 * 2) we recover a large amount of memory
 * 3) we don't kill anything innocent of eating tons of memory
 * 4) we want to kill the minimum number of processes (one)
 * 5) we try to kill the process the user expects us to kill, this
 *    algorithm has been meticulously tuned to meet the principle
 *    of least surprise ... (be careful when you change it)
 */
unsigned long badness(struct task_struct *p, unsigned long uptime)
{
	unsigned long points, cpu_time, run_time, s;
	struct mm_struct *mm;
	struct task_struct *child;

	task_lock(p);
	mm = p->mm;
	if (!mm) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = mm->total_vm;

	/*
	 * After this unlock we can no longer dereference local variable `mm'
	 */
	task_unlock(p);

	/*
	 * swapoff can easily use up all memory, so kill those first.
	 */
	if (p->flags & PF_SWAPOFF)
		return ULONG_MAX;

	/*
	 * Processes which fork a lot of child processes are likely
	 * a good choice. We add half the vmsize of the children if they
	 * have their own mm. This prevents forking servers from flooding
	 * the machine with an endless number of children. In case a single
	 * child is eating the vast majority of memory, adding only half
	 * to the parent makes the child our kill candidate of choice.
	 */
	list_for_each_entry(child, &p->children, sibling) {
		task_lock(child);
		if (child->mm != mm && child->mm)
			points += child->mm->total_vm/2 + 1;
		task_unlock(child);
	}

	/*
	 * CPU time is in tens of seconds and run time is in thousands
	 * of seconds. There is no particular reason for this other than
	 * that it turned out to work very well in practice.
	 */
	cpu_time = (cputime_to_jiffies(p->utime) + cputime_to_jiffies(p->stime))
		>> (SHIFT_HZ + 3);

	if (uptime >= p->start_time.tv_sec)
		run_time = (uptime - p->start_time.tv_sec) >> 10;
	else
		run_time = 0;

	s = int_sqrt(cpu_time);
	if (s)
		points /= s;
	s = int_sqrt(int_sqrt(run_time));
	if (s)
		points /= s;
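
	/*
	 * Net effect of the two divisions above: badness shrinks with the
	 * square root of accumulated CPU time and the fourth root of
	 * wall-clock run time, so long-lived, CPU-busy tasks look less
	 * attractive. E.g. with cpu_time = 100 and run_time = 10000 the
	 * points are divided by 10 and then by 10 again.
	 */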

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (task_nice(p) > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (has_capability(p, CAP_SYS_ADMIN) ||
	    has_capability(p, CAP_SYS_RESOURCE))
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (has_capability(p, CAP_SYS_RAWIO))
		points /= 4;

	/*
	 * If p's nodes don't overlap ours, it may still help to kill p
	 * because p may have allocated or otherwise mapped memory on
	 * this node before. However it will be less likely.
	 */
	if (!cpuset_mems_allowed_intersects(current, p))
		points /= 8;

	/*
	 * Adjust the score by oomkilladj.
	 */
	if (p->oomkilladj) {
		if (p->oomkilladj > 0) {
			if (!points)
				points = 1;
			points <<= p->oomkilladj;
		} else
			points >>= -(p->oomkilladj);
	}
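
	/*
	 * Each oomkilladj step above doubles (positive values) or halves
	 * (negative values) the score: e.g. oomkilladj = +4 multiplies
	 * points by 16, oomkilladj = -4 divides them by 16. OOM_DISABLE
	 * itself is handled by the callers, which skip such tasks.
	 */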

#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
	       p->pid, p->comm, points);
#endif
	return points;
}

/*
 * Determine the type of allocation constraint.
 */
static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
						    gfp_t gfp_mask)
{
#ifdef CONFIG_NUMA
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	nodemask_t nodes = node_states[N_HIGH_MEMORY];

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
		if (cpuset_zone_allowed_softwall(zone, gfp_mask))
			node_clear(zone_to_nid(zone), nodes);
		else
			return CONSTRAINT_CPUSET;

	if (!nodes_empty(nodes))
		return CONSTRAINT_MEMORY_POLICY;
#endif

	return CONSTRAINT_NONE;
}
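
/*
 * The constraint returned above steers out_of_memory() below:
 * CONSTRAINT_CPUSET means a zone in the zonelist lies outside the current
 * task's cpuset, CONSTRAINT_MEMORY_POLICY means the allocation was
 * restricted to a subset of the nodes with memory (e.g. MPOL_BIND), and
 * CONSTRAINT_NONE means the machine is genuinely out of memory.
 */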

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. The caller is expected to hold tasklist_lock.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
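/*
 * Returns the chosen task, NULL when nothing killable was found, or
 * ERR_PTR(-1UL) when the caller should back off because some task is
 * already exiting or already has access to memory reserves.
 */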
static struct task_struct *select_bad_process(unsigned long *ppoints,
					      struct mem_cgroup *mem)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	struct timespec uptime;
	*ppoints = 0;

	do_posix_clock_monotonic_gettime(&uptime);
	do_each_thread(g, p) {
		unsigned long points;

		/*
		 * skip kernel threads and tasks which have already released
		 * their mm.
		 */
		if (!p->mm)
			continue;
		/* skip the init task */
		if (is_global_init(p))
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;

		/*
		 * This task already has access to memory reserves and is
		 * being killed. Don't allow any other task access to the
		 * memory reserve.
		 *
		 * Note: this may have a chance of deadlock if it gets
		 * blocked waiting for another task which itself is waiting
		 * for memory. Is there a better alternative?
		 */
		if (test_tsk_thread_flag(p, TIF_MEMDIE))
			return ERR_PTR(-1UL);

		/*
		 * This is in the process of releasing memory so wait for it
		 * to finish before killing some other task by mistake.
		 *
		 * However, if p is the current task, we allow the 'kill' to
		 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
		 * which will allow it to gain access to memory reserves in
		 * the process of exiting and releasing its resources.
		 * Otherwise we could get an easy OOM deadlock.
		 */
		if (p->flags & PF_EXITING) {
			if (p != current)
				return ERR_PTR(-1UL);

			chosen = p;
			*ppoints = ULONG_MAX;
		}

		if (p->oomkilladj == OOM_DISABLE)
			continue;

		points = badness(p, uptime.tv_sec);
		if (points > *ppoints || !chosen) {
			chosen = p;
			*ppoints = points;
		}
	} while_each_thread(g, p);

	return chosen;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: target memory controller
 *
 * Dumps the current memory state of all system tasks, excluding kernel
 * threads. State information includes the task's pid, uid, tgid, vm size,
 * rss, cpu, oom_adj score, and name.
 *
 * If @mem is non-NULL, only tasks that are a member of that mem_cgroup
 * are shown.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem)
{
	struct task_struct *g, *p;

	printk(KERN_INFO "[ pid ]   uid  tgid total_vm      rss cpu oom_adj "
	       "name\n");
	do_each_thread(g, p) {
		/*
		 * total_vm and rss sizes do not exist for tasks with a
		 * detached mm so there's no need to report them.
		 */
		if (!p->mm)
			continue;
		if (mem && !task_in_mem_cgroup(p, mem))
			continue;

		task_lock(p);
		printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d     %3d %s\n",
		       p->pid, p->uid, p->tgid, p->mm->total_vm,
		       get_mm_rss(p->mm), (int)task_cpu(p), p->oomkilladj,
		       p->comm);
		task_unlock(p);
	} while_each_thread(g, p);
}

/*
 * Send SIGKILL to the selected process irrespective of the CAP_SYS_RAWIO
 * flag, though it's unlikely that we select a process with CAP_SYS_RAWIO
 * set.
 */
static void __oom_kill_task(struct task_struct *p, int verbose)
{
	if (is_global_init(p)) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill init!\n");
		return;
	}

	if (!p->mm) {
		WARN_ON(1);
		printk(KERN_WARNING "tried to kill an mm-less task!\n");
		return;
	}

	if (verbose)
		printk(KERN_ERR "Killed process %d (%s)\n",
		       task_pid_nr(p), p->comm);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->rt.time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);

	force_sig(SIGKILL, p);
}

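/*
 * oom_kill_task - kill @p and every other user of its mm.
 * Returns 0 on success, nonzero if @p is unkillable: it has no mm, or
 * another user of the mm has oomkilladj set to OOM_DISABLE.
 */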
static int oom_kill_task(struct task_struct *p)
{
	struct mm_struct *mm;
	struct task_struct *g, *q;

	mm = p->mm;

	/* WARNING: mm may not be dereferenced since we did not obtain its
	 * value from get_task_mm(p). This is OK since all we need to do is
	 * compare mm to q->mm below.
	 *
	 * Furthermore, even if mm contains a non-NULL value, p->mm may
	 * change to NULL at any time since we do not hold task_lock(p).
	 * However, this is of no concern to us.
	 */

	if (mm == NULL)
		return 1;

	/*
	 * Don't kill the process if any threads are set to OOM_DISABLE
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && q->oomkilladj == OOM_DISABLE)
			return 1;
	} while_each_thread(g, q);

	__oom_kill_task(p, 1);

	/*
	 * kill all processes that share the ->mm (i.e. all threads),
	 * but are in a different thread group. Don't let them have access
	 * to memory reserves though, otherwise we might deplete all memory.
	 */
	do_each_thread(g, q) {
		if (q->mm == mm && !same_thread_group(q, p))
			force_sig(SIGKILL, q);
	} while_each_thread(g, q);

	return 0;
}

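/*
 * oom_kill_process - dump diagnostics (ratelimited), then kill either a
 * child of @p that has its own mm or, failing that, @p itself.
 * Returns 0 if something was killed, nonzero so the caller can retry
 * victim selection.
 */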
static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			    unsigned long points, struct mem_cgroup *mem,
			    const char *message)
{
	struct task_struct *c;

	if (printk_ratelimit()) {
		printk(KERN_WARNING "%s invoked oom-killer: "
			"gfp_mask=0x%x, order=%d, oomkilladj=%d\n",
			current->comm, gfp_mask, order, current->oomkilladj);
		dump_stack();
		show_mem();
		if (sysctl_oom_dump_tasks)
			dump_tasks(mem);
	}

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	if (p->flags & PF_EXITING) {
		__oom_kill_task(p, 0);
		return 0;
	}

	printk(KERN_ERR "%s: kill process %d (%s) score %lu or a child\n",
	       message, task_pid_nr(p), p->comm, points);

	/*
	 * Try to kill a child first: it loses less accumulated work than the
	 * parent and is often the actual memory hog. Children sharing the
	 * parent's mm are skipped, since killing them would not free the mm.
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == p->mm)
			continue;
		if (!oom_kill_task(c))
			return 0;
	}
	return oom_kill_task(p);
}
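
/*
 * With CONFIG_CGROUP_MEM_RES_CTLR, mem_cgroup_out_of_memory() below is
 * invoked from mm/memcontrol.c when a memory cgroup exceeds its limit;
 * it reuses select_bad_process() with @mem as the cgroup filter.
 */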

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
{
	unsigned long points = 0;
	struct task_struct *p;

	cgroup_lock();
	read_lock(&tasklist_lock);
retry:
	p = select_bad_process(&points, mem);
	if (PTR_ERR(p) == -1UL)
		goto out;

	if (!p)
		p = current;

	if (oom_kill_process(p, gfp_mask, 0, points, mem,
				"Memory cgroup out of memory"))
		goto retry;
out:
	read_unlock(&tasklist_lock);
	cgroup_unlock();
}
#endif

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
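
/*
 * Example (hypothetical driver code): a notifier that drops a cache and
 * reports how many pages it freed by adding to the unsigned long that
 * out_of_memory() passes via the notifier's void * argument:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long dummy, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += shrink_my_cache();
 *		return NOTIFY_OK;
 *	}
 *
 * If the chain reports any freed pages, out_of_memory() below returns
 * without killing anything.
 */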

/*
 * Try to acquire the OOM killer lock for the zones in zonelist. Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist. Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_mutex);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_mutex so a
		 * parallel invocation of try_set_zone_oom() doesn't succeed
		 * when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_mutex);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_mutex);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_mutex);
}
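
/*
 * try_set_zone_oom() and clear_zonelist_oom() are meant to bracket a call
 * to out_of_memory() (see __alloc_pages() in mm/page_alloc.c): if
 * try_set_zone_oom() fails, another OOM kill covering one of these zones
 * is already in flight and the caller should simply wait and retry the
 * allocation instead of invoking the killer again.
 */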

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: the allocation order: the request is for 2^order contiguous pages
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * or trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
{
	struct task_struct *p;
	unsigned long points = 0;
	unsigned long freed = 0;
	enum oom_constraint constraint;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	if (sysctl_panic_on_oom == 2)
		panic("out of memory. Compulsory panic_on_oom is selected.\n");

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask);
	read_lock(&tasklist_lock);

	switch (constraint) {
	case CONSTRAINT_MEMORY_POLICY:
		oom_kill_process(current, gfp_mask, order, points, NULL,
				"No available memory (MPOL_BIND)");
		break;

	case CONSTRAINT_NONE:
		if (sysctl_panic_on_oom)
			panic("out of memory. panic_on_oom is selected\n");
		/* Fall-through */
	case CONSTRAINT_CPUSET:
		if (sysctl_oom_kill_allocating_task) {
			oom_kill_process(current, gfp_mask, order, points, NULL,
					"Out of memory (oom_kill_allocating_task)");
			break;
		}
retry:
		/*
		 * Rambo mode: Shoot down a process and hope it solves whatever
		 * issues we may have.
		 */
		p = select_bad_process(&points, NULL);

		if (PTR_ERR(p) == -1UL)
			goto out;

		/* Found nothing?!?! Either we hang forever, or we panic. */
		if (!p) {
			read_unlock(&tasklist_lock);
			panic("Out of memory and no killable processes...\n");
		}

		if (oom_kill_process(p, gfp_mask, order, points, NULL,
				     "Out of memory"))
			goto retry;

		break;
	}

out:
	read_unlock(&tasklist_lock);

	/*
	 * Give the selected process a good chance of killing itself before we
	 * retry the allocation, unless that process is the current task.
	 */
	if (!test_thread_flag(TIF_MEMDIE))
		schedule_timeout_uninterruptible(1);
}