/*
 * linux/mm/oom_kill.c
 *
 * Copyright (C) 1998,2000 Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 * Copyright (C) 2010 Google, Inc.
 *	Rewritten by David Rientjes
 *
 * The routines in this file are used to kill a process when
 * we're seriously out of memory.  This gets called from __alloc_pages()
 * in mm/page_alloc.c when we really run out of memory.
 *
 * Since we won't call these routines often (on a well-configured
 * machine) this file will double as a 'coding guide' and a signpost
 * for newbie kernel hackers.  It features several pointers to major
 * kernel subsystems and hints as to where to find out what things do.
 */
19 | ||
20 | #include <linux/oom.h> | |
21 | #include <linux/mm.h> | |
22 | #include <linux/err.h> | |
23 | #include <linux/gfp.h> | |
24 | #include <linux/sched.h> | |
25 | #include <linux/swap.h> | |
26 | #include <linux/timex.h> | |
27 | #include <linux/jiffies.h> | |
28 | #include <linux/cpuset.h> | |
29 | #include <linux/export.h> | |
30 | #include <linux/notifier.h> | |
31 | #include <linux/memcontrol.h> | |
32 | #include <linux/mempolicy.h> | |
33 | #include <linux/security.h> | |
34 | #include <linux/ptrace.h> | |
35 | #include <linux/freezer.h> | |
36 | #include <linux/ftrace.h> | |
37 | #include <linux/ratelimit.h> | |
38 | ||
39 | #define CREATE_TRACE_POINTS | |
40 | #include <trace/events/oom.h> | |
41 | ||
42 | int sysctl_panic_on_oom; | |
43 | int sysctl_oom_kill_allocating_task; | |
44 | int sysctl_oom_dump_tasks = 1; | |
45 | ||
46 | DEFINE_MUTEX(oom_lock); | |
47 | ||
48 | #ifdef CONFIG_NUMA | |
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task whose threads should be considered
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

/*
 * order == -1 means the oom kill was requested by sysrq; otherwise the
 * order is used only for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

/* Return true if the task is not suitable as an OOM victim candidate. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* For mem_cgroup_out_of_memory(): skip p if it is not in the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @memcg: memory controller to scope the selection, or NULL
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
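
/*
 * A worked example of the scoring above, with illustrative numbers rather
 * than values from any real machine: assume totalpages = 1000000 pages.
 * A task whose rss, swap entries and page tables sum to 100000 pages
 * starts with 100000 points.  If it runs as root it loses 3%:
 * 100000 - 3000 = 97000.  An oom_score_adj of +300 then adds
 * 300 * (1000000 / 1000) = 300000 points, so each oom_score_adj unit
 * biases the score by roughly 0.1% of totalpages.
 */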

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct oom_control *oc,
					     unsigned long *totalpages)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	/* Default to all available memory */
	*totalpages = totalram_pages + total_swap_pages;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * Reach here only when __GFP_NOFAIL is used.  So we should avoid
	 * killing current; we have to fall back to a random task kill in
	 * this case.  Ideally this would be CONSTRAINT_THISNODE, but there
	 * is no way to handle it for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct oom_control *oc,
					     unsigned long *totalpages)
{
	*totalpages = totalram_pages + total_swap_pages;
	return CONSTRAINT_NONE;
}
#endif
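
/*
 * Example of the constrained accounting above, with hypothetical numbers:
 * on a two-node machine, if a mempolicy binds the allocation to a node
 * with 262144 spanned pages and the machine has 131072 pages of swap,
 * *totalpages becomes 262144 + 131072 = 393216 pages instead of the
 * unconstrained totalram_pages + total_swap_pages.
 */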

enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
			struct task_struct *task, unsigned long totalpages)
{
	if (oom_unkillable_task(task, NULL, oc->nodemask))
		return OOM_SCAN_CONTINUE;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves.
	 */
	if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
		if (!is_sysrq_oom(oc))
			return OOM_SCAN_ABORT;
	}
	if (!task->mm)
		return OOM_SCAN_CONTINUE;

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task))
		return OOM_SCAN_SELECT;

	if (task_will_free_mem(task) && !is_sysrq_oom(oc))
		return OOM_SCAN_ABORT;

	return OOM_SCAN_OK;
}
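
/*
 * Summary of the scan results above: OOM_SCAN_SELECT picks the task
 * unconditionally, OOM_SCAN_CONTINUE skips it, OOM_SCAN_ABORT stops the
 * whole tasklist scan (a victim is already on its way out), and
 * OOM_SCAN_OK means the task should be scored with oom_badness().
 * select_bad_process() below acts on each of these cases.
 */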

/*
 * Simple selection loop.  We choose the process with the highest number
 * of 'points'.  Returns (struct task_struct *)-1UL on scan abort.
 */
static struct task_struct *select_bad_process(struct oom_control *oc,
		unsigned int *ppoints, unsigned long totalpages)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	unsigned long chosen_points = 0;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		unsigned int points;

		switch (oom_scan_process_thread(oc, p, totalpages)) {
		case OOM_SCAN_SELECT:
			chosen = p;
			chosen_points = ULONG_MAX;
			/* fall through */
		case OOM_SCAN_CONTINUE:
			continue;
		case OOM_SCAN_ABORT:
			rcu_read_unlock();
			return (struct task_struct *)(-1UL);
		case OOM_SCAN_OK:
			break;
		}
		points = oom_badness(p, NULL, oc->nodemask, totalpages);
		if (!points || points < chosen_points)
			continue;
		/* Prefer thread group leaders for display purposes */
		if (points == chosen_points && thread_group_leader(chosen))
			continue;

		chosen = p;
		chosen_points = points;
	}
	if (chosen)
		get_task_struct(chosen);
	rcu_read_unlock();

	*ppoints = chosen_points * 1000 / totalpages;
	return chosen;
}
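
/*
 * Note for callers: select_bad_process() returns three kinds of value -
 * NULL (no eligible task was found), (struct task_struct *)-1UL (the
 * scan was aborted because a victim is already exiting), or a task on
 * which a reference has been taken with get_task_struct() that the
 * caller must drop.  out_of_memory() below handles all three cases.
 */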

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ] uid tgid total_vm rss nr_ptes nr_pmds swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			atomic_long_read(&task->mm->nr_ptes),
			mm_nr_pmds(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}

static void dump_header(struct oom_control *oc, struct task_struct *p,
			struct mem_cgroup *memcg)
{
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, oc->order,
		current->signal->oom_score_adj);
	cpuset_print_current_mems_allowed();
	dump_stack();
	if (memcg)
		mem_cgroup_print_oom_info(memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

bool oom_killer_disabled __read_mostly;

/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 */
void mark_oom_victim(struct task_struct *tsk)
{
	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;
	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because the OOM killer would not be able to
	 * free any memory and would livelock otherwise.
	 * freezing_slow_path() will tell the freezer that TIF_MEMDIE
	 * tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_disable - disable OOM killer
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed.
 *
 * The function cannot be called when there are runnable user tasks because
 * userspace would see unexpected allocation failures as a result.  Any new
 * usage of this function should be discussed with the MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(void)
{
	/*
	 * Make sure to not race with an ongoing OOM killer
	 * and that current is not the victim.
	 */
	mutex_lock(&oom_lock);
	if (test_thread_flag(TIF_MEMDIE)) {
		mutex_unlock(&oom_lock);
		return false;
	}

	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	wait_event(oom_victims_wait, !atomic_read(&oom_victims));

	return true;
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
}
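
/*
 * A minimal usage sketch for the pair above (in this kernel the real
 * caller is the suspend/hibernation path; the error handling shown here
 * is only illustrative):
 *
 *	if (!oom_killer_disable())
 *		return -EBUSY;		(a victim is still exiting)
 *	... freeze user space and snapshot memory ...
 *	oom_killer_enable();
 */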

/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
static bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

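/*
 * K() converts a page count to kilobytes.  For example, with 4KB pages
 * (PAGE_SHIFT == 12) the macro below shifts left by 2, i.e. K(x) == x * 4.
 */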
#define K(x) ((x) << (PAGE_SHIFT-10))
/*
 * Must be called while holding a reference to p, which will be released upon
 * returning.
 */
void oom_kill_process(struct oom_control *oc, struct task_struct *p,
		      unsigned int points, unsigned long totalpages,
		      struct mem_cgroup *memcg, const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly.
	 */
	task_lock(p);
	if (p->mm && task_will_free_mem(p)) {
		mark_oom_victim(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p, memcg);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child, memcg, oc->nodemask,
						   totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	read_unlock(&tasklist_lock);

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	atomic_inc(&mm->mm_count);
	/*
	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
	 * the OOM victim from depleting the memory reserves from the user
	 * space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it is contended by another thread trying to allocate memory
	 * itself.  That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		if (is_global_init(p))
			continue;
		if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
			continue;

		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint,
			struct mem_cgroup *memcg)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL, memcg);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * or trying to be smart about which process to kill.  Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	struct task_struct *p;
	unsigned long totalpages;
	unsigned long freed = 0;
	unsigned int uninitialized_var(points);
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return true;

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 *
	 * But don't select if current has already released its mm and cleared
	 * TIF_MEMDIE flag at exit_mm(), otherwise an OOM livelock may occur.
	 */
	if (current->mm &&
	    (fatal_signal_pending(current) || task_will_free_mem(current))) {
		mark_oom_victim(current);
		return true;
	}

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(oc, &totalpages);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint, NULL);

	if (sysctl_oom_kill_allocating_task && current->mm &&
	    !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oom_kill_process(oc, current, 0, totalpages, NULL,
				 "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	p = select_bad_process(oc, &points, totalpages);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p && !is_sysrq_oom(oc)) {
		dump_header(oc, NULL, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (p && p != (void *)-1UL) {
		oom_kill_process(oc, p, points, totalpages, NULL,
				 "Out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return true;
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If oom_lock is already held, a parallel oom killing
 * is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	if (!out_of_memory(&oc)) {
		/*
		 * There shouldn't be any user tasks runnable while the
		 * OOM killer is disabled, so the current task has to
		 * be a racing OOM victim which oom_killer_disable()
		 * is waiting for.
		 */
		WARN_ON(test_thread_flag(TIF_MEMDIE));
	}

	mutex_unlock(&oom_lock);
}