/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers.  It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}

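/*
 * Illustrative usage sketch (not part of the original file): a caller of
 * find_lock_task_mm() owns the task lock on success and must drop it, as
 * dump_tasks() below does:
 *
 *	struct task_struct *t = find_lock_task_mm(p);
 *	if (t) {
 *		unsigned long rss = get_mm_rss(t->mm);	(->mm stable here)
 *		task_unlock(t);
 *	}
 */
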
/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When called from mem_cgroup_out_of_memory() and p is not a member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/*
 * Print out unreclaimable slabs info when unreclaimable slabs amount is greater
 * than all user memory (LRU pages)
 */
static bool is_dump_unreclaim_slabs(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru);
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable or have been already oom reaped or are in
	 * the middle of vfork
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/*
	 * Root processes get 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}

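/*
 * Worked example of the scoring above (illustrative figures, not part of
 * the original file): with totalpages = 1048576 (4GB in 4kB pages), a
 * task whose rss + swapents + page tables sum to 262144 pages has a
 * baseline of 262144 points, i.e. 25% of RAM.  An oom_score_adj of 100
 * then adds 100 * (1048576 / 1000) = 104800 points (integer division),
 * lifting the score to roughly 35% of RAM.
 */
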
enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};

/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_limit(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * We reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current; we have to kill a random task in this case.
	 * Hopefully CONSTRAINT_THISNODE would help, but there is no way to
	 * handle it for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure is caused by the cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}

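/*
 * Illustrative note (not part of the original file): for a mempolicy
 * constrained OOM bound to nodes 1 and 3, oc->totalpages becomes swap
 * plus the spanned pages of those two nodes only.  Since oom_badness()
 * normalizes oom_score_adj against totalpages, a constrained OOM also
 * shrinks the weight of each oom_score_adj unit accordingly.
 */
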
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	unsigned long points;

	if (oom_unkillable_task(task, NULL, oc->nodemask))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = ULONG_MAX;
		goto select;
	}

	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
	if (!points || points < oc->chosen_points)
		goto next;

	/* Prefer thread group leaders for display purposes */
	if (points == oc->chosen_points && thread_group_leader(oc->chosen))
		goto next;
select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}

/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'.  In case the scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}

	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}

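/*
 * Illustrative note (not part of the original file): the final statement
 * above rescales the raw badness into tenth-of-a-percent units of
 * allowed memory, e.g. chosen_points = 524288 with totalpages = 1048576
 * yields 524288 * 1000 / 1048576 = 500, i.e. half of allowed memory.
 */
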
/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			mm_pgtables_bytes(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}

static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), nodemask=%*pbl, order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask,
		nodemask_pr_args(oc->nodemask), oc->order,
		current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	cpuset_print_current_mems_allowed();
	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_info(oc->memcg, p);
	else {
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
		if (is_dump_unreclaim_slabs())
			dump_unreclaimable_slab();
	}
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc->memcg, oc->nodemask);
}

/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))

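/*
 * Illustrative note (not part of the original file): K() converts a page
 * count into kilobytes for the messages below; with 4kB pages
 * (PAGE_SHIFT == 12) it shifts left by 2, so K(100) == 400.
 */
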
/*
 * task->mm can be NULL if the task is the exited group leader. So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);

void __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (!can_madv_dontneed_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_gather tlb;

			tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
			unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
					 NULL);
			tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
		}
	}
}

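/*
 * Illustrative note (not part of the original file): the walk above is
 * effectively an in-kernel MADV_DONTNEED pass over the victim's private
 * VMAs.  Shared and file-backed mappings are skipped because tearing
 * them down could block or require writeback, which the OOM path cannot
 * afford.
 */
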
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	/*
	 * We have to make sure to not race with the victim exit path
	 * and cause premature new oom victim selection:
	 * oom_reap_task_mm		exit_mm
	 *   mmget_not_zero
	 *				  mmput
	 *				    atomic_dec_and_test
	 *				  exit_oom_victim
	 *				[...]
	 *				out_of_memory
	 *				  select_bad_process
	 *				    # no TIF_MEMDIE task selects new victim
	 *  unmap_page_range # frees some memory
	 */
	mutex_lock(&oom_lock);

	if (!down_read_trylock(&mm->mmap_sem)) {
		ret = false;
		trace_skip_task_reaping(tsk->pid);
		goto unlock_oom;
	}

	/*
	 * If the mm has notifiers then we would need to invalidate them around
	 * unmap_page_range and that is risky because notifiers can sleep and
	 * what they do is basically nondeterministic.  So let's have a short
	 * sleep to give the oom victim some more time.
	 * TODO: we really want to get rid of this ugly hack and make sure that
	 * notifiers cannot block for an unbounded amount of time and add
	 * mmu_notifier_invalidate_range_{start,end} around unmap_page_range
	 */
	if (mm_has_notifiers(mm)) {
		up_read(&mm->mmap_sem);
		schedule_timeout_idle(HZ);
		goto unlock_oom;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_sem for reading because it serializes against the
	 * down_write();up_write() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		up_read(&mm->mmap_sem);
		trace_skip_task_reaping(tsk->pid);
		goto unlock_oom;
	}

	trace_start_task_reaping(tsk->pid);

	__oom_reap_task_mm(mm);

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
	up_read(&mm->mmap_sem);

	trace_finish_task_reaping(tsk->pid);
unlock_oom:
	mutex_unlock(&oom_lock);
	return ret;
}

#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES)
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from OOM killer because it has been either reaped or
	 * somebody can't call up_write(mmap_sem).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}

static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}

static void wake_oom_reaper(struct task_struct *tsk)
{
	/* tsk is already queued? */
	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}

static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */

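/*
 * Illustrative note (not part of the original file): wake_oom_reaper()
 * pushes victims onto a spinlock-protected singly-linked LIFO threaded
 * through ->oom_reaper_list, so the most recently queued victim is
 * reaped first; oom_reaper() pops one entry per wakeup.
 */
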
/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and caller has to guarantee it is stable (either
 * hold task_lock or operate on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen because OOM killer wouldn't be able to free
	 * any memory and livelock. freezing_slow_path will tell the freezer
	 * that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}

/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}

/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}

/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new use of this function should be discussed with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}

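/*
 * Illustrative usage sketch (hypothetical caller, not part of the
 * original file): a freezer-style path would disable the killer with a
 * bounded wait and re-enable it on the way out:
 *
 *	if (!oom_killer_disable(msecs_to_jiffies(20000)))
 *		return -EBUSY;	(victims still in flight, abort)
 *	...
 *	oom_killer_enable();
 */
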
static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}

/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because the task might have passed its
	 * exit_mm and exit_oom_victim already. oom_reaper could have rescued
	 * that but do not rely on it for now. We can consider
	 * find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there is
	 * only a small chance it will free any more
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}

static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *p = oc->chosen;
	unsigned int points = oc->chosen_points;
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);
	bool can_oom_reap = true;

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly
	 */
	task_lock(p);
	if (task_will_free_mem(p)) {
		mark_oom_victim(p);
		wake_oom_reaper(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child,
				oc->memcg, oc->nodemask, oc->totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	read_unlock(&tasklist_lock);

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	count_memcg_event_mm(mm, OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it's contended by another thread trying to allocate memory
	 * itself.  That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No use_mm() user needs to read from the userspace so we are
		 * ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K

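/*
 * Illustrative note (not part of the original file): the kill sequence
 * above is deliberately ordered: SIGKILL is sent before mark_oom_victim()
 * grants TIF_MEMDIE reserve access, so a victim can never dip into memory
 * reserves without already having a pending fatal signal.
 */
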
/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);

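/*
 * Illustrative sketch of a notifier user (hypothetical names, not part
 * of the original file): a driver that can shed caches reports pages
 * released through the *freed argument that out_of_memory() passes in:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *		*freed += my_driver_shrink_caches();	(hypothetical helper)
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *	...
 *	register_oom_notifier(&my_oom_nb);
 */
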
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude the 0 mask; all other users should have at
	 * least ___GFP_DIRECT_RECLAIM to get here.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	constraint = constrained_alloc(oc);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
		dump_header(oc, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL) {
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return !!oc->chosen;
}

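/*
 * Illustrative summary (not part of the original file) of the decision
 * order above: bail out if the killer is disabled; let notifiers try to
 * reclaim; fast-path an already dying current; skip OOMs that cannot do
 * fs-backed reclaim; honor panic_on_oom; optionally kill the allocating
 * task; otherwise select and kill the highest scorer.
 */
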
/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}