// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers.  It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */
#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/debug.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
/*
 * Serializes oom killer invocations (out_of_memory()) from all contexts to
 * prevent overly eager oom killing (e.g. when the oom killer is invoked
 * from different domains).
 *
 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
 * and mark_oom_victim.
 */
DEFINE_MUTEX(oom_lock);
/* Serializes oom_score_adj and oom_score_adj_min updates */
DEFINE_MUTEX(oom_adj_mutex);
static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}
#ifdef CONFIG_NUMA
/**
 * oom_cpuset_eligible() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @oc: pointer to struct oom_control
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 *
 * This function assumes oom-killer context and that 'current' has triggered
 * the oom-killer.
 */
static bool oom_cpuset_eligible(struct task_struct *start,
				struct oom_control *oc)
{
	struct task_struct *tsk;
	bool ret = false;
	const nodemask_t *mask = oc->nodemask;

	if (is_memcg_oom(oc))
		return true;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_in_oom_domain(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool oom_cpuset_eligible(struct task_struct *tsk, struct oom_control *oc)
{
	return true;
}
#endif /* CONFIG_NUMA */
/*
 * The process p may have detached its own ->mm while exiting or through
 * kthread_use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}
/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}
/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	return false;
}
/*
 * Check whether the amount of unreclaimable slab is greater than
 * all user memory (LRU pages).
 * dump_unreclaimable_slab() could help in the case that
 * the oom is due to too much unreclaimable slab used by the kernel.
 */
static bool should_dump_unreclaim_slab(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B) > nr_lru);
}
/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
long oom_badness(struct task_struct *p, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p))
		return LONG_MIN;

	p = find_lock_task_mm(p);
	if (!p)
		return LONG_MIN;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable or have been already oom reaped or are in
	 * the middle of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return LONG_MIN;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	return points;
}
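/*
 * Worked example of the normalization above: with totalpages == 1,000,000,
 * a task whose rss, swap entries and page tables sum to 250,000 pages has a
 * baseline score of 250,000.  An oom_score_adj of 500 then adds
 * 500 * (1,000,000 / 1000) == 500,000, so each oom_score_adj unit is worth
 * roughly 0.1% of allowed memory.
 */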
static const char * const oom_constraint_text[] = {
	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
};
/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type highest_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages() + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * Reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current; we have to kill a random task in this case.
	 * Ideally this would be CONSTRAINT_THISNODE, but there is no way
	 * to handle that for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			highest_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_present_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
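/*
 * Example of the totalpages accounting above: for a mempolicy-constrained
 * oom where oc->nodemask covers only nodes 0 and 2, totalpages becomes
 * total_swap_pages plus the present pages of nodes 0 and 2, so badness
 * scores are normalized to the memory the policy can actually use.
 */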
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	long points;

	if (oom_unkillable_task(task))
		goto next;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = LONG_MAX;
		goto select;
	}

	points = oom_badness(task, oc->totalpages);
	if (points == LONG_MIN || points < oc->chosen_points)
		goto next;

select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}
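/*
 * Note on the convention above: a nonzero return aborts the scan, whether it
 * is driven by mem_cgroup_scan_tasks() or by the plain for_each_process()
 * loop below, and oc->chosen == -1UL tells the caller that a previous victim
 * is still on its way out, so no new kill should be attempted yet.
 */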
/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	oc->chosen_points = LONG_MIN;

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}
}
static int dump_task(struct task_struct *p, void *arg)
{
	struct oom_control *oc = arg;
	struct task_struct *task;

	if (oom_unkillable_task(p))
		return 0;

	/* p may not have freeable memory in nodemask */
	if (!is_memcg_oom(oc) && !oom_cpuset_eligible(p, oc))
		return 0;

	task = find_lock_task_mm(p);
	if (!task) {
		/*
		 * All of p's threads have already detached their mm's. There's
		 * no need to report them; they can't be oom killed anyway.
		 */
		return 0;
	}

	pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
		task->pid, from_kuid(&init_user_ns, task_uid(task)),
		task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
		mm_pgtables_bytes(task->mm),
		get_mm_counter(task->mm, MM_SWAPENTS),
		task->signal->oom_score_adj, task->comm);
	task_unlock(task);

	return 0;
}
/**
 * dump_tasks - dump current memory state of all system tasks
 * @oc: pointer to struct oom_control
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct oom_control *oc)
{
	pr_info("Tasks state (memory values in pages):\n");
	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");

	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			dump_task(p, oc);
		rcu_read_unlock();
	}
}
static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
{
	/* one line summary of the oom killer context. */
	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
			oom_constraint_text[oc->constraint],
			nodemask_pr_args(oc->nodemask));
	cpuset_print_current_mems_allowed();
	mem_cgroup_print_oom_context(oc->memcg, victim);
	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
		from_kuid(&init_user_ns, task_uid(victim)));
}
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
			current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_meminfo(oc->memcg);
	else {
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
		if (should_dump_unreclaim_slab())
			dump_unreclaimable_slab();
	}
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc);
	if (p)
		dump_oom_summary(oc, p);
}
/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;
#define K(x) ((x) << (PAGE_SHIFT-10))
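/*
 * K() converts a page count to kilobytes: with 4 KiB pages (PAGE_SHIFT == 12)
 * the shift is by 2, so K(x) == x * 4.  It is used below to print rss and
 * friends in kB rather than pages.
 */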
/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}
#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);
bool __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!can_madv_lru_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_notifier_range range;
			struct mmu_gather tlb;

			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
						vma, mm, vma->vm_start,
						vma->vm_end);
			tlb_gather_mmu(&tlb, mm);
			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
				tlb_finish_mmu(&tlb);
				ret = false;
				continue;
			}
			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
			mmu_notifier_invalidate_range_end(&range);
			tlb_finish_mmu(&tlb);
		}
	}

	return ret;
}
/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or part of the address space
 * has been reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!mmap_read_trylock(mm)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_lock for reading because it serializes against the
	 * mmap_write_lock();mmap_write_unlock() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* failed to reap part of the address space. Try again later */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	mmap_read_unlock(mm);

	return ret;
}
#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the mmap_read_trylock(mm) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    test_bit(MMF_OOM_SKIP, &mm->flags))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	sched_show_task(tsk);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from the OOM killer because it has been either reaped
	 * or somebody can't call mmap_write_unlock(mm).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}
static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}
static void wake_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}
static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */
/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and the caller has to guarantee it is stable
 * (either hold task_lock or operate on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen because OOM killer wouldn't be able to free
	 * any memory and livelock. freezing_slow_path will tell the freezer
	 * that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}
/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}
/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}
/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be consulted with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}
static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}
/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * it operates on the current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without mm because it might have passed its exit_mm and
	 * exit_oom_victim. oom_reaper could have rescued that but do not rely
	 * on that for now. We can consider find_lock_task_mm in future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there are
	 * only small chances it will free some more
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to make sure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
static void __oom_kill_process(struct task_struct *victim, const char *message)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		pr_info("%s: OOM victim %d (%s) is already exiting. Skip killing the task\n",
			message, task_pid_nr(victim), victim->comm);
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
		message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
		K(get_mm_counter(mm, MM_ANONPAGES)),
		K(get_mm_counter(mm, MM_FILEPAGES)),
		K(get_mm_counter(mm, MM_SHMEMPAGES)),
		from_kuid(&init_user_ns, task_uid(victim)),
		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_lock livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it's contended by another thread trying to allocate memory
	 * itself.  That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No kthread_use_mm() user needs to read from userspace so
		 * we are ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
/*
 * Kill provided task unless it's secured by setting
 * oom_score_adj to OOM_SCORE_ADJ_MIN.
 */
static int oom_kill_memcg_member(struct task_struct *task, void *message)
{
	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
	    !is_global_init(task)) {
		get_task_struct(task);
		__oom_kill_process(task, message);
	}
	return 0;
}
static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		wake_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs))
		dump_header(oc, victim);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim, message);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
				      (void *)message);
		mem_cgroup_put(oom_group);
	}
}
/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (oc->constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude the 0 mask - all other users should have
	 * at least ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom()
	 * has to invoke the OOM killer even if it is a GFP_NOFS allocation.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	oc->constraint = constrained_alloc(oc);
	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current) &&
	    oom_cpuset_eligible(current, oc) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! */
	if (!oc->chosen) {
		dump_header(oc, NULL);
		pr_warn("Out of memory and no killable processes...\n");
		/*
		 * If we got here due to an actual allocation at the
		 * system level, we cannot survive this and will enter
		 * an endless loop in the allocator. Bail out now.
		 */
		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
			panic("System is deadlocked on memory\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL)
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
	return !!oc->chosen;
}
/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}