diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c729a4c4a1ace9ae4b97fb9602d3a48d726f177e..262f752d3d516cd8af26cfc58d422bb79bb988b7 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -28,6 +28,7 @@
 #include <linux/sched/task.h>
 #include <linux/sched/debug.h>
 #include <linux/swap.h>
+#include <linux/syscalls.h>
 #include <linux/timex.h>
 #include <linux/jiffies.h>
 #include <linux/cpuset.h>
@@ -634,7 +635,7 @@ done:
         */
        set_bit(MMF_OOM_SKIP, &mm->flags);
 
-       /* Drop a reference taken by wake_oom_reaper */
+       /* Drop a reference taken by queue_oom_reaper */
        put_task_struct(tsk);
 }
 
@@ -644,12 +645,12 @@ static int oom_reaper(void *unused)
                struct task_struct *tsk = NULL;
 
                wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
-               spin_lock(&oom_reaper_lock);
+               spin_lock_irq(&oom_reaper_lock);
                if (oom_reaper_list != NULL) {
                        tsk = oom_reaper_list;
                        oom_reaper_list = tsk->oom_reaper_list;
                }
-               spin_unlock(&oom_reaper_lock);
+               spin_unlock_irq(&oom_reaper_lock);
 
                if (tsk)
                        oom_reap_task(tsk);
@@ -658,22 +659,48 @@ static int oom_reaper(void *unused)
        return 0;
 }
 
-static void wake_oom_reaper(struct task_struct *tsk)
+static void wake_oom_reaper(struct timer_list *timer)
 {
-       /* mm is already queued? */
-       if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
-               return;
+       struct task_struct *tsk = container_of(timer, struct task_struct,
+                       oom_reaper_timer);
+       struct mm_struct *mm = tsk->signal->oom_mm;
+       unsigned long flags;
 
-       get_task_struct(tsk);
+       /* The victim managed to terminate on its own - see exit_mmap */
+       if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
+               put_task_struct(tsk);
+               return;
+       }
 
-       spin_lock(&oom_reaper_lock);
+       spin_lock_irqsave(&oom_reaper_lock, flags);
        tsk->oom_reaper_list = oom_reaper_list;
        oom_reaper_list = tsk;
-       spin_unlock(&oom_reaper_lock);
+       spin_unlock_irqrestore(&oom_reaper_lock, flags);
        trace_wake_reaper(tsk->pid);
        wake_up(&oom_reaper_wait);
 }
 
+/*
+ * Give the OOM victim time to exit naturally before invoking the oom reaper.
+ * The timer's timeout is arbitrary... the longer it is, the longer the worst
+ * case scenario for the OOM can take. If it is too small, the oom_reaper can
+ * get in the way and release resources needed by the process exit path.
+ * e.g. The futex robust list can sit in Anon|Private memory that gets reaped
+ * before the exit path is able to wake the futex waiters.
+ */
+#define OOM_REAPER_DELAY (2*HZ)
+static void queue_oom_reaper(struct task_struct *tsk)
+{
+       /* mm is already queued? */
+       if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
+               return;
+
+       get_task_struct(tsk);
+       timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
+       tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
+       add_timer(&tsk->oom_reaper_timer);
+}
+
 static int __init oom_init(void)
 {
        oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
@@ -681,7 +708,7 @@ static int __init oom_init(void)
 }
 subsys_initcall(oom_init)
 #else
-static inline void wake_oom_reaper(struct task_struct *tsk)
+static inline void queue_oom_reaper(struct task_struct *tsk)
 {
 }
 #endif /* CONFIG_MMU */
@@ -932,7 +959,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
        rcu_read_unlock();
 
        if (can_oom_reap)
-               wake_oom_reaper(victim);
+               queue_oom_reaper(victim);
 
        mmdrop(mm);
        put_task_struct(victim);
@@ -968,7 +995,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
        task_lock(victim);
        if (task_will_free_mem(victim)) {
                mark_oom_victim(victim);
-               wake_oom_reaper(victim);
+               queue_oom_reaper(victim);
                task_unlock(victim);
                put_task_struct(victim);
                return;
@@ -1066,7 +1093,7 @@ bool out_of_memory(struct oom_control *oc)
         */
        if (task_will_free_mem(current)) {
                mark_oom_victim(current);
-               wake_oom_reaper(current);
+               queue_oom_reaper(current);
                return true;
        }
 
@@ -1119,25 +1146,92 @@ bool out_of_memory(struct oom_control *oc)
 }
 
 /*
- * The pagefault handler calls here because it is out of memory, so kill a
- * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
- * killing is already in progress so do nothing.
+ * The pagefault handler calls here because some allocation has failed. We have
+ * to take care of the memcg OOM here because this is the only safe context
+ * without any locks held, but we let the oom killer triggered from the
+ * allocation context take care of the global OOM.
  */
 void pagefault_out_of_memory(void)
 {
-       struct oom_control oc = {
-               .zonelist = NULL,
-               .nodemask = NULL,
-               .memcg = NULL,
-               .gfp_mask = 0,
-               .order = 0,
-       };
+       static DEFINE_RATELIMIT_STATE(pfoom_rs, DEFAULT_RATELIMIT_INTERVAL,
+                                     DEFAULT_RATELIMIT_BURST);
 
        if (mem_cgroup_oom_synchronize(true))
                return;
 
-       if (!mutex_trylock(&oom_lock))
+       if (fatal_signal_pending(current))
                return;
-       out_of_memory(&oc);
-       mutex_unlock(&oom_lock);
+
+       if (__ratelimit(&pfoom_rs))
+               pr_warn("Huh VM_FAULT_OOM leaked out to the #PF handler. Retrying PF\n");
+}
+
+SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
+{
+#ifdef CONFIG_MMU
+       struct mm_struct *mm = NULL;
+       struct task_struct *task;
+       struct task_struct *p;
+       unsigned int f_flags;
+       bool reap = false;
+       struct pid *pid;
+       long ret = 0;
+
+       if (flags)
+               return -EINVAL;
+
+       pid = pidfd_get_pid(pidfd, &f_flags);
+       if (IS_ERR(pid))
+               return PTR_ERR(pid);
+
+       task = get_pid_task(pid, PIDTYPE_TGID);
+       if (!task) {
+               ret = -ESRCH;
+               goto put_pid;
+       }
+
+       /*
+        * Make sure to choose a thread which still has a reference to mm
+        * during the group exit
+        */
+       p = find_lock_task_mm(task);
+       if (!p) {
+               ret = -ESRCH;
+               goto put_task;
+       }
+
+       if (mmget_not_zero(p->mm)) {
+               mm = p->mm;
+               if (task_will_free_mem(p))
+                       reap = true;
+               else {
+                       /* Error only if the work has not been done already */
+                       if (!test_bit(MMF_OOM_SKIP, &mm->flags))
+                               ret = -EINVAL;
+               }
+       }
+       task_unlock(p);
+
+       if (!reap)
+               goto drop_mm;
+
+       if (mmap_read_lock_killable(mm)) {
+               ret = -EINTR;
+               goto drop_mm;
+       }
+       if (!__oom_reap_task_mm(mm))
+               ret = -EAGAIN;
+       mmap_read_unlock(mm);
+
+drop_mm:
+       if (mm)
+               mmput(mm);
+put_task:
+       put_task_struct(task);
+put_pid:
+       put_pid(pid);
+       return ret;
+#else
+       return -ENOSYS;
+#endif /* CONFIG_MMU */
 }
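
For context, a minimal userspace sketch of exercising the process_mrelease() syscall added above (not part of this patch; it assumes <sys/syscall.h> already defines SYS_pidfd_open and SYS_pidfd_send_signal, and that 448 is the asm-generic syscall number for process_mrelease -- there is no glibc wrapper at the time of writing). The target must already be dying (e.g. after SIGKILL), otherwise the kernel side returns -EINVAL per the task_will_free_mem() check in the hunk above.

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_process_mrelease
#define __NR_process_mrelease 448	/* asm-generic value; assumption for older headers */
#endif

int main(int argc, char **argv)
{
	pid_t pid;
	int pidfd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	pid = (pid_t)atoi(argv[1]);

	/* Obtain a pidfd so the target cannot be recycled under us. */
	pidfd = syscall(SYS_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}

	/* Make the target start dying; process_mrelease() only reaps exiting tasks. */
	if (syscall(SYS_pidfd_send_signal, pidfd, SIGKILL, NULL, 0) < 0) {
		perror("pidfd_send_signal");
		return 1;
	}

	/* flags must be 0; ask the kernel to reap the address space now. */
	if (syscall(__NR_process_mrelease, pidfd, 0) < 0) {
		perror("process_mrelease");
		return 1;
	}

	return 0;
}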