mm: add new mmget() helper
author	Vegard Nossum <vegard.nossum@oracle.com>
	Mon, 27 Feb 2017 22:30:10 +0000 (14:30 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Tue, 28 Feb 2017 02:43:48 +0000 (18:43 -0800)
Apart from adding the helper function itself, the rest of the kernel is
converted mechanically using:

  git grep -l 'atomic_inc.*mm_users' | xargs sed -i 's/atomic_inc(&\(.*\)->mm_users);/mmget\(\1\);/'
  git grep -l 'atomic_inc.*mm_users' | xargs sed -i 's/atomic_inc(&\(.*\)\.mm_users);/mmget\(\&\1\);/'
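
For example, the first pattern rewrites the pointer form and the second
the direct-struct form (note the '&' it has to add), both taken verbatim
from the diff below:

  atomic_inc(&mm->mm_users);       /* becomes */   mmget(mm);
  atomic_inc(&init_mm.mm_users);   /* becomes */   mmget(&init_mm);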

This is needed for a later patch that hooks into the helper, but might
be a worthwhile cleanup on its own.

(Michal Hocko provided most of the kerneldoc comment.)

Link: http://lkml.kernel.org/r/20161218123229.22952-2-vegard.nossum@oracle.com
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/arc/kernel/smp.c
arch/blackfin/mach-common/smp.c
arch/frv/mm/mmu-context.c
arch/metag/kernel/smp.c
arch/sh/kernel/smp.c
arch/xtensa/kernel/smp.c
include/linux/sched.h
kernel/fork.c
mm/swapfile.c
virt/kvm/async_pf.c

diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 6956241816825d6910d854da8f8c89122155fcae..b8e8d394448137e1efc7ce3581860b94f1f7f9b9 100644
@@ -139,7 +139,7 @@ void start_kernel_secondary(void)
        /* MMU, Caches, Vector Table, Interrupts etc */
        setup_processor();
 
-       atomic_inc(&mm->mm_users);
+       mmget(mm);
        mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
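
These secondary-CPU bringup hunks pair the new mmget() with mmgrab(). A
rough sketch of the two reference counts involved (cf. the mmget()
kerneldoc further down), where each helper has a matching release:

  mmget(mm);    /* pin the address space: increments mm->mm_users    */
  mmgrab(mm);   /* pin the mm_struct itself: increments mm->mm_count */
  ...
  mmput(mm);    /* releases the mm_users reference */
  mmdrop(mm);   /* releases the mm_count reference */
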
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index bc5617ef7128be9bd84b07024c37ea7625ffdc78..a2e6db2ce811c94c5f6bbbb5c5ae6581f8fb797e 100644
@@ -307,7 +307,7 @@ void secondary_start_kernel(void)
        local_irq_disable();
 
        /* Attach the new idle task to the global mm. */
-       atomic_inc(&mm->mm_users);
+       mmget(mm);
        mmgrab(mm);
        current->active_mm = mm;
 
diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c
index 81757d55a5b592fed1e38281edc0cbb6a256dff7..3473bde77f566e196984f054264d42795a1ae249 100644
@@ -188,7 +188,7 @@ int cxn_pin_by_pid(pid_t pid)
                task_lock(tsk);
                if (tsk->mm) {
                        mm = tsk->mm;
-                       atomic_inc(&mm->mm_users);
+                       mmget(mm);
                        ret = 0;
                }
                task_unlock(tsk);
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index af9cff547a194e36f5e646de8c58307e792510de..c622293254e4e409aeb57c77a38fad8e7c34dc5a 100644
@@ -344,7 +344,7 @@ asmlinkage void secondary_start_kernel(void)
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
-       atomic_inc(&mm->mm_users);
+       mmget(mm);
        mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index ee379c699c0884de7df5423392f0bcded3261817..edc4769b047eee780c516b23d2d0a06e44de202d 100644
@@ -179,7 +179,7 @@ asmlinkage void start_secondary(void)
 
        enable_mmu();
        mmgrab(mm);
-       atomic_inc(&mm->mm_users);
+       mmget(mm);
        current->active_mm = mm;
 #ifdef CONFIG_MMU
        enter_lazy_tlb(mm, current);
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 9bf5cea3bae4987b65b5c6520c00aafa27338773..fcea72019df798110aa9195fe36219fe668a6e38 100644
@@ -135,7 +135,7 @@ void secondary_start_kernel(void)
 
        /* All kernel threads share the same mm context. */
 
-       atomic_inc(&mm->mm_users);
+       mmget(mm);
        mmgrab(mm);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7cfa5546c8400dbd70534064761b370bf3fb9af9..4a28deb5f210a103d486fa12c509405f92f4ca60 100644
@@ -2948,6 +2948,27 @@ static inline void mmdrop_async(struct mm_struct *mm)
        }
 }
 
+/**
+ * mmget() - Pin the address space associated with a &struct mm_struct.
+ * @mm: The address space to pin.
+ *
+ * Make sure that the address space of the given &struct mm_struct doesn't
+ * go away. This does not protect against parts of the address space being
+ * modified or freed, however.
+ *
+ * Never use this function to pin this address space for an
+ * unbounded/indefinite amount of time.
+ *
+ * Use mmput() to release the reference acquired by mmget().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmget(struct mm_struct *mm)
+{
+       atomic_inc(&mm->mm_users);
+}
+
 static inline bool mmget_not_zero(struct mm_struct *mm)
 {
        return atomic_inc_not_zero(&mm->mm_users);
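
The existing mmget_not_zero() variant covers callers that cannot assume
mm_users is still nonzero (for example, when only an mm_count reference
is held); a hedged sketch of that idiom:

  if (mmget_not_zero(mm)) {
          /* mm_users was nonzero and the address space is now pinned */
          ...
          mmput(mm);
  }
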
diff --git a/kernel/fork.c b/kernel/fork.c
index 348fe73155bc280123d7c961455187b6b640fb67..246bf9aaf9dfddf4632f6994ab6e2bcdd8a02434 100644
@@ -1000,7 +1000,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
                if (task->flags & PF_KTHREAD)
                        mm = NULL;
                else
-                       atomic_inc(&mm->mm_users);
+                       mmget(mm);
        }
        task_unlock(task);
        return mm;
@@ -1188,7 +1188,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
        vmacache_flush(tsk);
 
        if (clone_flags & CLONE_VM) {
-               atomic_inc(&oldmm->mm_users);
+               mmget(oldmm);
                mm = oldmm;
                goto good_mm;
        }
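
The get_task_mm() hunk above is also the usual entry point for taking
such a reference from outside a task; a minimal usage sketch of the
get_task_mm()/mmput() pairing:

  struct mm_struct *mm = get_task_mm(task);  /* mmget() on success */

  if (mm) {
          /* ... operate on the task's address space ... */
          mmput(mm);  /* drop the mm_users reference */
  }
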
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2cac12cc9abe2dbbd7e5e385de91cf93a4ab61d1..7a0713b76668205fc3122cc9cfcd76f1e449c5a1 100644
@@ -1671,7 +1671,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
         * that.
         */
        start_mm = &init_mm;
-       atomic_inc(&init_mm.mm_users);
+       mmget(&init_mm);
 
        /*
         * Keep on scanning until all entries have gone.  Usually,
@@ -1720,7 +1720,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
                if (atomic_read(&start_mm->mm_users) == 1) {
                        mmput(start_mm);
                        start_mm = &init_mm;
-                       atomic_inc(&init_mm.mm_users);
+                       mmget(&init_mm);
                }
 
                /*
@@ -1757,8 +1757,8 @@ int try_to_unuse(unsigned int type, bool frontswap,
                        struct mm_struct *prev_mm = start_mm;
                        struct mm_struct *mm;
 
-                       atomic_inc(&new_start_mm->mm_users);
-                       atomic_inc(&prev_mm->mm_users);
+                       mmget(new_start_mm);
+                       mmget(prev_mm);
                        spin_lock(&mmlist_lock);
                        while (swap_count(*swap_map) && !retval &&
                                        (p = p->next) != &start_mm->mmlist) {
@@ -1781,7 +1781,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
 
                                if (set_start_mm && *swap_map < swcount) {
                                        mmput(new_start_mm);
-                                       atomic_inc(&mm->mm_users);
+                                       mmget(mm);
                                        new_start_mm = mm;
                                        set_start_mm = 0;
                                }
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 3815e940fbeacb444bffc22c581f127d98730a59..2366177172f67cd3480dd5f11b62ce412d9bf567 100644
@@ -204,7 +204,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
        work->addr = hva;
        work->arch = *arch;
        work->mm = current->mm;
-       atomic_inc(&work->mm->mm_users);
+       mmget(work->mm);
        kvm_get_kvm(work->vcpu->kvm);
 
        /* this can't really happen otherwise gfn_to_pfn_async