git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/commitdiff
UBUNTU: SAUCE: s390/mm: fix race on mm->context.flush_mm
author Martin Schwidefsky <schwidefsky@de.ibm.com>
Tue, 12 Sep 2017 09:45:42 +0000 (11:45 +0200)
committer Juerg Haefliger <juerg.haefliger@canonical.com>
Wed, 13 Sep 2017 05:58:25 +0000 (07:58 +0200)
BugLink: http://bugs.launchpad.net/bugs/1708399
The order in __tlb_flush_mm_lazy is to flush the TLB first and then clear
the mm->context.flush_mm bit. This can lead to missed flushes, as the
bit can be set at any time; the order needs to be the other way around.

But this leads to a different race, __tlb_flush_mm_lazy may be called
on two CPUs concurrently. If mm->context.flush_mm is cleared first then
another CPU can bypass __tlb_flush_mm_lazy although the first CPU has
not done the flush yet. In a virtualized environment the time until the
flush is finally completed can be arbitrarily long.

Add a spinlock to serialize __tlb_flush_mm_lazy and use the function
in finish_arch_post_lock_switch as well.
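
In simplified form, the serialized lazy flush then behaves as sketched
below (a condensed view of the tlbflush.h hunk further down, not a
verbatim copy of the file):

	static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
	{
		spin_lock(&mm->context.lock);
		if (mm->context.flush_mm) {
			/*
			 * Clear the bit before flushing so that a flush
			 * request arriving while the flush runs is not lost,
			 * and do both under context.lock so that a concurrent
			 * __tlb_flush_mm_lazy cannot return before this flush
			 * has completed.
			 */
			mm->context.flush_mm = 0;
			__tlb_flush_mm(mm);
		}
		spin_unlock(&mm->context.lock);
	}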

Cc: <stable@vger.kernel.org>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
(cherry-picked from commit 60f07c8ec5fae06c23e9fd7bab67dabce92b3414 linux-next)
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Colin Ian King <colin.king@canonical.com>
Acked-by: Brad Figg <brad.figg@canonical.com>
Signed-off-by: Juerg Haefliger <juerg.haefliger@canonical.com>
arch/s390/include/asm/mmu.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/tlbflush.h

diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index bea785d7f853c12ddd35e7b5e7d6f3d76b87aca7..af85d6b120284e2ee674c082098470a787ca49a5 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -5,6 +5,7 @@
 #include <linux/errno.h>
 
 typedef struct {
+       spinlock_t lock;
        cpumask_t cpu_attach_mask;
        atomic_t flush_count;
        unsigned int flush_mm;
@@ -25,6 +26,7 @@ typedef struct {
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name)                                             \
+       .context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock),           \
        .context.pgtable_lock =                                            \
                        __SPIN_LOCK_UNLOCKED(name.context.pgtable_lock),   \
        .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index d9451467c5f2dc2afe2952fd99a8c0c8cae45276..de93d5a20b94c8e5d86df6adfa93375da8797383 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
 static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
 {
+       spin_lock_init(&mm->context.lock);
        spin_lock_init(&mm->context.pgtable_lock);
        INIT_LIST_HEAD(&mm->context.pgtable_list);
        spin_lock_init(&mm->context.gmap_lock);
@@ -111,8 +112,7 @@ static inline void finish_arch_post_lock_switch(void)
                while (atomic_read(&mm->context.flush_count))
                        cpu_relax();
                cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-               if (mm->context.flush_mm)
-                       __tlb_flush_mm(mm);
+               __tlb_flush_mm_lazy(mm);
                preempt_enable();
        }
        set_fs(current->thread.mm_segment);
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 043c2aab6622826581d675f02e1bf0062c325a5b..eed927aeb08f9501c0285094b0a911a66d117482 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -96,10 +96,12 @@ static inline void __tlb_flush_kernel(void)
 
 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
 {
+       spin_lock(&mm->context.lock);
        if (mm->context.flush_mm) {
-               __tlb_flush_mm(mm);
                mm->context.flush_mm = 0;
+               __tlb_flush_mm(mm);
        }
+       spin_unlock(&mm->context.lock);
 }
 
 /*