git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/commitdiff
UBUNTU: SAUCE: s390/mm: fix local TLB flushing vs. detach of an mm address space
author    Martin Schwidefsky <schwidefsky@de.ibm.com>
          Tue, 12 Sep 2017 09:45:41 +0000 (11:45 +0200)
committer Juerg Haefliger <juerg.haefliger@canonical.com>
          Wed, 13 Sep 2017 05:58:16 +0000 (07:58 +0200)
BugLink: http://bugs.launchpad.net/bugs/1708399
The local TLB flushing code keeps an additional mask in the mm.context,
the cpu_attach_mask. At the time a global flush of an address space is
done, the cpu_attach_mask is copied to the mm_cpumask in order to avoid
future global flushes in case the mm is used by only a single CPU after
the flush.

The trouble is that the reset of the mm_cpumask races against the
detach of an mm address space by switch_mm. The current order is first
the global TLB flush and then the copy of the cpu_attach_mask to the
mm_cpumask; the order needs to be the other way around.
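To make the race concrete, consider the old ordering in a simplified
two-CPU timeline. This is an illustrative sketch of one bad
interleaving, not literal kernel code:

/*
 * CPU 0: __tlb_flush_mm(mm)            CPU 1: running with mm attached
 * -------------------------            -------------------------------
 * __tlb_flush_global();
 *                                      TLB picks up a fresh entry
 *                                      for mm
 *                                      switch_mm() detaches mm:
 *                                        loads the kernel ASCE,
 *                                        clears CPU 1 from
 *                                        mm->context.cpu_attach_mask
 * cpumask_copy(mm_cpumask(mm),
 *         &mm->context.cpu_attach_mask);
 *
 * CPU 1 is now absent from mm_cpumask although its TLB still holds an
 * entry created after the global flush. A later flush sees only CPU 0
 * in mm_cpumask, takes the local path, and the stale entry on CPU 1
 * survives until CPU 1 re-attaches the mm.
 */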

Cc: <stable@vger.kernel.org>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
(cherry picked from commit b3e5dc45fd1ec2aa1de6b80008f9295eb17e0659 linux-next)
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Colin Ian King <colin.king@canonical.com>
Acked-by: Brad Figg <brad.figg@canonical.com>
Signed-off-by: Juerg Haefliger <juerg.haefliger@canonical.com>
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/tlbflush.h

diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 67f7a991c929bb92731c6aafeef4a1255d69c959..d9451467c5f2dc2afe2952fd99a8c0c8cae45276 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -93,7 +93,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        if (prev == next)
                return;
        cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
-       cpumask_set_cpu(cpu, mm_cpumask(next));
        /* Clear old ASCE by loading the kernel ASCE. */
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
@@ -111,7 +110,7 @@ static inline void finish_arch_post_lock_switch(void)
                preempt_disable();
                while (atomic_read(&mm->context.flush_count))
                        cpu_relax();
-
+               cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
                if (mm->context.flush_mm)
                        __tlb_flush_mm(mm);
                preempt_enable();
@@ -126,6 +125,7 @@ static inline void activate_mm(struct mm_struct *prev,
                                struct mm_struct *next)
 {
        switch_mm(prev, next, current);
+       cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
        set_user_asce(next);
 }
 
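Taken together, the mmu_context.h hunks move the mm_cpumask update out
of switch_mm: an attaching CPU registers in cpu_attach_mask first and
publishes itself in mm_cpumask only once no flush is in flight, either
in finish_arch_post_lock_switch or in activate_mm. Condensed, the
attach side now follows this protocol (a sketch; attach_sketch is a
hypothetical name and the ASCE loading details are elided):

static inline void attach_sketch(struct mm_struct *mm)
{
        int cpu = smp_processor_id();

        preempt_disable();
        cpumask_set_cpu(cpu, &mm->context.cpu_attach_mask);
        /* ... load the new ASCE ... */
        while (atomic_read(&mm->context.flush_count))
                cpu_relax();    /* wait for in-flight flushes */
        /* Any flusher whose snapshot of cpu_attach_mask could predate
         * the set above has finished by now; later flushers see the
         * bit, so their cpumask_copy() preserves it. */
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        preempt_enable();
}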
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 39846100682a3e9c11ab9ffc4223d4734844cd1d..043c2aab6622826581d675f02e1bf0062c325a5b 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -43,23 +43,6 @@ static inline void __tlb_flush_global(void)
  * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
  * this implicates multiple ASCEs!).
  */
-static inline void __tlb_flush_full(struct mm_struct *mm)
-{
-       preempt_disable();
-       atomic_inc(&mm->context.flush_count);
-       if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-               /* Local TLB flush */
-               __tlb_flush_local();
-       } else {
-               /* Global TLB flush */
-               __tlb_flush_global();
-               /* Reset TLB flush mask */
-               cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
-       }
-       atomic_dec(&mm->context.flush_count);
-       preempt_enable();
-}
-
 static inline void __tlb_flush_mm(struct mm_struct *mm)
 {
        unsigned long gmap_asce;
@@ -71,16 +54,18 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
         */
        preempt_disable();
        atomic_inc(&mm->context.flush_count);
+       /* Reset TLB flush mask */
+       cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+       barrier();
        gmap_asce = READ_ONCE(mm->context.gmap_asce);
        if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
                if (gmap_asce)
                        __tlb_flush_idte(gmap_asce);
                __tlb_flush_idte(mm->context.asce);
        } else {
-               __tlb_flush_full(mm);
+               /* Global TLB flush */
+               __tlb_flush_global();
        }
-       /* Reset TLB flush mask */
-       cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
        atomic_dec(&mm->context.flush_count);
        preempt_enable();
 }
@@ -94,7 +79,6 @@ static inline void __tlb_flush_kernel(void)
 }
 #else
 #define __tlb_flush_global()   __tlb_flush_local()
-#define __tlb_flush_full(mm)   __tlb_flush_local()
 
 /*
  * Flush TLB entries for a specific ASCE on all CPUs.
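On the flush side, the patched __tlb_flush_mm resets the mask before
flushing rather than after. Condensed, with the reasoning spelled out
as comments (again a sketch under a hypothetical name; the gmap/IDTE
branch is omitted):

static inline void flush_sketch(struct mm_struct *mm)
{
        preempt_disable();
        atomic_inc(&mm->context.flush_count);   /* holds off attachers */
        /* Reset the mask *before* the flush: a CPU that detaches after
         * this copy still gets its TLB purged by the global flush
         * below, and a CPU that detached before it had already stopped
         * creating entries, so no stale entry can outlive the flush. */
        cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
        barrier();      /* keep the copy ahead of the flush */
        __tlb_flush_global();
        atomic_dec(&mm->context.flush_count);
        preempt_enable();
}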