powerpc/mm: Ensure cpumask update is ordered
author		Benjamin Herrenschmidt <benh@kernel.crashing.org>
		Mon, 24 Jul 2017 04:28:00 +0000 (14:28 +1000)
committer	Michael Ellerman <mpe@ellerman.id.au>
		Fri, 18 Aug 2017 03:07:16 +0000 (13:07 +1000)
There is no guarantee that the various isyncs involved in
the context switch will order the update of the CPU mask with
the first TLB entry for the new context being loaded by the hardware.

Be safe here and add a memory barrier to order any subsequent
load/store which may bring entries into the TLB.

The corresponding barrier on the other side already exists:
pte updates use pte_xchg(), which uses __cmpxchg_u64(), which
has a sync after the atomic operation.
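
For illustration, here is a minimal user-space analogue of the
ordering this fix relies on, written with C11 atomics. It is a sketch
of the classic store-buffering pattern, not kernel code, and every
name in it is made up:

/* "CPU A" models the switch_mm_irqs_off() side, "CPU B" models the
 * pte_xchg() side. With both fences present, at least one thread must
 * observe the other's store on every run. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpumask_bit; /* this CPU's bit in mm_cpumask() */
static atomic_int pte;         /* the PTE being updated */
static int saw_pte, saw_mask;

static void *cpu_a(void *unused)
{
	/* cpumask_set_cpu(cpu, mm_cpumask(next)); */
	atomic_store_explicit(&cpumask_bit, 1, memory_order_relaxed);
	/* the smp_mb() this patch adds */
	atomic_thread_fence(memory_order_seq_cst);
	/* first load that could pull a translation into the TLB */
	saw_pte = atomic_load_explicit(&pte, memory_order_relaxed);
	return NULL;
}

static void *cpu_b(void *unused)
{
	/* the store half of pte_xchg() ... */
	atomic_store_explicit(&pte, 1, memory_order_relaxed);
	/* ... and the sync after __cmpxchg_u64() */
	atomic_thread_fence(memory_order_seq_cst);
	/* load of mm_cpumask() to pick CPUs to flush */
	saw_mask = atomic_load_explicit(&cpumask_bit, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, cpu_a, NULL);
	pthread_create(&b, NULL, cpu_b, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Without the fences, saw_pte == 0 && saw_mask == 0 would be
	 * permitted: neither side notices the other, i.e. a missed
	 * TLB flush. */
	printf("saw_pte=%d saw_mask=%d\n", saw_pte, saw_mask);
	return 0;
}

Build with -pthread; without the two fences, the outcome where both
saw_pte and saw_mask are 0 becomes architecturally possible.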

Cc: stable@vger.kernel.org
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Add comments in the code]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/mmu_context.h
arch/powerpc/include/asm/pgtable-be-types.h
arch/powerpc/include/asm/pgtable-types.h

diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 0c76675394c5930d5cecf2ac01a2718c8e9b6c1f..35bec1c5bd5aa63567be4847c2073be53fa0390c 100644
@@ -90,6 +90,24 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
        /* Mark this context has been used on the new CPU */
        if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
                cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+
+               /*
+                * This full barrier orders the store to the cpumask above vs
+                * a subsequent operation which allows this CPU to begin loading
+                * translations for next.
+                *
+                * When using the radix MMU that operation is the load of the
+                * MMU context id, which is then moved to SPRN_PID.
+                *
+                * For the hash MMU it is either the first load from slb_cache
+                * in switch_slb(), and/or the store of paca->mm_ctx_id in
+                * copy_mm_to_paca().
+                *
+                * On the read side the barrier is in pte_xchg(), which orders
+                * the store to the PTE vs the load of mm_cpumask.
+                */
+               smp_mb();
+
                new_on_cpu = true;
        }
 
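
In the radix case referenced by the comment above, the "subsequent
operation" is, roughly, the following (paraphrased from this era's
radix__switch_mmu_context(); treat it as a sketch, not the exact
source):

/* The load of next->context.id is what first allows this CPU to
 * start installing translations for "next" under radix. */
asm volatile("isync" : : : "memory");
mtspr(SPRN_PID, next->context.id);
asm volatile("isync" : : : "memory");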
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index 9c0f5db5cf461a92e72185701b4cbc1df168dfcc..67e7e3d990f44ef495ee02b6fcb3ba16053bd5c5 100644
@@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
        unsigned long *p = (unsigned long *)ptep;
        __be64 prev;
 
+       /* See comment in switch_mm_irqs_off() */
        prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
                                             (__force unsigned long)pte_raw(new));
 
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index 8bd3b13fe2fb2e8bd1c5762c4e080c9cd921edaa..369a164b545c09087e0740fd6c32ea282a7b5245 100644
@@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
 {
        unsigned long *p = (unsigned long *)ptep;
 
+       /* See comment in switch_mm_irqs_off() */
        return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
 }
 #endif
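
On the read side, the pairing described in the comment looks
schematically like the following; flush_one_cpu() is a hypothetical
stand-in for the real flush paths:

/* The sync inside pte_xchg() orders the PTE store before the
 * mm_cpumask() load below, so any CPU whose mask bit we miss here
 * is guaranteed to see the new PTE when it loads translations. */
if (pte_xchg(ptep, old, new)) {
	int cpu;

	for_each_cpu(cpu, mm_cpumask(mm))
		flush_one_cpu(cpu, addr); /* hypothetical helper */
}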