]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
powerpc/mm/radix: Only need the Nest MMU workaround for R -> RW transition
authorAneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Tue, 16 Oct 2018 16:11:09 +0000 (12:11 -0400)
committerKhalid Elmously <khalid.elmously@canonical.com>
Tue, 23 Oct 2018 06:45:58 +0000 (08:45 +0200)
BugLink: https://bugs.launchpad.net/bugs/1792195
The Nest MMU workaround is only needed for RW upgrades. Avoid doing
that for other PTE updates.

We also avoid clearing the PTE while marking it invalid. This is
because other page table walkers will find this PTE none and can
result in unexpected behaviour due to that. Instead we clear
_PAGE_PRESENT and set the software PTE bit _PAGE_INVALID.
pte_present() is already updated to check for both bits. This makes
sure page table walkers will find the PTE present and things like
pte_pfn(pte) returns the right value.

Based on an original patch from Benjamin Herrenschmidt <benh@kernel.crashing.org>

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
(backported from commit f08d08f3db55452d31ba4a37c702da6245876b96
[jsalisbury: Patch was not expecting cpu_has_feature() check, so
merged it in.])
Signed-off-by: Joseph Salisbury <joseph.salisbury@canonical.com>
Acked-by: Khalid Elmously <khalid.elmously@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
arch/powerpc/mm/pgtable-radix.c

index a778560d32610e442cad6dc723465e9b17de2367..42e2b37d406478386ae91744f40d32308abf797d 100644 (file)
@@ -1025,21 +1025,23 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
        struct mm_struct *mm = vma->vm_mm;
        unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
                                              _PAGE_RW | _PAGE_EXEC);
+
+       unsigned long change = pte_val(entry) ^ pte_val(*ptep);
        /*
         * To avoid NMMU hang while relaxing access, we need mark
         * the pte invalid in between.
         */
        if (cpu_has_feature(CPU_FTR_POWER9_DD1) ||
-           atomic_read(&mm->context.copros) > 0) {
+           ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0)) {
                unsigned long old_pte, new_pte;
 
-               old_pte = __radix_pte_update(ptep, ~0, 0);
+               old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
                /*
                 * new value of pte
                 */
                new_pte = old_pte | set;
                radix__flush_tlb_page_psize(mm, address, psize);
-               __radix_pte_update(ptep, 0, new_pte);
+               __radix_pte_update(ptep, _PAGE_INVALID, new_pte);
        } else {
                __radix_pte_update(ptep, 0, set);
                radix__flush_tlb_page_psize(mm, address, psize);