x86/mm: provide pmdp_establish() helper
author    Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
          Fri, 14 Jun 2019 08:55:05 +0000 (10:55 +0200)
committer Kleber Sacilotto de Souza <kleber.souza@canonical.com>
          Tue, 2 Jul 2019 12:18:49 +0000 (14:18 +0200)
BugLink: https://bugs.launchpad.net/bugs/1830433
We need an atomic way to set up a pmd page table entry, avoiding races with
the CPU setting dirty/accessed bits.  This is required to implement a
pmdp_invalidate() that doesn't lose these bits.
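
For context, this is roughly how the generic pmdp_invalidate() in
mm/pgtable-generic.c is reworked later in the series to build on the new
helper; the sketch below is illustrative only and is not part of this commit:

pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                      pmd_t *pmdp)
{
        /*
         * Install the not-present copy atomically; any accessed/dirty bits
         * the CPU sets concurrently are returned in 'old' instead of lost.
         */
        pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mknotpresent(*pmdp));

        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return old;
}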

On PAE we can avoid the expensive cmpxchg8b for cases when the new page table
entry is not present.  If it is present, fall back to a cmpxchg loop.
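
As a standalone illustration of why the non-present fast path can skip
cmpxchg8b (userspace toy code, hypothetical names, little-endian layout as on
x86): _PAGE_PRESENT is bit 0 and therefore sits in the low 32-bit half of the
64-bit entry.  Once a low half with the present bit clear is visible, the CPU
treats the whole entry as not present, so it will neither walk through it nor
set accessed/dirty bits while the high half is still being written.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_PRESENT 0x1ULL      /* bit 0, same position as x86 _PAGE_PRESENT */

union example_split_pmd {                /* mirrors the kernel's split_pmd */
        struct {
                uint32_t pmd_low;        /* low half: holds the present bit */
                uint32_t pmd_high;       /* high half: upper physical-address bits */
        };
        uint64_t pmd;
};

int main(void)
{
        union example_split_pmd e = { .pmd = 0x0000001234567067ULL };

        printf("present bit sits in the low half: %d\n",
               (e.pmd_low & EXAMPLE_PAGE_PRESENT) != 0);

        /* Storing only the low half already makes the entry not present,
         * so the high half may be written afterwards, non-atomically. */
        e.pmd_low = 0;
        printf("entry not present after low-half store: %d\n",
               (e.pmd & EXAMPLE_PAGE_PRESENT) == 0);
        return 0;
}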

[akpm@linux-foundation.org: add missing `do' to do-while loop]
Link: http://lkml.kernel.org/r/20171213105756.69879-10-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 86fa949b050184ffc53688516a6a83ae5f98d08a)
Tested-by: Connor Kuehl <connor.kuehl@canonical.com>
Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Acked-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/pgtable.h

diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index a18fa4eac97bb64bb6c4ae86f380809d5395b908..a564084c6141d42f603b257160a34685e1bffdbe 100644
@@ -161,7 +161,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
-#ifdef CONFIG_SMP
 union split_pmd {
        struct {
                u32 pmd_low;
@@ -169,6 +168,8 @@ union split_pmd {
        };
        pmd_t pmd;
 };
+
+#ifdef CONFIG_SMP
 static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
 {
        union split_pmd res, *orig = (union split_pmd *)pmdp;
@@ -184,6 +185,40 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
 #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
 #endif
 
+#ifndef pmdp_establish
+#define pmdp_establish pmdp_establish
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+               unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+       pmd_t old;
+
+       /*
+        * If pmd has present bit cleared we can get away without expensive
+        * cmpxchg64: we can update pmdp half-by-half without racing with
+        * anybody.
+        */
+       if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
+               union split_pmd old, new, *ptr;
+
+               ptr = (union split_pmd *)pmdp;
+
+               new.pmd = pmd;
+
+               /* xchg acts as a barrier before setting of the high bits */
+               old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low);
+               old.pmd_high = ptr->pmd_high;
+               ptr->pmd_high = new.pmd_high;
+               return old.pmd;
+       }
+
+       do {
+               old = *pmdp;
+       } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
+
+       return old;
+}
+#endif
+
 #ifdef CONFIG_SMP
 union split_pud {
        struct {
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index f3f8409f549c6203a5029b1e31c5a6ff1b744e0f..c9f9d2334866c6c4e5662d09be23f258ea16745f 100644
@@ -1177,6 +1177,21 @@ static inline int pud_write(pud_t pud)
        return pud_flags(pud) & _PAGE_RW;
 }
 
+#ifndef pmdp_establish
+#define pmdp_establish pmdp_establish
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+               unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+       if (IS_ENABLED(CONFIG_SMP)) {
+               return xchg(pmdp, pmd);
+       } else {
+               pmd_t old = *pmdp;
+               *pmdp = pmd;
+               return old;
+       }
+}
+#endif
+
 /*
  * Page table pages are page-aligned.  The lower half of the top
  * level is used for userspace and the top half for the kernel.