]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blobdiff - arch/sparc/mm/hugetlbpage.c
Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[mirror_ubuntu-zesty-kernel.git] / arch / sparc / mm / hugetlbpage.c
index ba52e6466a8252659d0f19e49abb6cba1a7762b2..988acc8b1b80a387d9119782f53f1d41dbe53c4e 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <asm/mman.h>
 #include <asm/pgalloc.h>
+#include <asm/pgtable.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -131,23 +132,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 {
        pgd_t *pgd;
        pud_t *pud;
-       pmd_t *pmd;
        pte_t *pte = NULL;
 
-       /* We must align the address, because our caller will run
-        * set_huge_pte_at() on whatever we return, which writes out
-        * all of the sub-ptes for the hugepage range.  So we have
-        * to give it the first such sub-pte.
-        */
-       addr &= HPAGE_MASK;
-
        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
-       if (pud) {
-               pmd = pmd_alloc(mm, pud, addr);
-               if (pmd)
-                       pte = pte_alloc_map(mm, pmd, addr);
-       }
+       /* The huge mapping now lives in the PMD entry itself, so the
+        * walk stops one level up: no pte page is allocated, and the
+        * returned pointer is really a pmd_t cast to pte_t *.
+        */
+       if (pud)
+               pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
        return pte;
 }
 
@@ -155,19 +146,13 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
        pgd_t *pgd;
        pud_t *pud;
-       pmd_t *pmd;
        pte_t *pte = NULL;
 
-       addr &= HPAGE_MASK;
-
        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
-               if (!pud_none(*pud)) {
-                       pmd = pmd_offset(pud, addr);
-                       if (!pmd_none(*pmd))
-                               pte = pte_offset_map(pmd, addr);
-               }
+               /* Lookup mirrors huge_pte_alloc(): the PMD entry is the
+                * huge PTE, so return it directly without descending to
+                * a pte table.
+                */
+               if (!pud_none(*pud))
+                       pte = (pte_t *)pmd_offset(pud, addr);
        }
        return pte;
 }
@@ -175,70 +160,143 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
 {
-       int i;
-       pte_t orig[2];
-       unsigned long nptes;
+       pte_t orig;
 
+       /* Count transitions from not-present to present huge mappings. */
        if (!pte_present(*ptep) && pte_present(entry))
-               mm->context.huge_pte_count++;
+               mm->context.hugetlb_pte_count++;
 
        addr &= HPAGE_MASK;
-
-       nptes = 1 << HUGETLB_PAGE_ORDER;
-       orig[0] = *ptep;
-       orig[1] = *(ptep + nptes / 2);
-       for (i = 0; i < nptes; i++) {
-               *ptep = entry;
-               ptep++;
-               addr += PAGE_SIZE;
-               pte_val(entry) += PAGE_SIZE;
-       }
+       /* One PMD-level store replaces the old loop over every sub-pte. */
+       orig = *ptep;
+       *ptep = entry;
 
        /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-       addr -= REAL_HPAGE_SIZE;
-       ptep -= nptes / 2;
-       maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
-       addr -= REAL_HPAGE_SIZE;
-       ptep -= nptes / 2;
-       maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
+       maybe_tlb_batch_add(mm, addr, ptep, orig, 0);
+       maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0);
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
 {
        pte_t entry;
-       int i;
-       unsigned long nptes;
 
        entry = *ptep;
        if (pte_present(entry))
-               mm->context.huge_pte_count--;
+               mm->context.hugetlb_pte_count--;
 
        addr &= HPAGE_MASK;
-       nptes = 1 << HUGETLB_PAGE_ORDER;
-       for (i = 0; i < nptes; i++) {
-               *ptep = __pte(0UL);
-               addr += PAGE_SIZE;
-               ptep++;
-       }
+       /* Clearing the single PMD-level entry tears down the whole
+        * huge page mapping; no per-sub-pte loop is needed any more.
+        */
+       *ptep = __pte(0UL);
 
        /* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
-       addr -= REAL_HPAGE_SIZE;
-       ptep -= nptes / 2;
-       maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
-       addr -= REAL_HPAGE_SIZE;
-       ptep -= nptes / 2;
        maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
+       maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0);
 
        return entry;
 }
 
 int pmd_huge(pmd_t pmd)
 {
-       return 0;
+       /* Huge when the entry is non-empty yet its VALID/PMD_HUGE bit
+        * pattern differs from that of a plain valid pmd.
+        */
+       return !pmd_none(pmd) &&
+               (pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
 }
 
 int pud_huge(pud_t pud)
 {
+       /* No PUD-level huge pages here: always reports false. */
        return 0;
 }
+
+/* Free the pte page hanging off @pmd and drop the mm's pte-page count.
+ * Reached only from the non-huge branch of hugetlb_free_pmd_range().
+ */
+static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+                          unsigned long addr)
+{
+       pgtable_t token = pmd_pgtable(*pmd);
+
+       pmd_clear(pmd);
+       pte_free_tlb(tlb, token, addr);
+       atomic_long_dec(&tlb->mm->nr_ptes);
+}
+
+/* Walk the PMDs under @pud covering [addr, end).  A huge PMD is just
+ * cleared (it has no pte page beneath it); a normal PMD gets its pte
+ * page freed.  Afterwards, if no part of the enclosing PUD span falls
+ * outside [floor, ceiling), unlink and free the pmd page itself.
+ */
+static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+                                  unsigned long addr, unsigned long end,
+                                  unsigned long floor, unsigned long ceiling)
+{
+       pmd_t *pmd;
+       unsigned long next;
+       unsigned long start;
+
+       start = addr;
+       pmd = pmd_offset(pud, addr);
+       do {
+               next = pmd_addr_end(addr, end);
+               if (pmd_none(*pmd))
+                       continue;
+               if (is_hugetlb_pmd(*pmd))
+                       pmd_clear(pmd);
+               else
+                       hugetlb_free_pte_range(tlb, pmd, addr);
+       } while (pmd++, addr = next, addr != end);
+
+       /* Keep the pmd page if [floor, ceiling) doesn't cover the whole
+        * PUD-aligned span it serves.
+        */
+       start &= PUD_MASK;
+       if (start < floor)
+               return;
+       if (ceiling) {
+               ceiling &= PUD_MASK;
+               if (!ceiling)
+                       return;
+       }
+       if (end - 1 > ceiling - 1)
+               return;
+
+       pmd = pmd_offset(pud, start);
+       pud_clear(pud);
+       pmd_free_tlb(tlb, pmd, start);
+       mm_dec_nr_pmds(tlb->mm);
+}
+
+/* Walk the PUDs under @pgd covering [addr, end), freeing the PMD level
+ * below each populated entry, then free the pud page itself when the
+ * whole PGD-aligned span lies within [floor, ceiling).
+ */
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+                                  unsigned long addr, unsigned long end,
+                                  unsigned long floor, unsigned long ceiling)
+{
+       pud_t *pud;
+       unsigned long next;
+       unsigned long start;
+
+       start = addr;
+       pud = pud_offset(pgd, addr);
+       do {
+               next = pud_addr_end(addr, end);
+               if (pud_none_or_clear_bad(pud))
+                       continue;
+               hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
+                                      ceiling);
+       } while (pud++, addr = next, addr != end);
+
+       /* Same retention rule as the PMD level, one level up. */
+       start &= PGDIR_MASK;
+       if (start < floor)
+               return;
+       if (ceiling) {
+               ceiling &= PGDIR_MASK;
+               if (!ceiling)
+                       return;
+       }
+       if (end - 1 > ceiling - 1)
+               return;
+
+       pud = pud_offset(pgd, start);
+       pgd_clear(pgd);
+       pud_free_tlb(tlb, pud, start);
+}
+
+/* Top-level page-table teardown for a hugetlb region: walk the PGD
+ * entries spanning [addr, end) and free the lower levels through
+ * hugetlb_free_pud_range().  [floor, ceiling) bounds how much of the
+ * tree may actually be freed.
+ */
+void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+                           unsigned long addr, unsigned long end,
+                           unsigned long floor, unsigned long ceiling)
+{
+       pgd_t *pgd;
+       unsigned long next;
+
+       pgd = pgd_offset(tlb->mm, addr);
+       do {
+               next = pgd_addr_end(addr, end);
+               if (pgd_none_or_clear_bad(pgd))
+                       continue;
+               hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+       } while (pgd++, addr = next, addr != end);
+}