Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland...

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 385c3a1aced78f7dcd83f97597100816049bd9bc..0a9ac6c268325a6ca9096bfc784cfbf68ac6a65e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2657,9 +2657,10 @@ again:
                        goto unlock;
 
                /*
-                * HWPoisoned hugepage is already unmapped and dropped reference
+                * Migrating hugepage or HWPoisoned hugepage is already
+                * unmapped and its refcount is dropped, so just clear pte here.
                 */
-               if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+               if (unlikely(!pte_present(pte))) {
                        huge_pte_clear(mm, address, ptep);
                        goto unlock;
                }
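
The hunk above widens the test in the hugetlb unmap path: a migration entry, like an hwpoison entry, is a non-present entry whose hugepage has already been unmapped and had its refcount dropped, so the only work left is to clear the PTE. Below is a minimal standalone sketch of that reasoning in plain userspace C; the names (hpte_kind, just_clear_old, just_clear_new) are made up for illustration and are not kernel APIs.

	/*
	 * Userspace sketch, not kernel code: testing "populated but not
	 * present" covers both the hwpoison case the old code handled and
	 * the migration case it missed.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	enum hpte_kind { HPTE_NONE, HPTE_PRESENT, HPTE_MIGRATION, HPTE_HWPOISON };

	static bool hpte_present(enum hpte_kind k)
	{
		return k == HPTE_PRESENT;
	}

	/* Old test: only hwpoison entries were cleared without further work. */
	static bool just_clear_old(enum hpte_kind k)
	{
		return k == HPTE_HWPOISON;
	}

	/* New test: any populated but non-present entry is simply cleared. */
	static bool just_clear_new(enum hpte_kind k)
	{
		return k != HPTE_NONE && !hpte_present(k);
	}

	int main(void)
	{
		const enum hpte_kind cases[] = {
			HPTE_PRESENT, HPTE_MIGRATION, HPTE_HWPOISON
		};

		for (int i = 0; i < 3; i++)
			printf("kind=%d old=%d new=%d\n", cases[i],
			       just_clear_old(cases[i]), just_clear_new(cases[i]));
		return 0;
	}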
@@ -3384,7 +3385,26 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                        spin_unlock(ptl);
                        continue;
                }
-               if (!huge_pte_none(huge_ptep_get(ptep))) {
+               pte = huge_ptep_get(ptep);
+               if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+                       spin_unlock(ptl);
+                       continue;
+               }
+               if (unlikely(is_hugetlb_entry_migration(pte))) {
+                       swp_entry_t entry = pte_to_swp_entry(pte);
+
+                       if (is_write_migration_entry(entry)) {
+                               pte_t newpte;
+
+                               make_migration_entry_read(&entry);
+                               newpte = swp_entry_to_pte(entry);
+                               set_huge_pte_at(mm, address, ptep, newpte);
+                               pages++;
+                       }
+                       spin_unlock(ptl);
+                       continue;
+               }
+               if (!huge_pte_none(pte)) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(huge_pte_modify(pte, newprot));
                        pte = arch_make_huge_pte(pte, vma, NULL, 0);
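
This larger hunk teaches hugetlb_change_protection() about non-present entries: an hwpoison entry is skipped, and a migration entry is rewritten only when it is a write migration entry, in which case it is downgraded to a read entry (and counted in pages) so the page cannot come back writable once migration completes, much like the conservative treatment migration entries of regular pages get in mprotect(). The standalone sketch below models just that downgrade; the struct layout and MIG_* values are invented, and only the helper names echo the ones in the hunk, with simplified signatures.

	/*
	 * Userspace sketch, not kernel code: a write migration entry is
	 * downgraded to a read one during a protection change, and that
	 * downgrade is what bumps the "pages changed" count.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	enum mig_access { MIG_READ, MIG_WRITE };

	struct swp_entry {
		unsigned long pfn;	/* page under migration (illustrative) */
		enum mig_access access;
	};

	static bool is_write_migration_entry(const struct swp_entry *e)
	{
		return e->access == MIG_WRITE;
	}

	static void make_migration_entry_read(struct swp_entry *e)
	{
		e->access = MIG_READ;
	}

	/* One step of the protection-change loop for a migrating huge page. */
	static int change_protection_of_migration_entry(struct swp_entry *e)
	{
		if (is_write_migration_entry(e)) {
			make_migration_entry_read(e);
			return 1;	/* contributes to the pages counter */
		}
		return 0;		/* already read-only: nothing to do */
	}

	int main(void)
	{
		struct swp_entry e = { .pfn = 42, .access = MIG_WRITE };
		unsigned long pages = 0;

		pages += change_protection_of_migration_entry(&e);
		pages += change_protection_of_migration_entry(&e);	/* no-op now */

		printf("pages changed: %lu, entry is now %s\n", pages,
		       e.access == MIG_READ ? "read-only" : "writable");
		return 0;
	}

In the hunk itself the downgraded entry is converted back to a PTE with swp_entry_to_pte() and written with set_huge_pte_at() before the loop releases the lock and continues.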
@@ -3578,6 +3598,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
                if (saddr) {
                        spte = huge_pte_offset(svma->vm_mm, saddr);
                        if (spte) {
+                               mm_inc_nr_pmds(mm);
                                get_page(virt_to_page(spte));
                                break;
                        }
@@ -3589,11 +3610,13 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 
        ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
        spin_lock(ptl);
-       if (pud_none(*pud))
+       if (pud_none(*pud)) {
                pud_populate(mm, pud,
                                (pmd_t *)((unsigned long)spte & PAGE_MASK));
-       else
+       } else {
                put_page(virt_to_page(spte));
+               mm_inc_nr_pmds(mm);
+       }
        spin_unlock(ptl);
 out:
        pte = (pte_t *)pmd_alloc(mm, pud, addr);
@@ -3624,6 +3647,7 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 
        pud_clear(pud);
        put_page(virt_to_page(ptep));
+       mm_dec_nr_pmds(mm);
        *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
        return 1;
 }
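
The last three hunks add pmd page-table accounting to hugetlb page-table sharing: huge_pmd_share() charges a pmd to the mm when it takes a reference on a page table found in another mapping, and huge_pmd_unshare() drops the charge together with the page reference when the table is detached. Below is a small standalone model of that bookkeeping -- a refcounted table shared by two address spaces, each keeping its own nr_pmds -- with invented types (struct mm, struct pmd_table) standing in for the kernel's.

	/*
	 * Userspace sketch, not kernel code: every address space that maps a
	 * shared pmd page table holds a reference on it and charges one pmd
	 * to its own nr_pmds; unsharing drops both again.
	 */
	#include <assert.h>
	#include <stdio.h>

	struct pmd_table {
		int refcount;			/* like the struct page refcount */
	};

	struct mm {
		struct pmd_table *pud_entry;	/* at most one slot in this model */
		int nr_pmds;			/* pmd tables charged to this mm */
	};

	/* Share an existing table with @mm: take a reference and charge it. */
	static void share_pmd_table(struct mm *mm, struct pmd_table *table)
	{
		table->refcount++;		/* the get_page() side */
		mm->pud_entry = table;
		mm->nr_pmds++;			/* the mm_inc_nr_pmds() side */
	}

	/* Detach the table from @mm: drop the reference and the charge. */
	static void unshare_pmd_table(struct mm *mm)
	{
		assert(mm->pud_entry != NULL);
		mm->pud_entry->refcount--;	/* the put_page() side */
		mm->pud_entry = NULL;
		mm->nr_pmds--;			/* the mm_dec_nr_pmds() side */
	}

	int main(void)
	{
		struct pmd_table table = { .refcount = 1 };	/* original owner */
		struct mm a = { 0 }, b = { 0 };

		share_pmd_table(&a, &table);
		share_pmd_table(&b, &table);
		printf("shared:   refcount=%d a.nr_pmds=%d b.nr_pmds=%d\n",
		       table.refcount, a.nr_pmds, b.nr_pmds);

		unshare_pmd_table(&a);
		unshare_pmd_table(&b);
		printf("unshared: refcount=%d a.nr_pmds=%d b.nr_pmds=%d\n",
		       table.refcount, a.nr_pmds, b.nr_pmds);

		/* Both address spaces end with nothing charged, which is what
		 * the mm teardown checks expect. */
		assert(a.nr_pmds == 0 && b.nr_pmds == 0);
		return 0;
	}

In the hunks, huge_pmd_unshare() pairs its existing put_page() with the new mm_dec_nr_pmds(), the counterpart of unshare_pmd_table() above; the branch in huge_pmd_share() additionally handles the case where a racing thread has already populated the pud.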