[mirror_ubuntu-bionic-kernel.git] / mm / hugetlb.c
index 2d2ff5e8bf2bc035eb300ee16dbdaadcdb0279dd..017842b8fcba3d649cec142f3b4dcc3cb5683fce 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -18,6 +18,7 @@
 #include <linux/bootmem.h>
 #include <linux/sysfs.h>
 #include <linux/slab.h>
+#include <linux/mmdebug.h>
 #include <linux/sched/signal.h>
 #include <linux/rmap.h>
 #include <linux/string_helpers.h>
@@ -2158,6 +2159,7 @@ static void __init gather_bootmem_prealloc(void)
                 */
                if (hstate_is_gigantic(h))
                        adjust_managed_page_count(page, 1 << h->order);
+               cond_resched();
        }
 }
 
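Note: the cond_resched() added to gather_bootmem_prealloc() above yields the CPU between iterations, so that preallocating a very large number of boot-time gigantic pages does not trip the soft-lockup detector. The shape of the pattern, with the per-page setup work elided (the _sketch name is illustrative, not the real function):

    static void __init gather_bootmem_prealloc_sketch(void)
    {
            struct huge_bootmem_page *m;

            /* one entry per gigantic page reserved from bootmem */
            list_for_each_entry(m, &huge_boot_pages, list) {
                    /* ... turn the bootmem block into a proper huge page ... */
                    cond_resched();         /* long boot-time loop: let other work run */
            }
    }
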
@@ -3125,6 +3127,13 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
        }
 }
 
+static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
+{
+       if (addr & ~(huge_page_mask(hstate_vma(vma))))
+               return -EINVAL;
+       return 0;
+}
+
 /*
  * We cannot handle pagefaults against hugetlb pages at all.  They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
@@ -3141,6 +3150,7 @@ const struct vm_operations_struct hugetlb_vm_ops = {
        .fault = hugetlb_vm_op_fault,
        .open = hugetlb_vm_op_open,
        .close = hugetlb_vm_op_close,
+       .split = hugetlb_vm_op_split,
 };
 
 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
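Note: hugetlb_vm_op_split() rejects any split address that is not aligned to the mapping's huge page size, so an munmap() (or any other operation that splits the VMA) in the middle of a huge page now fails with -EINVAL instead of leaving behind a mapping the fault path cannot handle. Roughly, the generic VMA-split code is expected to consult the new hook before doing any work, along these lines (simplified rendering of the __split_vma() logic in mm/mmap.c, not a verbatim copy):

    /* inside the VMA-split path, before any boundary adjustment */
    if (vma->vm_ops && vma->vm_ops->split) {
            err = vma->vm_ops->split(vma, addr);
            if (err)
                    return err;     /* e.g. -EINVAL for a misaligned hugetlb split */
    }
    /* ... otherwise allocate the new VMA and adjust vm_start/vm_end ... */
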
@@ -3256,9 +3266,14 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
                } else {
                        if (cow) {
+                               /*
+                                * No need to notify as we are downgrading page
+                                * table protection, not changing it to point
+                                * to a new page.
+                                *
+                                * See Documentation/vm/mmu_notifier.txt
+                                */
                                huge_ptep_set_wrprotect(src, addr, src_pte);
-                               mmu_notifier_invalidate_range(src, mmun_start,
-                                                                  mmun_end);
                        }
                        entry = huge_ptep_get(src_pte);
                        ptepage = pte_page(entry);
@@ -4006,7 +4021,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 
                /* fallback to copy_from_user outside mmap_sem */
                if (unlikely(ret)) {
-                       ret = -EFAULT;
+                       ret = -ENOENT;
                        *pagep = page;
                        /* don't free the page */
                        goto out;
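Note: returning -ENOENT instead of -EFAULT here gives the userfaultfd caller a distinct signal: the huge page could not be populated while mmap_sem was held, so the data should be copied from userspace without the lock and the operation retried, rather than failed outright. The caller in mm/userfaultfd.c is expected to react roughly as follows (condensed sketch; locals, revalidation and error paths are trimmed):

    err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
                                   dst_addr, src_addr, &page);
    if (err == -ENOENT) {
            up_read(&dst_mm->mmap_sem);

            /* copy from userspace with mmap_sem dropped; this may sleep/fault */
            err = copy_huge_page_from_user(page,
                            (const void __user *)src_addr,
                            pages_per_huge_page(h), true);
            if (err) {
                    err = -EFAULT;
                    goto out;
            }

            down_read(&dst_mm->mmap_sem);
            goto retry;     /* revalidate the VMA and repeat the atomic step */
    }
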
@@ -4318,7 +4333,12 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         * and that page table be reused and filled with junk.
         */
        flush_hugetlb_tlb_range(vma, start, end);
-       mmu_notifier_invalidate_range(mm, start, end);
+       /*
+        * No need to call mmu_notifier_invalidate_range() as we are downgrading
+        * page table protection, not changing it to point to a new page.
+        *
+        * See Documentation/vm/mmu_notifier.txt
+        */
        i_mmap_unlock_write(vma->vm_file->f_mapping);
        mmu_notifier_invalidate_range_end(mm, start, end);
 
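Note: both this hunk and the copy_hugetlb_page_range() change above drop the explicit mmu_notifier_invalidate_range() call for the same reason: the PTEs are only being write-protected and still point at the same pages, so secondary TLBs (device/IOMMU mappings tracked through MMU notifiers) are flushed soon enough by the mmu_notifier_invalidate_range_end() that already closes the section. The explicit call matters only when an entry is cleared or redirected to a different page. Illustratively (a sketch of the two patterns; the function name is invented for this note):

    static void update_range_sketch(struct mm_struct *mm, unsigned long start,
                                    unsigned long end, bool points_to_new_page)
    {
            mmu_notifier_invalidate_range_start(mm, start, end);

            /* ... modify the page tables under the usual locks ... */

            if (points_to_new_page) {
                    /*
                     * The entries now reference different pages: secondary
                     * TLBs must be flushed before the old pages are reused.
                     */
                    mmu_notifier_invalidate_range(mm, start, end);
            }

            /*
             * Permission downgrade only: the pages are unchanged, so the
             * flush implied by the _end() call is sufficient.
             */
            mmu_notifier_invalidate_range_end(mm, start, end);
    }
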
@@ -4336,6 +4356,12 @@ int hugetlb_reserve_pages(struct inode *inode,
        struct resv_map *resv_map;
        long gbl_reserve;
 
+       /* This should never happen */
+       if (from > to) {
+               VM_WARN(1, "%s called with a negative range\n", __func__);
+               return -EINVAL;
+       }
+
        /*
         * Only apply hugepage reservation if asked. At fault time, an
         * attempt will be made for VM_NORESERVE to allocate a page
@@ -4617,7 +4643,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
        pte_t *pte = NULL;
 
        pgd = pgd_offset(mm, addr);
-       p4d = p4d_offset(pgd, addr);
+       p4d = p4d_alloc(mm, pgd, addr);
+       if (!p4d)
+               return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (pud) {
                if (sz == PUD_SIZE) {
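Note: huge_pte_alloc() previously assumed the P4D level was already populated and merely walked it with p4d_offset(). On configurations where P4D is a real (non-folded) page-table level, the entry may not exist yet for this address, so the walk now allocates on the way down and bails out on failure. The essential shape, with the PUD/PMD-size handling and hugetlb sharing logic omitted:

    pgd_t *pgd = pgd_offset(mm, addr);      /* top level is always present */
    p4d_t *p4d;
    pud_t *pud;

    p4d = p4d_alloc(mm, pgd, addr);         /* may allocate when P4D is a real level */
    if (!p4d)
            return NULL;                    /* allocation failed: no mapping possible */

    pud = pud_alloc(mm, p4d, addr);
    if (!pud)
            return NULL;
    /* ... descend to the PMD/PTE level according to the huge page size ... */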