tmpfs: simplify unuse and writepage
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bbb4a5bbb9582055945237692b8938a731c5492a..dae27ba3be2c8523cd032ad330e2f5a32fbffaf3 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,7 +24,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/hugetlb.h>
 #include <linux/node.h>
@@ -62,10 +62,10 @@ static DEFINE_SPINLOCK(hugetlb_lock);
  * must either hold the mmap_sem for write, or the mmap_sem for read and
  * the hugetlb_instantiation mutex:
  *
- *     down_write(&mm->mmap_sem);
+ *     down_write(&mm->mmap_sem);
  * or
- *     down_read(&mm->mmap_sem);
- *     mutex_lock(&hugetlb_instantiation_mutex);
+ *     down_read(&mm->mmap_sem);
+ *     mutex_lock(&hugetlb_instantiation_mutex);
  */
 struct file_region {
        struct list_head link;
@@ -503,9 +503,10 @@ static void update_and_free_page(struct hstate *h, struct page *page)
        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
-               page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
-                               1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
-                               1 << PG_private | 1<< PG_writeback);
+               page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+                               1 << PG_referenced | 1 << PG_dirty |
+                               1 << PG_active | 1 << PG_reserved |
+                               1 << PG_private | 1 << PG_writeback);
        }
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
@@ -591,7 +592,6 @@ int PageHuge(struct page *page)
 
        return dtor == free_huge_page;
 }
-
 EXPORT_SYMBOL_GPL(PageHuge);
 
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
@@ -1033,10 +1033,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
         */
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
-               return ERR_PTR(chg);
+               return ERR_PTR(-VM_FAULT_OOM);
        if (chg)
                if (hugetlb_get_quota(inode->i_mapping, chg))
-                       return ERR_PTR(-ENOSPC);
+                       return ERR_PTR(-VM_FAULT_SIGBUS);
 
        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
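
With this change alloc_huge_page() encodes the fault code itself in the error pointer, so callers recover it with PTR_ERR() instead of translating an errno. A minimal sketch of that caller-side convention, modelled on callers like hugetlb_no_page() (whose real body carries more state than shown here):

	page = alloc_huge_page(vma, address, 0);
	if (IS_ERR(page)) {
		/* ERR_PTR(-VM_FAULT_OOM) / ERR_PTR(-VM_FAULT_SIGBUS)
		 * unfold back into the VM_FAULT_* value the fault
		 * handler has to return */
		ret = -PTR_ERR(page);
		goto out;
	}
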
@@ -1105,12 +1105,28 @@ static void __init gather_bootmem_prealloc(void)
        struct huge_bootmem_page *m;
 
        list_for_each_entry(m, &huge_boot_pages, list) {
-               struct page *page = virt_to_page(m);
                struct hstate *h = m->hstate;
+               struct page *page;
+
+#ifdef CONFIG_HIGHMEM
+               page = pfn_to_page(m->phys >> PAGE_SHIFT);
+               free_bootmem_late((unsigned long)m,
+                                 sizeof(struct huge_bootmem_page));
+#else
+               page = virt_to_page(m);
+#endif
                __ClearPageReserved(page);
                WARN_ON(page_count(page) != 1);
                prep_compound_huge_page(page, h->order);
                prep_new_huge_page(h, page, page_to_nid(page));
+               /*
+                * If we had gigantic hugepages allocated at boot time, we need
+                * to restore the 'stolen' pages to totalram_pages in order to
+                * fix confusing memory reports from free(1) and other
+                * side-effects, like CommitLimit going negative.
+                */
+               if (h->order > (MAX_ORDER - 1))
+                       totalram_pages += 1 << h->order;
        }
 }
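
To put a number on the comment above: assuming 4 KiB base pages and the default MAX_ORDER of 11 (so the buddy allocator covers orders 0..10), a 1 GiB gigantic page has h->order = 18, the h->order > (MAX_ORDER - 1) test matches, and 1 << 18 = 262144 pages are credited back to totalram_pages for each gigantic page that was carved out at boot.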
 
@@ -2116,9 +2132,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
        pte_t entry;
 
        entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
-       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
                update_mmu_cache(vma, address, ptep);
-       }
 }
 
 
@@ -2173,9 +2188,9 @@ static int is_hugetlb_entry_migration(pte_t pte)
        if (huge_pte_none(pte) || pte_present(pte))
                return 0;
        swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_migration_entry(swp)) {
+       if (non_swap_entry(swp) && is_migration_entry(swp))
                return 1;
-       } else
+       else
                return 0;
 }
 
@@ -2186,9 +2201,9 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
        if (huge_pte_none(pte) || pte_present(pte))
                return 0;
        swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_hwpoison_entry(swp)) {
+       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
                return 1;
-       } else
+       else
                return 0;
 }
 
@@ -2205,7 +2220,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long sz = huge_page_size(h);
 
        /*
-        * A page gathering list, protected by per file i_mmap_lock. The
+        * A page gathering list, protected by per file i_mmap_mutex. The
         * lock is used to avoid list corruption from multiple unmapping
         * of the same page since we are using page->lru.
         */
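
The "page gathering list" described above is an ordinary list_head threaded through page->lru. Roughly, and leaving out the page_table_lock, huge_pmd_unshare, dirty-bit, ref_page and mmu_notifier handling of the real __unmap_hugepage_range(), the pattern is:

	struct page *page, *tmp;
	LIST_HEAD(page_list);

	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (huge_pte_none(pte))
			continue;
		/* reusing page->lru here is what makes the per-file
		 * i_mmap_mutex mandatory: two unmappers chaining the
		 * same page would corrupt the list */
		page = pte_page(pte);
		list_add(&page->lru, &page_list);
	}
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		page_remove_rmap(page);
		list_del(&page->lru);
		put_page(page);
	}
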
@@ -2274,9 +2289,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
 {
-       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
        __unmap_hugepage_range(vma, start, end, ref_page);
-       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }
 
 /*
@@ -2308,7 +2323,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
         * this mapping should be shared between all the VMAs,
         * __unmap_hugepage_range() is called as the lock is already held
         */
-       spin_lock(&mapping->i_mmap_lock);
+       mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                /* Do not unmap the current VMA */
                if (iter_vma == vma)
@@ -2326,7 +2341,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                                address, address + huge_page_size(h),
                                page);
        }
-       spin_unlock(&mapping->i_mmap_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
 
        return 1;
 }
@@ -2551,7 +2566,7 @@ retry:
                 * So we need to block hugepage fault by PG_hwpoison bit check.
                 */
                if (unlikely(PageHWPoison(page))) {
-                       ret = VM_FAULT_HWPOISON | 
+                       ret = VM_FAULT_HWPOISON |
                              VM_FAULT_SET_HINDEX(h - hstates);
                        goto backout_unlocked;
                }
@@ -2619,7 +2634,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        migration_entry_wait(mm, (pmd_t *)ptep, address);
                        return 0;
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
-                       return VM_FAULT_HWPOISON_LARGE | 
+                       return VM_FAULT_HWPOISON_LARGE |
                               VM_FAULT_SET_HINDEX(h - hstates);
        }
 
@@ -2810,7 +2825,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);
 
-       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
        spin_lock(&mm->page_table_lock);
        for (; address < end; address += huge_page_size(h)) {
                ptep = huge_pte_offset(mm, address);
@@ -2825,7 +2840,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                }
        }
        spin_unlock(&mm->page_table_lock);
-       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+       mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 
        flush_tlb_range(vma, start, end);
 }
@@ -2833,7 +2848,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 int hugetlb_reserve_pages(struct inode *inode,
                                        long from, long to,
                                        struct vm_area_struct *vma,
-                                       int acctflag)
+                                       vm_flags_t vm_flags)
 {
        long ret, chg;
        struct hstate *h = hstate_inode(inode);
@@ -2843,7 +2858,7 @@ int hugetlb_reserve_pages(struct inode *inode,
         * attempt will be made for VM_NORESERVE to allocate a page
         * and filesystem quota without using reserves
         */
-       if (acctflag & VM_NORESERVE)
+       if (vm_flags & VM_NORESERVE)
                return 0;
 
        /*