Merge branch 'akpm' (patches from Andrew)
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 22 Jan 2016 17:54:34 +0000 (09:54 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 22 Jan 2016 17:54:34 +0000 (09:54 -0800)
Merge misc fixes from Andrew Morton:
 "Six fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  ocfs2: NFS hangs in __ocfs2_cluster_lock due to race with ocfs2_unblock_lock
  reiserfs: fix dereference of ERR_PTR
  ratelimit: fix bug in time interval by resetting right begin time
  mm: fix kernel crash in khugepaged thread
  mm: fix mlock accounting
  thp: change pmd_trans_huge_lock() interface to return ptl

fs/ocfs2/dlmglue.c
fs/proc/task_mmu.c
fs/reiserfs/super.c
include/linux/huge_mm.h
include/trace/events/huge_memory.h
lib/ratelimit.c
mm/huge_memory.c
mm/memcontrol.c
mm/mincore.c
mm/mlock.c

diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index f92612e4b9d61144b13021c83dd8b4b1e9553ac5..474e57f834e6caff12eaca03c8ece4d35303d1e0 100644
@@ -1390,6 +1390,7 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
        unsigned int gen;
        int noqueue_attempted = 0;
        int dlm_locked = 0;
+       int kick_dc = 0;
 
        if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
                mlog_errno(-EINVAL);
@@ -1524,7 +1525,12 @@ update_holders:
 unlock:
        lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
 
+       /* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
+       kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
+
        spin_unlock_irqrestore(&lockres->l_lock, flags);
+       if (kick_dc)
+               ocfs2_wake_downconvert_thread(osb);
 out:
        /*
         * This is helping work around a lock inversion between the page lock
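
The hunk above samples OCFS2_LOCK_BLOCKED into kick_dc while l_lock is still
held, then calls ocfs2_wake_downconvert_thread() only after the spinlock is
dropped, so a requeue that raced with OCFS2_LOCK_UPCONVERT_FINISHING is no
longer left with nobody to kick the downconvert thread. A minimal userspace
sketch of the same "sample under lock, signal after unlock" shape, with
pthreads standing in for the kernel spinlock and all names illustrative:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t l_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  dc_wake = PTHREAD_COND_INITIALIZER;
static bool lock_blocked;               /* stands in for OCFS2_LOCK_BLOCKED */

static void finish_cluster_lock(void)
{
        bool kick_dc;

        pthread_mutex_lock(&l_lock);
        /* Sample the condition while the lock is held... */
        kick_dc = lock_blocked;
        pthread_mutex_unlock(&l_lock);

        /* ...and wake the worker only after dropping it, as the patch
         * does with ocfs2_wake_downconvert_thread(). */
        if (kick_dc)
                pthread_cond_signal(&dc_wake);
}

int main(void)
{
        finish_cluster_lock();
        return 0;
}
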
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 71ffc91060f6d6ad32081bfa53d2a3c7b3706e53..85d16c67c33eaa8b7a5ac65e38ec465814916619 100644
@@ -602,7 +602,8 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        pte_t *pte;
        spinlock_t *ptl;
 
-       if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+       ptl = pmd_trans_huge_lock(pmd, vma);
+       if (ptl) {
                smaps_pmd_entry(pmd, addr, walk);
                spin_unlock(ptl);
                return 0;
@@ -913,7 +914,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
        spinlock_t *ptl;
        struct page *page;
 
-       if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+       ptl = pmd_trans_huge_lock(pmd, vma);
+       if (ptl) {
                if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                        clear_soft_dirty_pmd(vma, addr, pmd);
                        goto out;
@@ -1187,7 +1189,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
        int err = 0;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       if (pmd_trans_huge_lock(pmdp, vma, &ptl)) {
+       ptl = pmd_trans_huge_lock(pmdp, vma);
+       if (ptl) {
                u64 flags = 0, frame = 0;
                pmd_t pmd = *pmdp;
 
@@ -1519,7 +1522,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
        pte_t *orig_pte;
        pte_t *pte;
 
-       if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+       ptl = pmd_trans_huge_lock(pmd, vma);
+       if (ptl) {
                pte_t huge_pte = *(pte_t *)pmd;
                struct page *page;
 
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 05db7473bcb5c64078dc572d2ef8e455a770dbba..c0306ec8ed7b8f5d4eef97fd1cf4c7653c2b1f20 100644
@@ -288,7 +288,7 @@ static int finish_unfinished(struct super_block *s)
                pathrelse(&path);
 
                inode = reiserfs_iget(s, &obj_key);
-               if (!inode) {
+               if (IS_ERR_OR_NULL(inode)) {
                        /*
                         * the unlink almost completed, it just did not
                         * manage to remove "save" link and release objectid
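
reiserfs_iget() can come back with either NULL or an ERR_PTR()-encoded error,
so testing only !inode lets an error pointer escape and be dereferenced later.
A simplified userspace rendition of the kernel's error-pointer convention (the
real helpers live in include/linux/err.h; this sketch is illustrative only):

#include <stdio.h>

#define MAX_ERRNO 4095

/* Errors are encoded in the top MAX_ERRNO values of the address
 * space, mirroring the kernel's ERR_PTR()/IS_ERR_OR_NULL(). */
static void *ERR_PTR(long error)
{
        return (void *)error;
}

static int IS_ERR_OR_NULL(const void *ptr)
{
        return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *inode = ERR_PTR(-5);      /* e.g. -EIO from a failed lookup */

        /* The old test misses the error pointer entirely... */
        printf("!inode:           %d\n", inode == NULL);
        /* ...the new one catches both failure encodings. */
        printf("IS_ERR_OR_NULL(): %d\n", IS_ERR_OR_NULL(inode));
        return 0;
}
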
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cfe81e10bd5429baf5ed0ccf50bd4613ff6ee3c6..459fd25b378e73cfd2e911761ad845076be547de 100644
@@ -120,15 +120,15 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                    unsigned long start,
                                    unsigned long end,
                                    long adjust_next);
-extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-               spinlock_t **ptl);
+extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
+               struct vm_area_struct *vma);
 /* mmap_sem must be held on entry */
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-               spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+               struct vm_area_struct *vma)
 {
        VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
-               return __pmd_trans_huge_lock(pmd, vma, ptl);
+               return __pmd_trans_huge_lock(pmd, vma);
        else
                return NULL;
 }
@@ -190,10 +190,10 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         long adjust_next)
 {
 }
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-               spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+               struct vm_area_struct *vma)
 {
-       return false;
+       return NULL;
 }
 
 static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
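
Returning the page-table-lock pointer folds the old bool result and the
spinlock_t ** out-parameter into a single value, so a caller can no longer
take the success branch while holding an uninitialized lock pointer. A
userspace sketch of the new calling convention under stand-in types
(pthread_mutex_t for the kernel spinlock; pmd_lock_if_huge() is a
hypothetical analogue of __pmd_trans_huge_lock(), not the kernel function):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef pthread_mutex_t spinlock_t;     /* stand-in for the kernel type */

struct pmd {
        bool huge;
        spinlock_t lock;
};

/* Shaped like the patched __pmd_trans_huge_lock(): take the lock and
 * return it on success, return NULL on failure. */
static spinlock_t *pmd_lock_if_huge(struct pmd *pmd)
{
        pthread_mutex_lock(&pmd->lock);
        if (pmd->huge)
                return &pmd->lock;
        pthread_mutex_unlock(&pmd->lock);
        return NULL;
}

static void walk_one_pmd(struct pmd *pmd)
{
        /* The lock pointer doubles as the success flag, mirroring
         * "ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) ...". */
        spinlock_t *ptl = pmd_lock_if_huge(pmd);
        if (ptl) {
                /* ... operate on the huge pmd under ptl ... */
                pthread_mutex_unlock(ptl);
                return;
        }
        /* otherwise fall through to the pte-level walk */
}

int main(void)
{
        struct pmd pmd = { .huge = true, .lock = PTHREAD_MUTEX_INITIALIZER };
        walk_one_pmd(&pmd);
        return 0;
}
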
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index 0f803d2783e3834194cb3f5cd3dd4da5c017304c..47c6212d8f3cecb07d62cb9ea81f6ea65ae0c35f 100644
@@ -46,10 +46,10 @@ SCAN_STATUS
 
 TRACE_EVENT(mm_khugepaged_scan_pmd,
 
-       TP_PROTO(struct mm_struct *mm, unsigned long pfn, bool writable,
+       TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
                 bool referenced, int none_or_zero, int status),
 
-       TP_ARGS(mm, pfn, writable, referenced, none_or_zero, status),
+       TP_ARGS(mm, page, writable, referenced, none_or_zero, status),
 
        TP_STRUCT__entry(
                __field(struct mm_struct *, mm)
@@ -62,7 +62,7 @@ TRACE_EVENT(mm_khugepaged_scan_pmd,
 
        TP_fast_assign(
                __entry->mm = mm;
-               __entry->pfn = pfn;
+               __entry->pfn = page ? page_to_pfn(page) : -1;
                __entry->writable = writable;
                __entry->referenced = referenced;
                __entry->none_or_zero = none_or_zero;
@@ -104,10 +104,10 @@ TRACE_EVENT(mm_collapse_huge_page,
 
 TRACE_EVENT(mm_collapse_huge_page_isolate,
 
-       TP_PROTO(unsigned long pfn, int none_or_zero,
+       TP_PROTO(struct page *page, int none_or_zero,
                 bool referenced, bool  writable, int status),
 
-       TP_ARGS(pfn, none_or_zero, referenced, writable, status),
+       TP_ARGS(page, none_or_zero, referenced, writable, status),
 
        TP_STRUCT__entry(
                __field(unsigned long, pfn)
@@ -118,7 +118,7 @@ TRACE_EVENT(mm_collapse_huge_page_isolate,
        ),
 
        TP_fast_assign(
-               __entry->pfn = pfn;
+               __entry->pfn = page ? page_to_pfn(page) : -1;
                __entry->none_or_zero = none_or_zero;
                __entry->referenced = referenced;
                __entry->writable = writable;
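
Moving the page-to-pfn conversion out of the callers and into
TP_fast_assign() lets the tracepoint check the pointer first; khugepaged
could previously reach the trace call with page == NULL and crash inside
page_to_pfn(). A small sketch of the guard, with page_to_pfn() re-imagined
as a plain userspace function for illustration:

#include <stdint.h>
#include <stdio.h>

struct page { uintptr_t base; };

/* Illustrative stand-in: the real page_to_pfn() would fault on NULL. */
static unsigned long page_to_pfn(const struct page *page)
{
        return page->base >> 12;
}

static void trace_scan_pmd(const struct page *page)
{
        /* The fix: convert only when non-NULL, else record -1 as the
         * "no page" sentinel, as TP_fast_assign() now does. */
        unsigned long pfn = page ? page_to_pfn(page) : -1UL;

        printf("pfn=%lx\n", pfn);
}

int main(void)
{
        trace_scan_pmd(NULL);   /* safe; the old call chain crashed here */
        return 0;
}
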
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 40e03ea2a967d978cffae1cbeffe23d609ce01bc..2c5de86460c5bb82f3d1c83b136b2f985484298a 100644
@@ -49,7 +49,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
                if (rs->missed)
                        printk(KERN_WARNING "%s: %d callbacks suppressed\n",
                                func, rs->missed);
-               rs->begin   = 0;
+               rs->begin   = jiffies;
                rs->printed = 0;
                rs->missed  = 0;
        }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8ad580273521046904ec13478e7b897839d955d7..fd3a07b3e6f4e086b2111c312a78512bed927f9b 100644
@@ -1560,7 +1560,8 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        struct mm_struct *mm = tlb->mm;
        int ret = 0;
 
-       if (!pmd_trans_huge_lock(pmd, vma, &ptl))
+       ptl = pmd_trans_huge_lock(pmd, vma);
+       if (!ptl)
                goto out_unlocked;
 
        orig_pmd = *pmd;
@@ -1627,7 +1628,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        pmd_t orig_pmd;
        spinlock_t *ptl;
 
-       if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
+       ptl = __pmd_trans_huge_lock(pmd, vma);
+       if (!ptl)
                return 0;
        /*
         * For architectures like ppc64 we look at deposited pgtable
@@ -1690,7 +1692,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
         * We don't have to worry about the ordering of src and dst
         * ptlocks because exclusive mmap_sem prevents deadlock.
         */
-       if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) {
+       old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
+       if (old_ptl) {
                new_ptl = pmd_lockptr(mm, new_pmd);
                if (new_ptl != old_ptl)
                        spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -1724,7 +1727,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        spinlock_t *ptl;
        int ret = 0;
 
-       if (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
+       ptl = __pmd_trans_huge_lock(pmd, vma);
+       if (ptl) {
                pmd_t entry;
                bool preserve_write = prot_numa && pmd_write(*pmd);
                ret = 1;
@@ -1760,14 +1764,14 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
  * Note that if it returns a lock pointer, this routine returns without
  * unlocking the page table lock. So callers must unlock it.
  */
-bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-               spinlock_t **ptl)
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 {
-       *ptl = pmd_lock(vma->vm_mm, pmd);
+       spinlock_t *ptl;
+       ptl = pmd_lock(vma->vm_mm, pmd);
        if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
-               return true;
-       spin_unlock(*ptl);
-       return false;
+               return ptl;
+       spin_unlock(ptl);
+       return NULL;
 }
 
 #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
@@ -2068,7 +2072,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
        if (likely(writable)) {
                if (likely(referenced)) {
                        result = SCAN_SUCCEED;
-                       trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
+                       trace_mm_collapse_huge_page_isolate(page, none_or_zero,
                                                            referenced, writable, result);
                        return 1;
                }
@@ -2078,7 +2082,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 
 out:
        release_pte_pages(pte, _pte);
-       trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
+       trace_mm_collapse_huge_page_isolate(page, none_or_zero,
                                            referenced, writable, result);
        return 0;
 }
@@ -2576,7 +2580,7 @@ out_unmap:
                collapse_huge_page(mm, address, hpage, vma, node);
        }
 out:
-       trace_mm_khugepaged_scan_pmd(mm, page_to_pfn(page), writable, referenced,
+       trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
                                     none_or_zero, result);
        return ret;
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ca052f2a4a0b374eb9a2a6498b407be7da28ac48..d06cae2de783acf462162e27f8770ff7bf60f4ad 100644
@@ -4638,7 +4638,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
        pte_t *pte;
        spinlock_t *ptl;
 
-       if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+       ptl = pmd_trans_huge_lock(pmd, vma);
+       if (ptl) {
                if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
                        mc.precharge += HPAGE_PMD_NR;
                spin_unlock(ptl);
@@ -4826,7 +4827,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
        union mc_target target;
        struct page *page;
 
-       if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+       ptl = pmd_trans_huge_lock(pmd, vma);
+       if (ptl) {
                if (mc.precharge < HPAGE_PMD_NR) {
                        spin_unlock(ptl);
                        return 0;
diff --git a/mm/mincore.c b/mm/mincore.c
index 2a565ed8bb4907398a0d3d2619fd4939df777970..563f320454902df04e782f73aa8d0473656034c9 100644
@@ -117,7 +117,8 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        unsigned char *vec = walk->private;
        int nr = (end - addr) >> PAGE_SHIFT;
 
-       if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+       ptl = pmd_trans_huge_lock(pmd, vma);
+       if (ptl) {
                memset(vec, 1, nr);
                spin_unlock(ptl);
                goto out;
diff --git a/mm/mlock.c b/mm/mlock.c
index e1e2b1207bf2ee00604468d027b3879381e1b089..96f00104192861aec6358d9fae5f9b39fecb43a4 100644
@@ -175,7 +175,7 @@ static void __munlock_isolation_failed(struct page *page)
  */
 unsigned int munlock_vma_page(struct page *page)
 {
-       unsigned int nr_pages;
+       int nr_pages;
        struct zone *zone = page_zone(page);
 
        /* For try_to_munlock() and to serialize with page migration */
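
munlock_vma_page() passes -nr_pages into the NR_MLOCK counter update, which
takes a signed long delta; with nr_pages declared unsigned int, the negation
wraps to a huge positive value before being widened, so the counter was
corrupted instead of decremented. A two-printf demonstration of the
promotion (assuming a 64-bit long, as on the affected builds):

#include <stdio.h>

int main(void)
{
        unsigned int u_pages = 512;     /* old type of nr_pages */
        int s_pages = 512;              /* type after the fix */

        /* -u_pages wraps to UINT_MAX - 511 and stays positive when
         * widened to long; -s_pages widens to a true -512. */
        printf("unsigned: %ld\n", (long)-u_pages);
        printf("signed:   %ld\n", (long)-s_pages);
        return 0;
}
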