unsigned int gen;
int noqueue_attempted = 0;
int dlm_locked = 0;
+ int kick_dc = 0;
if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
mlog_errno(-EINVAL);
unlock:
lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+ /* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
+ kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
+
spin_unlock_irqrestore(&lockres->l_lock, flags);
+ if (kick_dc)
+ ocfs2_wake_downconvert_thread(osb);
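
What the hunk fixes, as far as the code itself shows: ocfs2_unblock_lock() requeues a blocked lockres for as long as OCFS2_LOCK_UPCONVERT_FINISHING is set, so once the flag is cleared someone must kick the downconvert thread again or a pending downconvert can stall. Note the ordering: OCFS2_LOCK_BLOCKED is sampled while l_lock is still held, and the wakeup runs only after the irq-saving spinlock is dropped. A condensed sketch of that shape (a sketch of the hunk above, not the full unlock path):

	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
	kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED); /* sample under l_lock */
	spin_unlock_irqrestore(&lockres->l_lock, flags);
	if (kick_dc) /* wake with no spinlock held */
		ocfs2_wake_downconvert_thread(osb);
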
out:
/*
* This is helping work around a lock inversion between the page lock
pte_t *pte;
spinlock_t *ptl;
- if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (ptl) {
smaps_pmd_entry(pmd, addr, walk);
spin_unlock(ptl);
return 0;
spinlock_t *ptl;
struct page *page;
- if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (ptl) {
if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
clear_soft_dirty_pmd(vma, addr, pmd);
goto out;
int err = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (pmd_trans_huge_lock(pmdp, vma, &ptl)) {
+ ptl = pmd_trans_huge_lock(pmdp, vma);
+ if (ptl) {
u64 flags = 0, frame = 0;
pmd_t pmd = *pmdp;
pte_t *orig_pte;
pte_t *pte;
- if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (ptl) {
pte_t huge_pte = *(pte_t *)pmd;
struct page *page;
pathrelse(&path);
inode = reiserfs_iget(s, &obj_key);
- if (!inode) {
+ if (IS_ERR_OR_NULL(inode)) {
/*
* the unlink almost completed, it just did not
* manage to remove "save" link and release objectid
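
For context on why the stronger check is needed: in this era reiserfs_iget() has two distinct failure modes, and "if (!inode)" caught only one of them, letting an ERR_PTR flow into the dereferences below. A hedged sketch of the tail of reiserfs_iget() as I read it:

	inode = iget5_locked(s, key->on_disk_key.k_objectid,
			     reiserfs_find_actor, reiserfs_init_locked_inode,
			     (void *)(key));
	if (!inode)
		return ERR_PTR(-ENOMEM);	/* failure mode 1: error pointer */
	/* ... unlock/validate elided ... */
	if (comp_short_keys(INODE_PKEY(inode), key) || is_bad_inode(inode)) {
		iput(inode);
		inode = NULL;			/* failure mode 2: plain NULL */
	}
	return inode;

IS_ERR_OR_NULL() is the one test that covers both.
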
unsigned long start,
unsigned long end,
long adjust_next);
-extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
- spinlock_t **ptl);
+extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
+ struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
- spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+ struct vm_area_struct *vma)
{
VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
- return __pmd_trans_huge_lock(pmd, vma, ptl);
+ return __pmd_trans_huge_lock(pmd, vma);
else
- return false;
+ return NULL;
}
long adjust_next)
{
}
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
- spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+ struct vm_area_struct *vma)
{
- return false;
+ return NULL;
}
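
Every call-site conversion in this series then reduces to one idiom: capture the returned pointer, branch on it, unlock through it. A minimal illustrative caller (the function is hypothetical, not from the patch); with THP disabled the stub above returns NULL, so such callers simply fall through to the pte path:

static int example_pmd_entry(pmd_t *pmd, struct vm_area_struct *vma,
			     unsigned long addr)
{
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);	/* NULL unless *pmd is huge */
	if (ptl) {
		/* *pmd is a stable huge (or devmap) entry while ptl is held */
		spin_unlock(ptl);
		return 0;
	}
	return 1;	/* fall back to a pte-level walk */
}
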
static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
TRACE_EVENT(mm_khugepaged_scan_pmd,
- TP_PROTO(struct mm_struct *mm, unsigned long pfn, bool writable,
+ TP_PROTO(struct mm_struct *mm, struct page *page, bool writable,
bool referenced, int none_or_zero, int status),
- TP_ARGS(mm, pfn, writable, referenced, none_or_zero, status),
+ TP_ARGS(mm, page, writable, referenced, none_or_zero, status),
TP_STRUCT__entry(
__field(struct mm_struct *, mm)
TP_fast_assign(
__entry->mm = mm;
- __entry->pfn = pfn;
+ __entry->pfn = page ? page_to_pfn(page) : -1;
__entry->writable = writable;
__entry->referenced = referenced;
__entry->none_or_zero = none_or_zero;
TRACE_EVENT(mm_collapse_huge_page_isolate,
- TP_PROTO(unsigned long pfn, int none_or_zero,
+ TP_PROTO(struct page *page, int none_or_zero,
bool referenced, bool writable, int status),
- TP_ARGS(pfn, none_or_zero, referenced, writable, status),
+ TP_ARGS(page, none_or_zero, referenced, writable, status),
TP_STRUCT__entry(
__field(unsigned long, pfn)
),
TP_fast_assign(
- __entry->pfn = pfn;
+ __entry->pfn = page ? page_to_pfn(page) : -1;
__entry->none_or_zero = none_or_zero;
__entry->referenced = referenced;
__entry->writable = writable;
if (rs->missed)
printk(KERN_WARNING "%s: %d callbacks suppressed\n",
func, rs->missed);
- rs->begin = 0;
+ rs->begin = jiffies;
rs->printed = 0;
rs->missed = 0;
}
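
The one-liner matters because rs->begin anchors the suppression window. With the old "rs->begin = 0", the next event re-seeded begin from its own timestamp, anchoring the new window to that later event instead of to the moment the previous window expired. A condensed sketch of the surrounding ___ratelimit() logic in lib/ratelimit.c (locking and early returns omitted):

	if (!rs->begin)
		rs->begin = jiffies;	/* first event opens the window */

	if (time_is_before_jiffies(rs->begin + rs->interval)) {
		if (rs->missed)
			printk(KERN_WARNING "%s: %d callbacks suppressed\n",
			       func, rs->missed);
		rs->begin   = jiffies;	/* reopen the window now, not at the
					 * next event */
		rs->printed = 0;
		rs->missed  = 0;
	}
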
struct mm_struct *mm = tlb->mm;
int ret = 0;
- if (!pmd_trans_huge_lock(pmd, vma, &ptl))
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (!ptl)
goto out_unlocked;
orig_pmd = *pmd;
pmd_t orig_pmd;
spinlock_t *ptl;
- if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
+ ptl = __pmd_trans_huge_lock(pmd, vma);
+ if (!ptl)
return 0;
/*
* For architectures like ppc64 we look at deposited pgtable
* We don't have to worry about the ordering of src and dst
* ptlocks because exclusive mmap_sem prevents deadlock.
*/
- if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) {
+ old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
+ if (old_ptl) {
new_ptl = pmd_lockptr(mm, new_pmd);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
spinlock_t *ptl;
int ret = 0;
- if (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
+ ptl = __pmd_trans_huge_lock(pmd, vma);
+ if (ptl) {
pmd_t entry;
bool preserve_write = prot_numa && pmd_write(*pmd);
ret = 1;
- * Note that if it returns true, this routine returns without unlocking page
- * table lock. So callers must unlock it.
+ * Note that if it returns the page table lock pointer, this routine returns
+ * without unlocking page table lock. So callers must unlock it.
*/
-bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
- spinlock_t **ptl)
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
- *ptl = pmd_lock(vma->vm_mm, pmd);
+ spinlock_t *ptl;
+ ptl = pmd_lock(vma->vm_mm, pmd);
if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
- return true;
- spin_unlock(*ptl);
- return false;
+ return ptl;
+ spin_unlock(ptl);
+ return NULL;
}
#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
if (likely(writable)) {
if (likely(referenced)) {
result = SCAN_SUCCEED;
- trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
+ trace_mm_collapse_huge_page_isolate(page, none_or_zero,
referenced, writable, result);
return 1;
}
out:
release_pte_pages(pte, _pte);
- trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
+ trace_mm_collapse_huge_page_isolate(page, none_or_zero,
referenced, writable, result);
return 0;
}
collapse_huge_page(mm, address, hpage, vma, node);
}
out:
- trace_mm_khugepaged_scan_pmd(mm, page_to_pfn(page), writable, referenced,
+ trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
none_or_zero, result);
return ret;
}
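
The motivation visible in these hunks: khugepaged_scan_pmd() and the isolate path reach their trace calls through early-exit labels where page can still be NULL, so page_to_pfn(page) computed at the call site evaluates page_to_pfn(NULL) on those paths, which is at best a bogus pfn and at worst an oops depending on the memory model. Passing the page and converting inside TP_fast_assign, with -1 standing in for "no page", makes every exit safe. Condensed shape of the problem (illustrative, not the literal function body):

	struct page *page = NULL;	/* stays NULL if we bail early */

	if (!pmd_present(*pmd))		/* one of several early exits */
		goto out;
	/* ... page selection elided ... */
out:
	/* the old form computed page_to_pfn(page) right here */
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result);
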
pte_t *pte;
spinlock_t *ptl;
- if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (ptl) {
if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
mc.precharge += HPAGE_PMD_NR;
spin_unlock(ptl);
union mc_target target;
struct page *page;
- if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (ptl) {
if (mc.precharge < HPAGE_PMD_NR) {
spin_unlock(ptl);
return 0;
unsigned char *vec = walk->private;
int nr = (end - addr) >> PAGE_SHIFT;
- if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+ ptl = pmd_trans_huge_lock(pmd, vma);
+ if (ptl) {
memset(vec, 1, nr);
spin_unlock(ptl);
goto out;
*/
unsigned int munlock_vma_page(struct page *page)
{
- unsigned int nr_pages;
+ int nr_pages;
struct zone *zone = page_zone(page);
/* For try_to_munlock() and to serialize with page migration */
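
A hedged reading of this last hunk (the fragment alone does not say): nr_pages is most plausibly switched to int to agree with hpage_nr_pages(), which munlock_vma_page() uses for THP and which returns int in this era:

/* include/linux/huge_mm.h, 4.5-era */
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}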