git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
Merge branch 'xfs-4.10-misc-fixes-1' into for-next
author Dave Chinner <david@fromorbit.com>
Wed, 9 Nov 2016 23:29:43 +0000 (10:29 +1100)
committer Dave Chinner <david@fromorbit.com>
Wed, 9 Nov 2016 23:29:43 +0000 (10:29 +1100)
25 files changed:
fs/Kconfig
fs/dax.c
fs/ext2/file.c
fs/ext4/inode.c
fs/iomap.c
fs/xfs/libxfs/xfs_attr_leaf.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_dir2.h
fs/xfs/libxfs/xfs_dir2_data.c
fs/xfs/libxfs/xfs_dir2_priv.h
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/libxfs/xfs_inode_buf.h
fs/xfs/libxfs/xfs_log_format.h
fs/xfs/libxfs/xfs_log_recover.h
fs/xfs/libxfs/xfs_rtbitmap.c
fs/xfs/libxfs/xfs_sb.c
fs/xfs/libxfs/xfs_types.h
fs/xfs/xfs_aops.c
fs/xfs/xfs_aops.h
fs/xfs/xfs_file.c
fs/xfs/xfs_log_recover.c
include/linux/dax.h
include/linux/iomap.h
mm/filemap.c

index 4bd03a2b05181738ec13a098276d72eec2faa658..8e9e5f4104f4d3a36fe9a01c3979e2592fc46e56 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -55,7 +55,6 @@ config FS_DAX_PMD
        depends on FS_DAX
        depends on ZONE_DEVICE
        depends on TRANSPARENT_HUGEPAGE
-       depends on BROKEN
 
 endif # BLOCK
 
index 014defd2e744e00b4df273e88d2a7026a3db4ff9..28af41b9da3ac61027beb189f5a8a42f0ca2c69d 100644
--- a/fs/dax.c
+++ b/fs/dax.c
 #include <linux/iomap.h>
 #include "internal.h"
 
-/*
- * We use lowest available bit in exceptional entry for locking, other two
- * bits to determine entry type. In total 3 special bits.
- */
-#define RADIX_DAX_SHIFT        (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)
-#define RADIX_DAX_PTE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
-#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
-#define RADIX_DAX_TYPE_MASK (RADIX_DAX_PTE | RADIX_DAX_PMD)
-#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_TYPE_MASK)
-#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
-#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
-               RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \
-               RADIX_TREE_EXCEPTIONAL_ENTRY))
-
 /* We choose 4096 entries - same as per-zone page wait tables */
 #define DAX_WAIT_TABLE_BITS 12
 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
 
-wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
+static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
 
 static int __init init_dax_wait_table(void)
 {
@@ -64,14 +50,6 @@ static int __init init_dax_wait_table(void)
 }
 fs_initcall(init_dax_wait_table);
 
-static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
-                                             pgoff_t index)
-{
-       unsigned long hash = hash_long((unsigned long)mapping ^ index,
-                                      DAX_WAIT_TABLE_BITS);
-       return wait_table + hash;
-}
-
 static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
 {
        struct request_queue *q = bdev->bd_queue;
@@ -98,6 +76,26 @@ static void dax_unmap_atomic(struct block_device *bdev,
        blk_queue_exit(bdev->bd_queue);
 }
 
+static int dax_is_pmd_entry(void *entry)
+{
+       return (unsigned long)entry & RADIX_DAX_PMD;
+}
+
+static int dax_is_pte_entry(void *entry)
+{
+       return !((unsigned long)entry & RADIX_DAX_PMD);
+}
+
+static int dax_is_zero_entry(void *entry)
+{
+       return (unsigned long)entry & RADIX_DAX_HZP;
+}
+
+static int dax_is_empty_entry(void *entry)
+{
+       return (unsigned long)entry & RADIX_DAX_EMPTY;
+}
+
 struct page *read_dax_sector(struct block_device *bdev, sector_t n)
 {
        struct page *page = alloc_pages(GFP_KERNEL, 0);
@@ -123,19 +121,6 @@ static bool buffer_written(struct buffer_head *bh)
        return buffer_mapped(bh) && !buffer_unwritten(bh);
 }
 
-/*
- * When ext4 encounters a hole, it returns without modifying the buffer_head
- * which means that we can't trust b_size.  To cope with this, we set b_state
- * to 0 before calling get_block and, if any bit is set, we know we can trust
- * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
- * and would save us time calling get_block repeatedly.
- */
-static bool buffer_size_valid(struct buffer_head *bh)
-{
-       return bh->b_state != 0;
-}
-
-
 static sector_t to_sector(const struct buffer_head *bh,
                const struct inode *inode)
 {
@@ -177,8 +162,6 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
                                rc = get_block(inode, block, bh, rw == WRITE);
                                if (rc)
                                        break;
-                               if (!buffer_size_valid(bh))
-                                       bh->b_size = 1 << blkbits;
                                bh_max = pos - first + bh->b_size;
                                bdev = bh->b_bdev;
                                /*
@@ -300,7 +283,7 @@ EXPORT_SYMBOL_GPL(dax_do_io);
  */
 struct exceptional_entry_key {
        struct address_space *mapping;
-       unsigned long index;
+       pgoff_t entry_start;
 };
 
 struct wait_exceptional_entry_queue {
@@ -308,6 +291,26 @@ struct wait_exceptional_entry_queue {
        struct exceptional_entry_key key;
 };
 
+static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
+               pgoff_t index, void *entry, struct exceptional_entry_key *key)
+{
+       unsigned long hash;
+
+       /*
+        * If 'entry' is a PMD, align the 'index' that we use for the wait
+        * queue to the start of that PMD.  This ensures that all offsets in
+        * the range covered by the PMD map to the same bit lock.
+        */
+       if (dax_is_pmd_entry(entry))
+               index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
+
+       key->mapping = mapping;
+       key->entry_start = index;
+
+       hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
+       return wait_table + hash;
+}
+
 static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
                                       int sync, void *keyp)
 {
@@ -316,7 +319,7 @@ static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
                container_of(wait, struct wait_exceptional_entry_queue, wait);
 
        if (key->mapping != ewait->key.mapping ||
-           key->index != ewait->key.index)
+           key->entry_start != ewait->key.entry_start)
                return 0;
        return autoremove_wake_function(wait, mode, sync, NULL);
 }
@@ -372,24 +375,24 @@ static inline void *unlock_slot(struct address_space *mapping, void **slot)
 static void *get_unlocked_mapping_entry(struct address_space *mapping,
                                        pgoff_t index, void ***slotp)
 {
-       void *ret, **slot;
+       void *entry, **slot;
        struct wait_exceptional_entry_queue ewait;
-       wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
+       wait_queue_head_t *wq;
 
        init_wait(&ewait.wait);
        ewait.wait.func = wake_exceptional_entry_func;
-       ewait.key.mapping = mapping;
-       ewait.key.index = index;
 
        for (;;) {
-               ret = __radix_tree_lookup(&mapping->page_tree, index, NULL,
+               entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
                                          &slot);
-               if (!ret || !radix_tree_exceptional_entry(ret) ||
+               if (!entry || !radix_tree_exceptional_entry(entry) ||
                    !slot_locked(mapping, slot)) {
                        if (slotp)
                                *slotp = slot;
-                       return ret;
+                       return entry;
                }
+
+               wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
                prepare_to_wait_exclusive(wq, &ewait.wait,
                                          TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&mapping->tree_lock);
@@ -399,52 +402,157 @@ static void *get_unlocked_mapping_entry(struct address_space *mapping,
        }
 }
 
+static void put_locked_mapping_entry(struct address_space *mapping,
+                                    pgoff_t index, void *entry)
+{
+       if (!radix_tree_exceptional_entry(entry)) {
+               unlock_page(entry);
+               put_page(entry);
+       } else {
+               dax_unlock_mapping_entry(mapping, index);
+       }
+}
+
+/*
+ * Called when we are done with radix tree entry we looked up via
+ * get_unlocked_mapping_entry() and which we didn't lock in the end.
+ */
+static void put_unlocked_mapping_entry(struct address_space *mapping,
+                                      pgoff_t index, void *entry)
+{
+       if (!radix_tree_exceptional_entry(entry))
+               return;
+
+       /* We have to wake up next waiter for the radix tree entry lock */
+       dax_wake_mapping_entry_waiter(mapping, index, entry, false);
+}
+
 /*
  * Find radix tree entry at given index. If it points to a page, return with
  * the page locked. If it points to the exceptional entry, return with the
  * radix tree entry locked. If the radix tree doesn't contain given index,
  * create empty exceptional entry for the index and return with it locked.
  *
+ * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
+ * either return that locked entry or will return an error.  This error will
+ * happen if there are any 4k entries (either zero pages or DAX entries)
+ * within the 2MiB range that we are requesting.
+ *
+ * We always favor 4k entries over 2MiB entries. There isn't a flow where we
+ * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
+ * insertion will fail if it finds any 4k entries already in the tree, and a
+ * 4k insertion will cause an existing 2MiB entry to be unmapped and
+ * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
+ * well as 2MiB empty entries.
+ *
+ * The exception to this downgrade path is for 2MiB DAX PMD entries that have
+ * real storage backing them.  We will leave these real 2MiB DAX entries in
+ * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
+ *
  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
  * persistent memory the benefit is doubtful. We can add that later if we can
  * show it helps.
  */
-static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
+static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
+               unsigned long size_flag)
 {
-       void *ret, **slot;
+       bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
+       void *entry, **slot;
 
 restart:
        spin_lock_irq(&mapping->tree_lock);
-       ret = get_unlocked_mapping_entry(mapping, index, &slot);
+       entry = get_unlocked_mapping_entry(mapping, index, &slot);
+
+       if (entry) {
+               if (size_flag & RADIX_DAX_PMD) {
+                       if (!radix_tree_exceptional_entry(entry) ||
+                           dax_is_pte_entry(entry)) {
+                               put_unlocked_mapping_entry(mapping, index,
+                                               entry);
+                               entry = ERR_PTR(-EEXIST);
+                               goto out_unlock;
+                       }
+               } else { /* trying to grab a PTE entry */
+                       if (radix_tree_exceptional_entry(entry) &&
+                           dax_is_pmd_entry(entry) &&
+                           (dax_is_zero_entry(entry) ||
+                            dax_is_empty_entry(entry))) {
+                               pmd_downgrade = true;
+                       }
+               }
+       }
+
        /* No entry for given index? Make sure radix tree is big enough. */
-       if (!ret) {
+       if (!entry || pmd_downgrade) {
                int err;
 
+               if (pmd_downgrade) {
+                       /*
+                        * Make sure 'entry' remains valid while we drop
+                        * mapping->tree_lock.
+                        */
+                       entry = lock_slot(mapping, slot);
+               }
+
                spin_unlock_irq(&mapping->tree_lock);
                err = radix_tree_preload(
                                mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
-               if (err)
+               if (err) {
+                       if (pmd_downgrade)
+                               put_locked_mapping_entry(mapping, index, entry);
                        return ERR_PTR(err);
-               ret = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
-                              RADIX_DAX_ENTRY_LOCK);
+               }
+
+               /*
+                * Besides huge zero pages the only other thing that gets
+                * downgraded are empty entries which don't need to be
+                * unmapped.
+                */
+               if (pmd_downgrade && dax_is_zero_entry(entry))
+                       unmap_mapping_range(mapping,
+                               (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
+
                spin_lock_irq(&mapping->tree_lock);
-               err = radix_tree_insert(&mapping->page_tree, index, ret);
+
+               if (pmd_downgrade) {
+                       radix_tree_delete(&mapping->page_tree, index);
+                       mapping->nrexceptional--;
+                       dax_wake_mapping_entry_waiter(mapping, index, entry,
+                                       true);
+               }
+
+               entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
+
+               err = __radix_tree_insert(&mapping->page_tree, index,
+                               dax_radix_order(entry), entry);
                radix_tree_preload_end();
                if (err) {
                        spin_unlock_irq(&mapping->tree_lock);
-                       /* Someone already created the entry? */
-                       if (err == -EEXIST)
+                       /*
+                        * Someone already created the entry?  This is a
+                        * normal failure when inserting PMDs in a range
+                        * that already contains PTEs.  In that case we want
+                        * to return -EEXIST immediately.
+                        */
+                       if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
                                goto restart;
+                       /*
+                        * Our insertion of a DAX PMD entry failed, most
+                        * likely because it collided with a PTE sized entry
+                        * at a different index in the PMD range.  We haven't
+                        * inserted anything into the radix tree and have no
+                        * waiters to wake.
+                        */
                        return ERR_PTR(err);
                }
                /* Good, we have inserted empty locked entry into the tree. */
                mapping->nrexceptional++;
                spin_unlock_irq(&mapping->tree_lock);
-               return ret;
+               return entry;
        }
        /* Normal page in radix tree? */
-       if (!radix_tree_exceptional_entry(ret)) {
-               struct page *page = ret;
+       if (!radix_tree_exceptional_entry(entry)) {
+               struct page *page = entry;
 
                get_page(page);
                spin_unlock_irq(&mapping->tree_lock);
@@ -457,15 +565,26 @@ restart:
                }
                return page;
        }
-       ret = lock_slot(mapping, slot);
+       entry = lock_slot(mapping, slot);
+ out_unlock:
        spin_unlock_irq(&mapping->tree_lock);
-       return ret;
+       return entry;
 }
 
+/*
+ * We do not necessarily hold the mapping->tree_lock when we call this
+ * function so it is possible that 'entry' is no longer a valid item in the
+ * radix tree.  This is okay because all we really need to do is to find the
+ * correct waitqueue where tasks might be waiting for that old 'entry' and
+ * wake them.
+ */
 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
-                                  pgoff_t index, bool wake_all)
+               pgoff_t index, void *entry, bool wake_all)
 {
-       wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
+       struct exceptional_entry_key key;
+       wait_queue_head_t *wq;
+
+       wq = dax_entry_waitqueue(mapping, index, entry, &key);
 
        /*
         * Checking for locked entry and prepare_to_wait_exclusive() happens
@@ -473,54 +592,24 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
         * So at this point all tasks that could have seen our entry locked
         * must be in the waitqueue and the following check will see them.
         */
-       if (waitqueue_active(wq)) {
-               struct exceptional_entry_key key;
-
-               key.mapping = mapping;
-               key.index = index;
+       if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
-       }
 }
 
 void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
 {
-       void *ret, **slot;
+       void *entry, **slot;
 
        spin_lock_irq(&mapping->tree_lock);
-       ret = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
-       if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret) ||
+       entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
+       if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
                         !slot_locked(mapping, slot))) {
                spin_unlock_irq(&mapping->tree_lock);
                return;
        }
        unlock_slot(mapping, slot);
        spin_unlock_irq(&mapping->tree_lock);
-       dax_wake_mapping_entry_waiter(mapping, index, false);
-}
-
-static void put_locked_mapping_entry(struct address_space *mapping,
-                                    pgoff_t index, void *entry)
-{
-       if (!radix_tree_exceptional_entry(entry)) {
-               unlock_page(entry);
-               put_page(entry);
-       } else {
-               dax_unlock_mapping_entry(mapping, index);
-       }
-}
-
-/*
- * Called when we are done with radix tree entry we looked up via
- * get_unlocked_mapping_entry() and which we didn't lock in the end.
- */
-static void put_unlocked_mapping_entry(struct address_space *mapping,
-                                      pgoff_t index, void *entry)
-{
-       if (!radix_tree_exceptional_entry(entry))
-               return;
-
-       /* We have to wake up next waiter for the radix tree entry lock */
-       dax_wake_mapping_entry_waiter(mapping, index, false);
+       dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 }
 
 /*
@@ -547,7 +636,7 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
        radix_tree_delete(&mapping->page_tree, index);
        mapping->nrexceptional--;
        spin_unlock_irq(&mapping->tree_lock);
-       dax_wake_mapping_entry_waiter(mapping, index, true);
+       dax_wake_mapping_entry_waiter(mapping, index, entry, true);
 
        return 1;
 }
@@ -600,11 +689,17 @@ static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size
        return 0;
 }
 
-#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
-
+/*
+ * By this point grab_mapping_entry() has ensured that we have a locked entry
+ * of the appropriate size so we don't have to worry about downgrading PMDs to
+ * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
+ * already in the tree, we will skip the insertion and just dirty the PMD as
+ * appropriate.
+ */
 static void *dax_insert_mapping_entry(struct address_space *mapping,
                                      struct vm_fault *vmf,
-                                     void *entry, sector_t sector)
+                                     void *entry, sector_t sector,
+                                     unsigned long flags)
 {
        struct radix_tree_root *page_tree = &mapping->page_tree;
        int error = 0;
@@ -627,22 +722,35 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
                error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
                if (error)
                        return ERR_PTR(error);
+       } else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
+               /* replacing huge zero page with PMD block mapping */
+               unmap_mapping_range(mapping,
+                       (vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
        }
 
        spin_lock_irq(&mapping->tree_lock);
-       new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) |
-                      RADIX_DAX_ENTRY_LOCK);
+       new_entry = dax_radix_locked_entry(sector, flags);
+
        if (hole_fill) {
                __delete_from_page_cache(entry, NULL);
                /* Drop pagecache reference */
                put_page(entry);
-               error = radix_tree_insert(page_tree, index, new_entry);
+               error = __radix_tree_insert(page_tree, index,
+                               dax_radix_order(new_entry), new_entry);
                if (error) {
                        new_entry = ERR_PTR(error);
                        goto unlock;
                }
                mapping->nrexceptional++;
-       } else {
+       } else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
+               /*
+                * Only swap our new entry into the radix tree if the current
+                * entry is a zero page or an empty entry.  If a normal PTE or
+                * PMD entry is already in the tree, we leave it alone.  This
+                * means that if we are trying to insert a PTE and the
+                * existing entry is a PMD, we will just leave the PMD in the
+                * tree and dirty it if necessary.
+                */
                void **slot;
                void *ret;
 
@@ -672,7 +780,6 @@ static int dax_writeback_one(struct block_device *bdev,
                struct address_space *mapping, pgoff_t index, void *entry)
 {
        struct radix_tree_root *page_tree = &mapping->page_tree;
-       int type = RADIX_DAX_TYPE(entry);
        struct radix_tree_node *node;
        struct blk_dax_ctl dax;
        void **slot;
@@ -693,13 +800,21 @@ static int dax_writeback_one(struct block_device *bdev,
        if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
                goto unlock;
 
-       if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
+       if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
+                               dax_is_zero_entry(entry))) {
                ret = -EIO;
                goto unlock;
        }
 
-       dax.sector = RADIX_DAX_SECTOR(entry);
-       dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
+       /*
+        * Even if dax_writeback_mapping_range() was given a wbc->range_start
+        * in the middle of a PMD, the 'index' we are given will be aligned to
+        * the start index of the PMD, as will the sector we pull from
+        * 'entry'.  This allows us to flush for PMD_SIZE and not have to
+        * worry about partial PMD writebacks.
+        */
+       dax.sector = dax_radix_sector(entry);
+       dax.size = PAGE_SIZE << dax_radix_order(entry);
        spin_unlock_irq(&mapping->tree_lock);
 
        /*
@@ -738,12 +853,11 @@ int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc)
 {
        struct inode *inode = mapping->host;
-       pgoff_t start_index, end_index, pmd_index;
+       pgoff_t start_index, end_index;
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        bool done = false;
        int i, ret = 0;
-       void *entry;
 
        if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
                return -EIO;
@@ -753,15 +867,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 
        start_index = wbc->range_start >> PAGE_SHIFT;
        end_index = wbc->range_end >> PAGE_SHIFT;
-       pmd_index = DAX_PMD_INDEX(start_index);
-
-       rcu_read_lock();
-       entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
-       rcu_read_unlock();
-
-       /* see if the start of our range is covered by a PMD entry */
-       if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
-               start_index = pmd_index;
 
        tag_pages_for_writeback(mapping, start_index, end_index);
 
@@ -806,7 +911,7 @@ static int dax_insert_mapping(struct address_space *mapping,
                return PTR_ERR(dax.addr);
        dax_unmap_atomic(bdev, &dax);
 
-       ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
+       ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
        if (IS_ERR(ret))
                return PTR_ERR(ret);
        *entryp = ret;
@@ -853,7 +958,7 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
        bh.b_bdev = inode->i_sb->s_bdev;
        bh.b_size = PAGE_SIZE;
 
-       entry = grab_mapping_entry(mapping, vmf->pgoff);
+       entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
        if (IS_ERR(entry)) {
                error = PTR_ERR(entry);
                goto out;
@@ -913,224 +1018,6 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 }
 EXPORT_SYMBOL_GPL(dax_fault);
 
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
-/*
- * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
- * more often than one might expect in the below function.
- */
-#define PG_PMD_COLOUR  ((PMD_SIZE >> PAGE_SHIFT) - 1)
-
-static void __dax_dbg(struct buffer_head *bh, unsigned long address,
-               const char *reason, const char *fn)
-{
-       if (bh) {
-               char bname[BDEVNAME_SIZE];
-               bdevname(bh->b_bdev, bname);
-               pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
-                       "length %zd fallback: %s\n", fn, current->comm,
-                       address, bname, bh->b_state, (u64)bh->b_blocknr,
-                       bh->b_size, reason);
-       } else {
-               pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
-                       current->comm, address, reason);
-       }
-}
-
-#define dax_pmd_dbg(bh, address, reason)       __dax_dbg(bh, address, reason, "dax_pmd")
-
-/**
- * dax_pmd_fault - handle a PMD fault on a DAX file
- * @vma: The virtual memory area where the fault occurred
- * @vmf: The description of the fault
- * @get_block: The filesystem method used to translate file offsets to blocks
- *
- * When a page fault occurs, filesystems may call this helper in their
- * pmd_fault handler for DAX files.
- */
-int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
-               pmd_t *pmd, unsigned int flags, get_block_t get_block)
-{
-       struct file *file = vma->vm_file;
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = mapping->host;
-       struct buffer_head bh;
-       unsigned blkbits = inode->i_blkbits;
-       unsigned long pmd_addr = address & PMD_MASK;
-       bool write = flags & FAULT_FLAG_WRITE;
-       struct block_device *bdev;
-       pgoff_t size, pgoff;
-       sector_t block;
-       int result = 0;
-       bool alloc = false;
-
-       /* dax pmd mappings require pfn_t_devmap() */
-       if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
-               return VM_FAULT_FALLBACK;
-
-       /* Fall back to PTEs if we're going to COW */
-       if (write && !(vma->vm_flags & VM_SHARED)) {
-               split_huge_pmd(vma, pmd, address);
-               dax_pmd_dbg(NULL, address, "cow write");
-               return VM_FAULT_FALLBACK;
-       }
-       /* If the PMD would extend outside the VMA */
-       if (pmd_addr < vma->vm_start) {
-               dax_pmd_dbg(NULL, address, "vma start unaligned");
-               return VM_FAULT_FALLBACK;
-       }
-       if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
-               dax_pmd_dbg(NULL, address, "vma end unaligned");
-               return VM_FAULT_FALLBACK;
-       }
-
-       pgoff = linear_page_index(vma, pmd_addr);
-       size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       if (pgoff >= size)
-               return VM_FAULT_SIGBUS;
-       /* If the PMD would cover blocks out of the file */
-       if ((pgoff | PG_PMD_COLOUR) >= size) {
-               dax_pmd_dbg(NULL, address,
-                               "offset + huge page size > file size");
-               return VM_FAULT_FALLBACK;
-       }
-
-       memset(&bh, 0, sizeof(bh));
-       bh.b_bdev = inode->i_sb->s_bdev;
-       block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
-
-       bh.b_size = PMD_SIZE;
-
-       if (get_block(inode, block, &bh, 0) != 0)
-               return VM_FAULT_SIGBUS;
-
-       if (!buffer_mapped(&bh) && write) {
-               if (get_block(inode, block, &bh, 1) != 0)
-                       return VM_FAULT_SIGBUS;
-               alloc = true;
-               WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
-       }
-
-       bdev = bh.b_bdev;
-
-       /*
-        * If the filesystem isn't willing to tell us the length of a hole,
-        * just fall back to PTEs.  Calling get_block 512 times in a loop
-        * would be silly.
-        */
-       if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
-               dax_pmd_dbg(&bh, address, "allocated block too small");
-               return VM_FAULT_FALLBACK;
-       }
-
-       /*
-        * If we allocated new storage, make sure no process has any
-        * zero pages covering this hole
-        */
-       if (alloc) {
-               loff_t lstart = pgoff << PAGE_SHIFT;
-               loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */
-
-               truncate_pagecache_range(inode, lstart, lend);
-       }
-
-       if (!write && !buffer_mapped(&bh)) {
-               spinlock_t *ptl;
-               pmd_t entry;
-               struct page *zero_page = mm_get_huge_zero_page(vma->vm_mm);
-
-               if (unlikely(!zero_page)) {
-                       dax_pmd_dbg(&bh, address, "no zero page");
-                       goto fallback;
-               }
-
-               ptl = pmd_lock(vma->vm_mm, pmd);
-               if (!pmd_none(*pmd)) {
-                       spin_unlock(ptl);
-                       dax_pmd_dbg(&bh, address, "pmd already present");
-                       goto fallback;
-               }
-
-               dev_dbg(part_to_dev(bdev->bd_part),
-                               "%s: %s addr: %lx pfn: <zero> sect: %llx\n",
-                               __func__, current->comm, address,
-                               (unsigned long long) to_sector(&bh, inode));
-
-               entry = mk_pmd(zero_page, vma->vm_page_prot);
-               entry = pmd_mkhuge(entry);
-               set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
-               result = VM_FAULT_NOPAGE;
-               spin_unlock(ptl);
-       } else {
-               struct blk_dax_ctl dax = {
-                       .sector = to_sector(&bh, inode),
-                       .size = PMD_SIZE,
-               };
-               long length = dax_map_atomic(bdev, &dax);
-
-               if (length < 0) {
-                       dax_pmd_dbg(&bh, address, "dax-error fallback");
-                       goto fallback;
-               }
-               if (length < PMD_SIZE) {
-                       dax_pmd_dbg(&bh, address, "dax-length too small");
-                       dax_unmap_atomic(bdev, &dax);
-                       goto fallback;
-               }
-               if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
-                       dax_pmd_dbg(&bh, address, "pfn unaligned");
-                       dax_unmap_atomic(bdev, &dax);
-                       goto fallback;
-               }
-
-               if (!pfn_t_devmap(dax.pfn)) {
-                       dax_unmap_atomic(bdev, &dax);
-                       dax_pmd_dbg(&bh, address, "pfn not in memmap");
-                       goto fallback;
-               }
-               dax_unmap_atomic(bdev, &dax);
-
-               /*
-                * For PTE faults we insert a radix tree entry for reads, and
-                * leave it clean.  Then on the first write we dirty the radix
-                * tree entry via the dax_pfn_mkwrite() path.  This sequence
-                * allows the dax_pfn_mkwrite() call to be simpler and avoid a
-                * call into get_block() to translate the pgoff to a sector in
-                * order to be able to create a new radix tree entry.
-                *
-                * The PMD path doesn't have an equivalent to
-                * dax_pfn_mkwrite(), though, so for a read followed by a
-                * write we traverse all the way through dax_pmd_fault()
-                * twice.  This means we can just skip inserting a radix tree
-                * entry completely on the initial read and just wait until
-                * the write to insert a dirty entry.
-                */
-               if (write) {
-                       /*
-                        * We should insert radix-tree entry and dirty it here.
-                        * For now this is broken...
-                        */
-               }
-
-               dev_dbg(part_to_dev(bdev->bd_part),
-                               "%s: %s addr: %lx pfn: %lx sect: %llx\n",
-                               __func__, current->comm, address,
-                               pfn_t_to_pfn(dax.pfn),
-                               (unsigned long long) dax.sector);
-               result |= vmf_insert_pfn_pmd(vma, address, pmd,
-                               dax.pfn, write);
-       }
-
- out:
-       return result;
-
- fallback:
-       count_vm_event(THP_FAULT_FALLBACK);
-       result = VM_FAULT_FALLBACK;
-       goto out;
-}
-EXPORT_SYMBOL_GPL(dax_pmd_fault);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
 /**
  * dax_pfn_mkwrite - handle first write to DAX page
  * @vma: The virtual memory area where the fault occurred
@@ -1214,7 +1101,8 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
        /* Block boundary? Nothing to do */
        if (!length)
                return 0;
-       BUG_ON((offset + length) > PAGE_SIZE);
+       if (WARN_ON_ONCE((offset + length) > PAGE_SIZE))
+               return -EINVAL;
 
        memset(&bh, 0, sizeof(bh));
        bh.b_bdev = inode->i_sb->s_bdev;
@@ -1245,8 +1133,13 @@ int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
 EXPORT_SYMBOL_GPL(dax_truncate_page);
 
 #ifdef CONFIG_FS_IOMAP
+static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
+{
+       return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
+}
+
 static loff_t
-iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct iomap *iomap)
 {
        struct iov_iter *iter = data;
@@ -1270,8 +1163,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct blk_dax_ctl dax = { 0 };
                ssize_t map_len;
 
-               dax.sector = iomap->blkno +
-                       (((pos & PAGE_MASK) - iomap->offset) >> 9);
+               dax.sector = dax_iomap_sector(iomap, pos);
                dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
                map_len = dax_map_atomic(iomap->bdev, &dax);
                if (map_len < 0) {
@@ -1303,7 +1195,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 }
 
 /**
- * iomap_dax_rw - Perform I/O to a DAX file
+ * dax_iomap_rw - Perform I/O to a DAX file
  * @iocb:      The control block for this I/O
  * @iter:      The addresses to do I/O from or to
  * @ops:       iomap ops passed from the file system
@@ -1313,7 +1205,7 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
  * and evicting any page cache pages in the region under I/O.
  */
 ssize_t
-iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                struct iomap_ops *ops)
 {
        struct address_space *mapping = iocb->ki_filp->f_mapping;
@@ -1343,7 +1235,7 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
 
        while (iov_iter_count(iter)) {
                ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
-                               iter, iomap_dax_actor);
+                               iter, dax_iomap_actor);
                if (ret <= 0)
                        break;
                pos += ret;
@@ -1353,10 +1245,10 @@ iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
        iocb->ki_pos += done;
        return done ? done : ret;
 }
-EXPORT_SYMBOL_GPL(iomap_dax_rw);
+EXPORT_SYMBOL_GPL(dax_iomap_rw);
 
 /**
- * iomap_dax_fault - handle a page fault on a DAX file
+ * dax_iomap_fault - handle a page fault on a DAX file
  * @vma: The virtual memory area where the fault occurred
  * @vmf: The description of the fault
  * @ops: iomap ops passed from the file system
@@ -1365,7 +1257,7 @@ EXPORT_SYMBOL_GPL(iomap_dax_rw);
  * or mkwrite handler for DAX files. Assumes the caller has done all the
  * necessary locking for the page fault to proceed successfully.
  */
-int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        struct iomap_ops *ops)
 {
        struct address_space *mapping = vma->vm_file->f_mapping;
@@ -1374,8 +1266,9 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
        loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
        sector_t sector;
        struct iomap iomap = { 0 };
-       unsigned flags = 0;
+       unsigned flags = IOMAP_FAULT;
        int error, major = 0;
+       int locked_status = 0;
        void *entry;
 
        /*
@@ -1386,7 +1279,7 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
        if (pos >= i_size_read(inode))
                return VM_FAULT_SIGBUS;
 
-       entry = grab_mapping_entry(mapping, vmf->pgoff);
+       entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
        if (IS_ERR(entry)) {
                error = PTR_ERR(entry);
                goto out;
@@ -1405,10 +1298,10 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                goto unlock_entry;
        if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
                error = -EIO;           /* fs corruption? */
-               goto unlock_entry;
+               goto finish_iomap;
        }
 
-       sector = iomap.blkno + (((pos & PAGE_MASK) - iomap.offset) >> 9);
+       sector = dax_iomap_sector(&iomap, pos);
 
        if (vmf->cow_page) {
                switch (iomap.type) {
@@ -1427,13 +1320,15 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                }
 
                if (error)
-                       goto unlock_entry;
+                       goto finish_iomap;
                if (!radix_tree_exceptional_entry(entry)) {
                        vmf->page = entry;
-                       return VM_FAULT_LOCKED;
+                       locked_status = VM_FAULT_LOCKED;
+               } else {
+                       vmf->entry = entry;
+                       locked_status = VM_FAULT_DAX_LOCKED;
                }
-               vmf->entry = entry;
-               return VM_FAULT_DAX_LOCKED;
+               goto finish_iomap;
        }
 
        switch (iomap.type) {
@@ -1448,8 +1343,10 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                break;
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
-               if (!(vmf->flags & FAULT_FLAG_WRITE))
-                       return dax_load_hole(mapping, entry, vmf);
+               if (!(vmf->flags & FAULT_FLAG_WRITE)) {
+                       locked_status = dax_load_hole(mapping, entry, vmf);
+                       break;
+               }
                /*FALLTHRU*/
        default:
                WARN_ON_ONCE(1);
@@ -1457,15 +1354,218 @@ int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                break;
        }
 
+ finish_iomap:
+       if (ops->iomap_end) {
+               if (error) {
+                       /* keep previous error */
+                       ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
+                                       &iomap);
+               } else {
+                       error = ops->iomap_end(inode, pos, PAGE_SIZE,
+                                       PAGE_SIZE, flags, &iomap);
+               }
+       }
  unlock_entry:
-       put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+       if (!locked_status || error)
+               put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  out:
        if (error == -ENOMEM)
                return VM_FAULT_OOM | major;
        /* -EBUSY is fine, somebody else faulted on the same PTE */
        if (error < 0 && error != -EBUSY)
                return VM_FAULT_SIGBUS | major;
+       if (locked_status) {
+               WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
+               return locked_status;
+       }
        return VM_FAULT_NOPAGE | major;
 }
-EXPORT_SYMBOL_GPL(iomap_dax_fault);
+EXPORT_SYMBOL_GPL(dax_iomap_fault);
+
+#ifdef CONFIG_FS_DAX_PMD
+/*
+ * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
+ * more often than one might expect in the below functions.
+ */
+#define PG_PMD_COLOUR  ((PMD_SIZE >> PAGE_SHIFT) - 1)
+
+static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
+               struct vm_fault *vmf, unsigned long address,
+               struct iomap *iomap, loff_t pos, bool write, void **entryp)
+{
+       struct address_space *mapping = vma->vm_file->f_mapping;
+       struct block_device *bdev = iomap->bdev;
+       struct blk_dax_ctl dax = {
+               .sector = dax_iomap_sector(iomap, pos),
+               .size = PMD_SIZE,
+       };
+       long length = dax_map_atomic(bdev, &dax);
+       void *ret;
+
+       if (length < 0) /* dax_map_atomic() failed */
+               return VM_FAULT_FALLBACK;
+       if (length < PMD_SIZE)
+               goto unmap_fallback;
+       if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
+               goto unmap_fallback;
+       if (!pfn_t_devmap(dax.pfn))
+               goto unmap_fallback;
+
+       dax_unmap_atomic(bdev, &dax);
+
+       ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
+                       RADIX_DAX_PMD);
+       if (IS_ERR(ret))
+               return VM_FAULT_FALLBACK;
+       *entryp = ret;
+
+       return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);
+
+ unmap_fallback:
+       dax_unmap_atomic(bdev, &dax);
+       return VM_FAULT_FALLBACK;
+}
+
+static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
+               struct vm_fault *vmf, unsigned long address,
+               struct iomap *iomap, void **entryp)
+{
+       struct address_space *mapping = vma->vm_file->f_mapping;
+       unsigned long pmd_addr = address & PMD_MASK;
+       struct page *zero_page;
+       spinlock_t *ptl;
+       pmd_t pmd_entry;
+       void *ret;
+
+       zero_page = mm_get_huge_zero_page(vma->vm_mm);
+
+       if (unlikely(!zero_page))
+               return VM_FAULT_FALLBACK;
+
+       ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
+                       RADIX_DAX_PMD | RADIX_DAX_HZP);
+       if (IS_ERR(ret))
+               return VM_FAULT_FALLBACK;
+       *entryp = ret;
+
+       ptl = pmd_lock(vma->vm_mm, pmd);
+       if (!pmd_none(*pmd)) {
+               spin_unlock(ptl);
+               return VM_FAULT_FALLBACK;
+       }
+
+       pmd_entry = mk_pmd(zero_page, vma->vm_page_prot);
+       pmd_entry = pmd_mkhuge(pmd_entry);
+       set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry);
+       spin_unlock(ptl);
+       return VM_FAULT_NOPAGE;
+}
+
+int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
+               pmd_t *pmd, unsigned int flags, struct iomap_ops *ops)
+{
+       struct address_space *mapping = vma->vm_file->f_mapping;
+       unsigned long pmd_addr = address & PMD_MASK;
+       bool write = flags & FAULT_FLAG_WRITE;
+       unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
+       struct inode *inode = mapping->host;
+       int result = VM_FAULT_FALLBACK;
+       struct iomap iomap = { 0 };
+       pgoff_t max_pgoff, pgoff;
+       struct vm_fault vmf;
+       void *entry;
+       loff_t pos;
+       int error;
+
+       /* Fall back to PTEs if we're going to COW */
+       if (write && !(vma->vm_flags & VM_SHARED))
+               goto fallback;
+
+       /* If the PMD would extend outside the VMA */
+       if (pmd_addr < vma->vm_start)
+               goto fallback;
+       if ((pmd_addr + PMD_SIZE) > vma->vm_end)
+               goto fallback;
+
+       /*
+        * Check whether offset isn't beyond end of file now. Caller is
+        * supposed to hold locks serializing us with truncate / punch hole so
+        * this is a reliable test.
+        */
+       pgoff = linear_page_index(vma, pmd_addr);
+       max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
+
+       if (pgoff > max_pgoff)
+               return VM_FAULT_SIGBUS;
+
+       /* If the PMD would extend beyond the file size */
+       if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
+               goto fallback;
+
+       /*
+        * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
+        * PMD or a HZP entry.  If it can't (because a 4k page is already in
+        * the tree, for instance), it will return -EEXIST and we just fall
+        * back to 4k entries.
+        */
+       entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
+       if (IS_ERR(entry))
+               goto fallback;
+
+       /*
+        * Note that we don't use iomap_apply here.  We aren't doing I/O, only
+        * setting up a mapping, so really we're using iomap_begin() as a way
+        * to look up our filesystem block.
+        */
+       pos = (loff_t)pgoff << PAGE_SHIFT;
+       error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
+       if (error)
+               goto unlock_entry;
+       if (iomap.offset + iomap.length < pos + PMD_SIZE)
+               goto finish_iomap;
+
+       vmf.pgoff = pgoff;
+       vmf.flags = flags;
+       vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;
+
+       switch (iomap.type) {
+       case IOMAP_MAPPED:
+               result = dax_pmd_insert_mapping(vma, pmd, &vmf, address,
+                               &iomap, pos, write, &entry);
+               break;
+       case IOMAP_UNWRITTEN:
+       case IOMAP_HOLE:
+               if (WARN_ON_ONCE(write))
+                       goto finish_iomap;
+               result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
+                               &entry);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               break;
+       }
+
+ finish_iomap:
+       if (ops->iomap_end) {
+               if (result == VM_FAULT_FALLBACK) {
+                       ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags,
+                                       &iomap);
+               } else {
+                       error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE,
+                                       iomap_flags, &iomap);
+                       if (error)
+                               result = VM_FAULT_FALLBACK;
+               }
+       }
+ unlock_entry:
+       put_locked_mapping_entry(mapping, pgoff, entry);
+ fallback:
+       if (result == VM_FAULT_FALLBACK) {
+               split_huge_pmd(vma, pmd, address);
+               count_vm_event(THP_FAULT_FALLBACK);
+       }
+       return result;
+}
+EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
+#endif /* CONFIG_FS_DAX_PMD */
 #endif /* CONFIG_FS_IOMAP */
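The fs/dax.c changes above rely on new radix tree entry-encoding helpers (RADIX_DAX_PMD, RADIX_DAX_HZP, RADIX_DAX_EMPTY, dax_radix_locked_entry(), dax_radix_sector(), dax_radix_order()) that come from the include/linux/dax.h change in this merge, which is not shown in this section. Below is a minimal sketch of what that encoding presumably looks like, assuming the old three-bit scheme removed above simply grows one more flag bit; the exact bit positions are illustrative, not copied from the header.

/*
 * Sketch of the exceptional-entry layout assumed by the new helpers.
 * The low bit marks the entry as exceptional, the next bits carry the
 * lock, PMD, huge-zero-page and empty flags, and the remaining bits
 * hold the sector number.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_HZP		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

static inline void *dax_radix_locked_entry(sector_t sector,
					    unsigned long flags)
{
	/* new entries are created locked */
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			((unsigned long)sector << RADIX_DAX_SHIFT) |
			RADIX_DAX_ENTRY_LOCK);
}

static inline sector_t dax_radix_sector(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static inline unsigned int dax_radix_order(void *entry)
{
	/* a PMD entry spans PMD_SIZE / PAGE_SIZE slots in the tree */
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}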
index a0e1478dfd04746e31bd04d113a75a67744bcaf7..b0f241528a30a5e631e134556c1e9db9e3772c41 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -38,7 +38,7 @@ static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
                return 0; /* skip atime */
 
        inode_lock_shared(inode);
-       ret = iomap_dax_rw(iocb, to, &ext2_iomap_ops);
+       ret = dax_iomap_rw(iocb, to, &ext2_iomap_ops);
        inode_unlock_shared(inode);
 
        file_accessed(iocb->ki_filp);
@@ -62,7 +62,7 @@ static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (ret)
                goto out_unlock;
 
-       ret = iomap_dax_rw(iocb, from, &ext2_iomap_ops);
+       ret = dax_iomap_rw(iocb, from, &ext2_iomap_ops);
        if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
                i_size_write(inode, iocb->ki_pos);
                mark_inode_dirty(inode);
@@ -99,7 +99,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
        down_read(&ei->dax_sem);
 
-       ret = iomap_dax_fault(vma, vmf, &ext2_iomap_ops);
+       ret = dax_iomap_fault(vma, vmf, &ext2_iomap_ops);
 
        up_read(&ei->dax_sem);
        if (vmf->flags & FAULT_FLAG_WRITE)
@@ -107,27 +107,6 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        return ret;
 }
 
-static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
-                                               pmd_t *pmd, unsigned int flags)
-{
-       struct inode *inode = file_inode(vma->vm_file);
-       struct ext2_inode_info *ei = EXT2_I(inode);
-       int ret;
-
-       if (flags & FAULT_FLAG_WRITE) {
-               sb_start_pagefault(inode->i_sb);
-               file_update_time(vma->vm_file);
-       }
-       down_read(&ei->dax_sem);
-
-       ret = dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block);
-
-       up_read(&ei->dax_sem);
-       if (flags & FAULT_FLAG_WRITE)
-               sb_end_pagefault(inode->i_sb);
-       return ret;
-}
-
 static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
                struct vm_fault *vmf)
 {
@@ -154,7 +133,11 @@ static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
 
 static const struct vm_operations_struct ext2_dax_vm_ops = {
        .fault          = ext2_dax_fault,
-       .pmd_fault      = ext2_dax_pmd_fault,
+       /*
+        * .pmd_fault is not supported for DAX because allocation in ext2
+        * cannot be reliably aligned to huge page sizes and so pmd faults
+        * will always fail and fail back to regular faults.
+        */
        .page_mkwrite   = ext2_dax_fault,
        .pfn_mkwrite    = ext2_dax_pfn_mkwrite,
 };
@@ -166,7 +149,7 @@ static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
 
        file_accessed(file);
        vma->vm_ops = &ext2_dax_vm_ops;
-       vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+       vma->vm_flags |= VM_MIXEDMAP;
        return 0;
 }
 #else
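Whereas ext2 drops .pmd_fault entirely, a filesystem whose allocator can produce huge-page-aligned extents would wire up the new dax_iomap_pmd_fault() instead; the xfs_file.c change in this merge (not shown in this section) is the real user. A hypothetical sketch of such a wrapper, with all my_fs_* names and my_iomap_ops being placeholders rather than anything from this patch:

/*
 * Hypothetical .pmd_fault handler for a PMD-capable DAX filesystem,
 * installed in that filesystem's vm_operations_struct.
 */
static int my_fs_dax_pmd_fault(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, unsigned int flags)
{
	struct inode *inode = file_inode(vma->vm_file);
	int ret;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}

	/* filesystem-specific fault serialisation would go here */
	ret = dax_iomap_pmd_fault(vma, addr, pmd, flags, &my_iomap_ops);

	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);
	return ret;
}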
index 9c064727ed62978e95f27f2fe73c5e946ce672b0..3d58b2b477e8edee3bff7f5b3be26017e95c4aa2 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -767,6 +767,9 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
                ext4_update_bh_state(bh, map.m_flags);
                bh->b_size = inode->i_sb->s_blocksize * map.m_len;
                ret = 0;
+       } else if (ret == 0) {
+               /* hole case, need to fill in bh->b_size */
+               bh->b_size = inode->i_sb->s_blocksize * map.m_len;
        }
        return ret;
 }
index a8ee8c33ca782dbe4a4c17f42bf91fda9e83a523..13dd413b2b9c6a52e4ff0a966aff6902fc29df9a 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -467,8 +467,9 @@ int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 
        offset = page_offset(page);
        while (length > 0) {
-               ret = iomap_apply(inode, offset, length, IOMAP_WRITE,
-                               ops, page, iomap_page_mkwrite_actor);
+               ret = iomap_apply(inode, offset, length,
+                               IOMAP_WRITE | IOMAP_FAULT, ops, page,
+                               iomap_page_mkwrite_actor);
                if (unlikely(ret <= 0))
                        goto out_unlock;
                offset += ret;
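The IOMAP_WRITE | IOMAP_FAULT combination here, like the IOMAP_FAULT used throughout the new dax.c fault paths above, comes from the include/linux/iomap.h change in this merge, which is not shown in this section. A hedged sketch of what that addition presumably looks like; the numeric value is illustrative only:

/* presumed addition to include/linux/iomap.h -- value illustrative */
#define IOMAP_FAULT		(1 << 3) /* mapping established for a page fault */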
index 4f2aed04f8273b5924f34180211dbb9b11e14aa4..8ef420a16f0819ea4e97a3ce9f58767cb5b75513 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -51,7 +51,7 @@ int   xfs_attr_shortform_getvalue(struct xfs_da_args *args);
 int    xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
 int    xfs_attr_shortform_remove(struct xfs_da_args *args);
 int    xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
-int    xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
+int    xfs_attr_shortform_bytesfit(struct xfs_inode *dp, int bytes);
 void   xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp);
 
 /*
index 42f4e7a84e2e8019343c6a7613612fad2ee1b040..5c3c4dd1473564ba33eb98aeeae1d560d6da4b80 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -4902,8 +4902,11 @@ xfs_bmap_del_extent_delay(
         * sb counters as we might have to borrow some blocks for the
         * indirect block accounting.
         */
-       xfs_trans_reserve_quota_nblks(NULL, ip, -((long)del->br_blockcount), 0,
+       error = xfs_trans_reserve_quota_nblks(NULL, ip,
+                       -((long)del->br_blockcount), 0,
                        isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+       if (error)
+               return error;
        ip->i_delayed_blks -= del->br_blockcount;
 
        if (whichfork == XFS_COW_FORK)
index becc926c3e3d900db0a021dd091e46e828fa6f09..0197590fa7d7c0a3d97d68dccdcfb3bd0709964b 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -157,6 +157,9 @@ extern int xfs_dir2_isleaf(struct xfs_da_args *args, int *r);
 extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
                                struct xfs_buf *bp);
 
+extern void xfs_dir2_data_freescan_int(struct xfs_da_geometry *geo,
+               const struct xfs_dir_ops *ops,
+               struct xfs_dir2_data_hdr *hdr, int *loghead);
 extern void xfs_dir2_data_freescan(struct xfs_inode *dp,
                struct xfs_dir2_data_hdr *hdr, int *loghead);
 extern void xfs_dir2_data_log_entry(struct xfs_da_args *args,
@@ -177,6 +180,8 @@ extern struct xfs_dir2_data_free *xfs_dir2_data_freefind(
                struct xfs_dir2_data_hdr *hdr, struct xfs_dir2_data_free *bf,
                struct xfs_dir2_data_unused *dup);
 
+extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
+
 extern const struct xfs_buf_ops xfs_dir3_block_buf_ops;
 extern const struct xfs_buf_ops xfs_dir3_leafn_buf_ops;
 extern const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops;
index 725fc7841fdeb38fdfdba08f1621a83f764baeea..cd75ab9f3bf85c65e456e29fe08a1f24ffbea1bc 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -505,8 +505,9 @@ xfs_dir2_data_freeremove(
  * Given a data block, reconstruct its bestfree map.
  */
 void
-xfs_dir2_data_freescan(
-       struct xfs_inode        *dp,
+xfs_dir2_data_freescan_int(
+       struct xfs_da_geometry  *geo,
+       const struct xfs_dir_ops *ops,
        struct xfs_dir2_data_hdr *hdr,
        int                     *loghead)
 {
@@ -516,7 +517,6 @@ xfs_dir2_data_freescan(
        struct xfs_dir2_data_free *bf;
        char                    *endp;          /* end of block's data */
        char                    *p;             /* current entry pointer */
-       struct xfs_da_geometry  *geo = dp->i_mount->m_dir_geo;
 
        ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
               hdr->magic == cpu_to_be32(XFS_DIR3_DATA_MAGIC) ||
@@ -526,13 +526,13 @@ xfs_dir2_data_freescan(
        /*
         * Start by clearing the table.
         */
-       bf = dp->d_ops->data_bestfree_p(hdr);
+       bf = ops->data_bestfree_p(hdr);
        memset(bf, 0, sizeof(*bf) * XFS_DIR2_DATA_FD_COUNT);
        *loghead = 1;
        /*
         * Set up pointers.
         */
-       p = (char *)dp->d_ops->data_entry_p(hdr);
+       p = (char *)ops->data_entry_p(hdr);
        if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
            hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) {
                btp = xfs_dir2_block_tail_p(geo, hdr);
@@ -559,12 +559,22 @@ xfs_dir2_data_freescan(
                else {
                        dep = (xfs_dir2_data_entry_t *)p;
                        ASSERT((char *)dep - (char *)hdr ==
-                              be16_to_cpu(*dp->d_ops->data_entry_tag_p(dep)));
-                       p += dp->d_ops->data_entsize(dep->namelen);
+                              be16_to_cpu(*ops->data_entry_tag_p(dep)));
+                       p += ops->data_entsize(dep->namelen);
                }
        }
 }
 
+void
+xfs_dir2_data_freescan(
+       struct xfs_inode        *dp,
+       struct xfs_dir2_data_hdr *hdr,
+       int                     *loghead)
+{
+       return xfs_dir2_data_freescan_int(dp->i_mount->m_dir_geo, dp->d_ops,
+                       hdr, loghead);
+}
+
 /*
  * Initialize a data block at the given block number in the directory.
  * Give back the buffer for the created block.
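
The change above splits the bestfree rebuild into xfs_dir2_data_freescan_int(), which takes the directory geometry and ops explicitly, keeping xfs_dir2_data_freescan() as a thin wrapper that derives both from the inode; that lets a caller holding only the geometry and ops, with no struct xfs_inode, reuse the core routine. A minimal standalone model of that split follows; every type and name below is a simplified stand-in, not an XFS type:

#include <stdio.h>

struct demo_geo { int blksize; };
struct demo_ops { const char *name; };
struct demo_inode { struct demo_geo *geo; const struct demo_ops *ops; };

/* Core routine: takes its dependencies explicitly, no inode needed. */
static void freescan_int(struct demo_geo *geo, const struct demo_ops *ops,
                         void *hdr, int *loghead)
{
        printf("scanning with blksize=%d ops=%s hdr=%p\n",
               geo->blksize, ops->name, hdr);
        *loghead = 1;
}

/* Thin wrapper: preserves the old inode-based calling convention. */
static void freescan(struct demo_inode *dp, void *hdr, int *loghead)
{
        freescan_int(dp->geo, dp->ops, hdr, loghead);
}

int main(void)
{
        struct demo_geo geo = { 4096 };
        const struct demo_ops ops = { "v3" };
        struct demo_inode dp = { &geo, &ops };
        int block[8] = { 0 }, loghead = 0;

        freescan(&dp, block, &loghead);            /* caller with an inode */
        freescan_int(&geo, &ops, block, &loghead); /* caller without one */
        return 0;
}
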
index ef9f6ead96a469f33dca80241fd2903f91a692d4..d04547fcf274af0eaee18096c94b22652551b9f7 100644 (file)
@@ -21,7 +21,6 @@
 struct dir_context;
 
 /* xfs_dir2.c */
-extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
 extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
                                xfs_dir2_db_t *dbp);
 extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
index 51b4e0de1fdc424e13f039adf98ae2789a27ba74..c507c1b17ca1866fcef37401aeeeb6a1c740ba6a 100644 (file)
@@ -2344,7 +2344,8 @@ xfs_imap(
 
                imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
                imap->im_len = XFS_FSB_TO_BB(mp, 1);
-               imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
+               imap->im_boffset = (unsigned short)(offset <<
+                                                       mp->m_sb.sb_inodelog);
                return 0;
        }
 
@@ -2372,7 +2373,7 @@ out_map:
 
        imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
        imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
-       imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog);
+       imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
 
        /*
         * If the inode number maps to a block outside the bounds
index 134424fac434fdd7fdd3cecf12d3007712b9734b..54817f82212cfe34784313ac4928f4ffe6589d90 100644 (file)
@@ -383,7 +383,7 @@ xfs_log_dinode_to_disk(
 static bool
 xfs_dinode_verify(
        struct xfs_mount        *mp,
-       struct xfs_inode        *ip,
+       xfs_ino_t               ino,
        struct xfs_dinode       *dip)
 {
        uint16_t                flags;
@@ -401,7 +401,7 @@ xfs_dinode_verify(
        if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
                              XFS_DINODE_CRC_OFF))
                return false;
-       if (be64_to_cpu(dip->di_ino) != ip->i_ino)
+       if (be64_to_cpu(dip->di_ino) != ino)
                return false;
        if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
                return false;
@@ -493,7 +493,7 @@ xfs_iread(
                return error;
 
        /* even unallocated inodes are verified */
-       if (!xfs_dinode_verify(mp, ip, dip)) {
+       if (!xfs_dinode_verify(mp, ip->i_ino, dip)) {
                xfs_alert(mp, "%s: validation failed for inode %lld",
                                __func__, ip->i_ino);
 
index 3cfe12a4f58ac8560e1cd92e529a85477ff5ee02..6848a0afbce7a4db3b03938858ca0e591cbf40ef 100644 (file)
@@ -58,8 +58,8 @@ struct xfs_icdinode {
  */
 struct xfs_imap {
        xfs_daddr_t     im_blkno;       /* starting BB of inode chunk */
-       ushort          im_len;         /* length in BBs of inode chunk */
-       ushort          im_boffset;     /* inode offset in block in bytes */
+       unsigned short  im_len;         /* length in BBs of inode chunk */
+       unsigned short  im_boffset;     /* inode offset in block in bytes */
 };
 
 int    xfs_imap_to_bp(struct xfs_mount *, struct xfs_trans *,
index 083cdd6d6c28cecdf9af7d45df56cdc6d9a13665..7ae571f8e34ac738b9c1ed1d628e5bc17b0e7079 100644 (file)
@@ -481,8 +481,8 @@ static inline uint xfs_log_dinode_size(int version)
 typedef struct xfs_buf_log_format {
        unsigned short  blf_type;       /* buf log item type indicator */
        unsigned short  blf_size;       /* size of this item */
-       ushort          blf_flags;      /* misc state */
-       ushort          blf_len;        /* number of blocks in this buf */
+       unsigned short  blf_flags;      /* misc state */
+       unsigned short  blf_len;        /* number of blocks in this buf */
        __int64_t       blf_blkno;      /* starting blkno of this buf */
        unsigned int    blf_map_size;   /* used size of data bitmap in words */
        unsigned int    blf_data_map[XFS_BLF_DATAMAP_SIZE]; /* dirty bitmap */
index 8e385f91d660233deceb8c484f8a9a60eb94d887..d9f65e2d5cc818260c72cc43fe5687f296cea73b 100644 (file)
@@ -52,7 +52,7 @@ typedef struct xlog_recover {
        struct list_head        r_itemq;        /* q for items */
 } xlog_recover_t;
 
-#define ITEM_TYPE(i)   (*(ushort *)(i)->ri_buf[0].i_addr)
+#define ITEM_TYPE(i)   (*(unsigned short *)(i)->ri_buf[0].i_addr)
 
 /*
  * This is the number of entries in the l_buf_cancel_table used during
index e2e1106c9fadc91d41ab235904409b5428d7a83c..ea45584a9913bbd51908c9a477bb25c90d000fa9 100644 (file)
@@ -1016,4 +1016,3 @@ xfs_rtfree_extent(
        }
        return 0;
 }
-
index a70aec9106263f3e45e454e6fa3ef5ebcdac26ea..2580262e4ea00c3dc728b041dca125f4f7078373 100644 (file)
@@ -262,6 +262,12 @@ xfs_mount_validate_sb(
                return -EFSCORRUPTED;
        }
 
+       if (xfs_sb_version_hascrc(&mp->m_sb) &&
+           sbp->sb_blocksize < XFS_MIN_CRC_BLOCKSIZE) {
+               xfs_notice(mp, "v5 SB sanity check failed");
+               return -EFSCORRUPTED;
+       }
+
        /*
         * Until this is fixed only page-sized or smaller data blocks work.
         */
@@ -338,13 +344,16 @@ xfs_sb_quota_from_disk(struct xfs_sb *sbp)
                                        XFS_PQUOTA_CHKD : XFS_GQUOTA_CHKD;
        sbp->sb_qflags &= ~(XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD);
 
-       if (sbp->sb_qflags & XFS_PQUOTA_ACCT)  {
+       if (sbp->sb_qflags & XFS_PQUOTA_ACCT &&
+           sbp->sb_gquotino != NULLFSINO)  {
                /*
                 * In older version of superblock, on-disk superblock only
                 * has sb_gquotino, and in-core superblock has both sb_gquotino
                 * and sb_pquotino. But, only one of them is supported at any
                 * point of time. So, if PQUOTA is set in disk superblock,
-                * copy over sb_gquotino to sb_pquotino.
+                * copy over sb_gquotino to sb_pquotino.  The NULLFSINO test
+                * above is to make sure we don't do this twice and wipe them
+                * both out!
                 */
                sbp->sb_pquotino = sbp->sb_gquotino;
                sbp->sb_gquotino = NULLFSINO;
index 8d74870468c24bb5661a0afa0e7ab436f8b4ded0..cf044c0f4d4178ee09e9b3e4c1eba2ceef161a1a 100644 (file)
@@ -75,11 +75,14 @@ typedef __int64_t   xfs_sfiloff_t;  /* signed block number in a file */
  * Minimum and maximum blocksize and sectorsize.
  * The blocksize upper limit is pretty much arbitrary.
  * The sectorsize upper limit is due to sizeof(sb_sectsize).
+ * CRC enabled filesystems use 512 byte inodes, meaning 512 byte block sizes
+ * cannot be used.
  */
 #define XFS_MIN_BLOCKSIZE_LOG  9       /* i.e. 512 bytes */
 #define XFS_MAX_BLOCKSIZE_LOG  16      /* i.e. 65536 bytes */
 #define XFS_MIN_BLOCKSIZE      (1 << XFS_MIN_BLOCKSIZE_LOG)
 #define XFS_MAX_BLOCKSIZE      (1 << XFS_MAX_BLOCKSIZE_LOG)
+#define XFS_MIN_CRC_BLOCKSIZE  (1 << (XFS_MIN_BLOCKSIZE_LOG + 1))
 #define XFS_MIN_SECTORSIZE_LOG 9       /* i.e. 512 bytes */
 #define XFS_MAX_SECTORSIZE_LOG 15      /* i.e. 32768 bytes */
 #define XFS_MIN_SECTORSIZE     (1 << XFS_MIN_SECTORSIZE_LOG)
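
With XFS_MIN_BLOCKSIZE_LOG of 9, the new XFS_MIN_CRC_BLOCKSIZE works out to 1 << 10 = 1024 bytes, which is the limit the v5 superblock check added in xfs_sb.c above enforces. A standalone sketch of the arithmetic and of a check along those lines; the macro names below are shortened stand-ins, not the kernel headers:

#include <stdbool.h>
#include <stdio.h>

#define MIN_BLOCKSIZE_LOG       9
#define MIN_BLOCKSIZE           (1 << MIN_BLOCKSIZE_LOG)        /* 512 */
#define MIN_CRC_BLOCKSIZE       (1 << (MIN_BLOCKSIZE_LOG + 1))  /* 1024 */

static bool blocksize_ok(unsigned blocksize, bool has_crc)
{
        if (blocksize < MIN_BLOCKSIZE)
                return false;
        if (has_crc && blocksize < MIN_CRC_BLOCKSIZE)
                return false;   /* v5 SB sanity check fails */
        return true;
}

int main(void)
{
        printf("512 on v5: %d, 1024 on v5: %d, 512 on v4: %d\n",
               blocksize_ok(512, true), blocksize_ok(1024, true),
               blocksize_ok(512, false));
        return 0;
}
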
index 2693ba84ec2541072396d138fbf970bca90a597c..dd6cacf59b5aa0d48850de0d2a626a8dd3d02de0 100644 (file)
@@ -1298,8 +1298,7 @@ __xfs_get_blocks(
        sector_t                iblock,
        struct buffer_head      *bh_result,
        int                     create,
-       bool                    direct,
-       bool                    dax_fault)
+       bool                    direct)
 {
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
@@ -1440,13 +1439,8 @@ __xfs_get_blocks(
                if (ISUNWRITTEN(&imap))
                        set_buffer_unwritten(bh_result);
                /* direct IO needs special help */
-               if (create) {
-                       if (dax_fault)
-                               ASSERT(!ISUNWRITTEN(&imap));
-                       else
-                               xfs_map_direct(inode, bh_result, &imap, offset,
-                                               is_cow);
-               }
+               if (create)
+                       xfs_map_direct(inode, bh_result, &imap, offset, is_cow);
        }
 
        /*
@@ -1484,7 +1478,7 @@ xfs_get_blocks(
        struct buffer_head      *bh_result,
        int                     create)
 {
-       return __xfs_get_blocks(inode, iblock, bh_result, create, false, false);
+       return __xfs_get_blocks(inode, iblock, bh_result, create, false);
 }
 
 int
@@ -1494,17 +1488,7 @@ xfs_get_blocks_direct(
        struct buffer_head      *bh_result,
        int                     create)
 {
-       return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
-}
-
-int
-xfs_get_blocks_dax_fault(
-       struct inode            *inode,
-       sector_t                iblock,
-       struct buffer_head      *bh_result,
-       int                     create)
-{
-       return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
+       return __xfs_get_blocks(inode, iblock, bh_result, create, true);
 }
 
 /*
index b3c6634f9518484d3e0d1691a53e7ff705a5f380..34dc00dfb91d803e074f99077eb89b98354f04d4 100644 (file)
@@ -59,9 +59,6 @@ int   xfs_get_blocks(struct inode *inode, sector_t offset,
                       struct buffer_head *map_bh, int create);
 int    xfs_get_blocks_direct(struct inode *inode, sector_t offset,
                              struct buffer_head *map_bh, int create);
-int    xfs_get_blocks_dax_fault(struct inode *inode, sector_t offset,
-                                struct buffer_head *map_bh, int create);
-
 int    xfs_end_io_direct_write(struct kiocb *iocb, loff_t offset,
                ssize_t size, void *private);
 int    xfs_setfilesize(struct xfs_inode *ip, xfs_off_t offset, size_t size);
index 6e4f7f900fea4c30f44477dc258db5334b980486..d818c160451f50486fd6f72fa88fe6b3d889e1e6 100644 (file)
@@ -318,7 +318,7 @@ xfs_file_dax_read(
                return 0; /* skip atime */
 
        xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-       ret = iomap_dax_rw(iocb, to, &xfs_iomap_ops);
+       ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
        xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 
        file_accessed(iocb->ki_filp);
@@ -653,7 +653,7 @@ xfs_file_dax_write(
 
        trace_xfs_file_dax_write(ip, count, pos);
 
-       ret = iomap_dax_rw(iocb, from, &xfs_iomap_ops);
+       ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
        if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
                i_size_write(inode, iocb->ki_pos);
                error = xfs_setfilesize(ip, pos, ret);
@@ -1474,7 +1474,7 @@ xfs_filemap_page_mkwrite(
        xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
        if (IS_DAX(inode)) {
-               ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
+               ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
        } else {
                ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
                ret = block_page_mkwrite_return(ret);
@@ -1508,7 +1508,7 @@ xfs_filemap_fault(
                 * changes to xfs_get_blocks_direct() to map unwritten extent
                 * ioend for conversion on read-only mappings.
                 */
-               ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
+               ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
        } else
                ret = filemap_fault(vma, vmf);
        xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
@@ -1545,7 +1545,7 @@ xfs_filemap_pmd_fault(
        }
 
        xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-       ret = dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
+       ret = dax_iomap_pmd_fault(vma, addr, pmd, flags, &xfs_iomap_ops);
        xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
        if (flags & FAULT_FLAG_WRITE)
index 9b3d7c76915d9c92948bed063d79bd10a6107a77..cf754bcbcb1caa8bcb72b44b15e94eca3bcffbc2 100644 (file)
@@ -2025,7 +2025,7 @@ xlog_peek_buffer_cancelled(
        struct xlog             *log,
        xfs_daddr_t             blkno,
        uint                    len,
-       ushort                  flags)
+       unsigned short           flags)
 {
        struct list_head        *bucket;
        struct xfs_buf_cancel   *bcp;
@@ -2065,7 +2065,7 @@ xlog_check_buffer_cancelled(
        struct xlog             *log,
        xfs_daddr_t             blkno,
        uint                    len,
-       ushort                  flags)
+       unsigned short           flags)
 {
        struct xfs_buf_cancel   *bcp;
 
index add6c4bc568f150cea20c058efee0419f9672fab..8d1a5c47945f2bf6de635ca340f0226b80a9c3d3 100644 (file)
@@ -8,21 +8,46 @@
 
 struct iomap_ops;
 
-/* We use lowest available exceptional entry bit for locking */
+/*
+ * We use lowest available bit in exceptional entry for locking, one bit for
+ * the entry size (PMD) and two more to tell us if the entry is a huge zero
+ * page (HZP) or an empty entry that is just used for locking.  In total four
+ * special bits.
+ *
+ * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the HZP and
+ * EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
+ * block allocation.
+ */
+#define RADIX_DAX_SHIFT        (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
 #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
+#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
+#define RADIX_DAX_HZP (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
+#define RADIX_DAX_EMPTY (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
 
-ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
+static inline unsigned long dax_radix_sector(void *entry)
+{
+       return (unsigned long)entry >> RADIX_DAX_SHIFT;
+}
+
+static inline void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
+{
+       return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
+                       ((unsigned long)sector << RADIX_DAX_SHIFT) |
+                       RADIX_DAX_ENTRY_LOCK);
+}
+
+ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                struct iomap_ops *ops);
 ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
                  get_block_t, dio_iodone_t, int flags);
 int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
 int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        struct iomap_ops *ops);
 int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
-                                  pgoff_t index, bool wake_all);
+               pgoff_t index, void *entry, bool wake_all);
 
 #ifdef CONFIG_FS_DAX
 struct page *read_dax_sector(struct block_device *bdev, sector_t n);
@@ -48,15 +73,32 @@ static inline int __dax_zero_page_range(struct block_device *bdev,
 }
 #endif
 
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
-int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
-                               unsigned int flags, get_block_t);
-#else
 static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                                pmd_t *pmd, unsigned int flags, get_block_t gb)
 {
        return VM_FAULT_FALLBACK;
 }
+
+#ifdef CONFIG_FS_DAX_PMD
+static inline unsigned int dax_radix_order(void *entry)
+{
+       if ((unsigned long)entry & RADIX_DAX_PMD)
+               return PMD_SHIFT - PAGE_SHIFT;
+       return 0;
+}
+int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
+               pmd_t *pmd, unsigned int flags, struct iomap_ops *ops);
+#else
+static inline unsigned int dax_radix_order(void *entry)
+{
+       return 0;
+}
+static inline int dax_iomap_pmd_fault(struct vm_area_struct *vma,
+               unsigned long address, pmd_t *pmd, unsigned int flags,
+               struct iomap_ops *ops)
+{
+       return VM_FAULT_FALLBACK;
+}
 #endif
 int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
 #define dax_mkwrite(vma, vmf, gb)      dax_fault(vma, vmf, gb)
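
The reworked comment and helpers above pack a lock bit plus PMD/HZP/EMPTY flag bits just above the radix tree's exceptional-entry bits and keep the sector in the remaining high bits; dax_radix_locked_entry() and dax_radix_sector() encode and decode that layout. A standalone model of the encoding follows; the exceptional-entry tag value and RADIX_TREE_EXCEPTIONAL_SHIFT are assumed to be 2 here purely for illustration, and all names are local stand-ins:

#include <assert.h>
#include <stdio.h>

#define EXCEPTIONAL_ENTRY       2UL     /* assumed tag value for this sketch */
#define EXCEPTIONAL_SHIFT       2       /* assumed shift for this sketch */
#define DAX_ENTRY_LOCK          (1UL << EXCEPTIONAL_SHIFT)
#define DAX_PMD                 (1UL << (EXCEPTIONAL_SHIFT + 1))
#define DAX_HZP                 (1UL << (EXCEPTIONAL_SHIFT + 2))
#define DAX_EMPTY               (1UL << (EXCEPTIONAL_SHIFT + 3))
#define DAX_SHIFT               (EXCEPTIONAL_SHIFT + 4)

/* Build a locked entry: tag + type flags + sector in the high bits. */
static void *locked_entry(unsigned long sector, unsigned long flags)
{
        return (void *)(EXCEPTIONAL_ENTRY | flags |
                        (sector << DAX_SHIFT) | DAX_ENTRY_LOCK);
}

/* Recover the sector by discarding the low special bits. */
static unsigned long entry_sector(void *entry)
{
        return (unsigned long)entry >> DAX_SHIFT;
}

int main(void)
{
        void *e = locked_entry(0x1234, DAX_PMD);

        assert(entry_sector(e) == 0x1234);
        assert((unsigned long)e & DAX_PMD);
        assert(!((unsigned long)e & DAX_EMPTY));
        printf("sector=%#lx pmd=%d\n", entry_sector(e),
               !!((unsigned long)e & DAX_PMD));
        return 0;
}
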
index 7892f55a1866db26d5c4edabf69a59606a2efa3f..f185156de74d8270bf931cda18c46f7dd918f294 100644 (file)
@@ -49,6 +49,7 @@ struct iomap {
 #define IOMAP_WRITE            (1 << 0) /* writing, must allocate blocks */
 #define IOMAP_ZERO             (1 << 1) /* zeroing operation, may skip holes */
 #define IOMAP_REPORT           (1 << 2) /* report extent status, e.g. FIEMAP */
+#define IOMAP_FAULT            (1 << 3) /* mapping for page fault */
 
 struct iomap_ops {
        /*
index 849f459ad0780e27bc256ff13fd52fa8c9007661..00ab94a882de59a2be1dc764d0de9cb6790ed344 100644 (file)
@@ -137,13 +137,12 @@ static int page_cache_tree_insert(struct address_space *mapping,
                } else {
                        /* DAX can replace empty locked entry with a hole */
                        WARN_ON_ONCE(p !=
-                               (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
-                                        RADIX_DAX_ENTRY_LOCK));
+                               dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
                        /* DAX accounts exceptional entries as normal pages */
                        if (node)
                                workingset_node_pages_dec(node);
                        /* Wakeup waiters for exceptional entry lock */
-                       dax_wake_mapping_entry_waiter(mapping, page->index,
+                       dax_wake_mapping_entry_waiter(mapping, page->index, p,
                                                      false);
                }
        }