Merge branch 'cleanups-post-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Chris Mason <clm@fb.com>
Wed, 25 Mar 2015 17:52:48 +0000 (10:52 -0700)
committer Chris Mason <clm@fb.com>
Wed, 25 Mar 2015 17:52:48 +0000 (10:52 -0700)
Signed-off-by: Chris Mason <clm@fb.com>
Conflicts:
fs/btrfs/disk-io.c

14 files changed:
fs/btrfs/check-integrity.c
fs/btrfs/compression.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/qgroup.c
fs/btrfs/raid56.c
fs/btrfs/scrub.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c

index 089d6fac01a7cff7ba558177f6c234503d3d4d68,77ba1634c9ab1eee00ffd91b4b78e9f702613835..ce7dec88f4b82c63ddefdc9398adcbda767d6c99
@@@ -2990,8 -2990,8 +2990,8 @@@ static void __btrfsic_submit_bio(int rw
                               (unsigned long long)bio->bi_iter.bi_sector,
                               dev_bytenr, bio->bi_bdev);
  
 -              mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
 -                                     GFP_NOFS);
 +              mapped_datav = kmalloc_array(bio->bi_vcnt,
 +                                           sizeof(*mapped_datav), GFP_NOFS);
                if (!mapped_datav)
                        goto leave;
                cur_bytenr = dev_bytenr;
@@@ -3241,8 -3241,5 +3241,5 @@@ void btrfsic_unmount(struct btrfs_root 
  
        mutex_unlock(&btrfsic_mutex);
  
-       if (is_vmalloc_addr(state))
-               vfree(state);
-       else
-               kfree(state);
+       kvfree(state);
  }
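
The two check-integrity.c hunks above set the pattern for the whole cleanup
branch. A minimal kernel-style sketch of both idioms follows; the demo_*
names are hypothetical, only kmalloc_array() and kvfree() come from the diff:

    #include <linux/slab.h>
    #include <linux/mm.h>	/* kvfree() */

    static void **demo_alloc(unsigned int nr)
    {
        /*
         * kmalloc_array(n, size, flags) checks n * size for overflow
         * and returns NULL instead of silently wrapping, unlike the
         * open-coded kmalloc(sizeof(*p) * n, ...) it replaces.
         */
        return kmalloc_array(nr, sizeof(void *), GFP_NOFS);
    }

    static void demo_free(void *p)
    {
        /*
         * kvfree() accepts both kmalloc()ed and vmalloc()ed pointers,
         * folding away the is_vmalloc_addr() ? vfree() : kfree()
         * branch removed in btrfsic_unmount() above.
         */
        kvfree(p);
    }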
diff --combined fs/btrfs/compression.c
index ecacb7a2d422926e6f9fd3d36354577eca5c5883,e7a94f8cdfd71e63c7715440f9c9be448dc267ae..ce62324c78e7dbc734a2793c39a89de38238e27b
@@@ -622,7 -622,7 +622,7 @@@ int btrfs_submit_compressed_read(struc
        cb->orig_bio = bio;
  
        nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE);
 -      cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
 +      cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
                                       GFP_NOFS);
        if (!cb->compressed_pages)
                goto fail1;
@@@ -750,7 -750,7 +750,7 @@@ static int comp_num_workspace[BTRFS_COM
  static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES];
  static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES];
  
- static struct btrfs_compress_op *btrfs_compress_op[] = {
+ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
        &btrfs_zlib_compress,
        &btrfs_lzo_compress,
  };
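
compression.c gets the zeroing variant of the same conversion, plus a
const-correctness fix on the compress-op table. A sketch under the same
assumptions (demo_* names are illustrative, kcalloc() is the real API):

    #include <linux/slab.h>

    struct demo_op { int (*compress)(void); };

    /*
     * const array of const pointers: neither the slots nor the ops they
     * point at are writable, so the table can live in .rodata.
     */
    static const struct demo_op * const demo_ops[] = { /* ... */ };

    static struct page **demo_pages(unsigned int nr_pages)
    {
        /*
         * kcalloc(n, size, flags) is overflow-checked and zero-filled,
         * replacing the kzalloc(size * n, flags) from the hunk above.
         */
        return kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
    }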
diff --combined fs/btrfs/ctree.c
index 1c7e913f1a4b2d8743c620ed5936d085d1ed793f,e1a0981159ab689b65e11c869edf670ba07e7852..0f11ebc92f02ac69ffe3302624852cf9e467656a
@@@ -578,7 -578,7 +578,7 @@@ tree_mod_log_insert_move(struct btrfs_f
        if (!tree_mod_need_log(fs_info, eb))
                return 0;
  
 -      tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
 +      tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
        if (!tm_list)
                return -ENOMEM;
  
@@@ -677,7 -677,7 +677,7 @@@ tree_mod_log_insert_root(struct btrfs_f
  
        if (log_removal && btrfs_header_level(old_root) > 0) {
                nritems = btrfs_header_nritems(old_root);
 -              tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
 +              tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
                                  flags);
                if (!tm_list) {
                        ret = -ENOMEM;
@@@ -814,7 -814,7 +814,7 @@@ tree_mod_log_eb_copy(struct btrfs_fs_in
        if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
                return 0;
  
 -      tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
 +      tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
                          GFP_NOFS);
        if (!tm_list)
                return -ENOMEM;
@@@ -905,7 -905,8 +905,7 @@@ tree_mod_log_free_eb(struct btrfs_fs_in
                return 0;
  
        nritems = btrfs_header_nritems(eb);
 -      tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
 -                        GFP_NOFS);
 +      tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
        if (!tm_list)
                return -ENOMEM;
  
@@@ -1072,7 -1073,7 +1072,7 @@@ static noinline int update_ref_for_cow(
                        ret = btrfs_dec_ref(trans, root, buf, 1);
                        BUG_ON(ret); /* -ENOMEM */
                }
-               clean_tree_block(trans, root, buf);
+               clean_tree_block(trans, root->fs_info, buf);
                *last_ref = 1;
        }
        return 0;
@@@ -1644,14 -1645,14 +1644,14 @@@ int btrfs_realloc_node(struct btrfs_tra
  
        parent_nritems = btrfs_header_nritems(parent);
        blocksize = root->nodesize;
 -      end_slot = parent_nritems;
 +      end_slot = parent_nritems - 1;
  
 -      if (parent_nritems == 1)
 +      if (parent_nritems <= 1)
                return 0;
  
        btrfs_set_lock_blocking(parent);
  
 -      for (i = start_slot; i < end_slot; i++) {
 +      for (i = start_slot; i <= end_slot; i++) {
                int close = 1;
  
                btrfs_node_key(parent, &disk_key, i);
                        other = btrfs_node_blockptr(parent, i - 1);
                        close = close_blocks(blocknr, other, blocksize);
                }
 -              if (!close && i < end_slot - 2) {
 +              if (!close && i < end_slot) {
                        other = btrfs_node_blockptr(parent, i + 1);
                        close = close_blocks(blocknr, other, blocksize);
                }
                        continue;
                }
  
-               cur = btrfs_find_tree_block(root, blocknr);
+               cur = btrfs_find_tree_block(root->fs_info, blocknr);
                if (cur)
                        uptodate = btrfs_buffer_uptodate(cur, gen, 0);
                else
@@@ -1942,7 -1943,7 +1942,7 @@@ static noinline int balance_level(struc
  
                path->locks[level] = 0;
                path->nodes[level] = NULL;
-               clean_tree_block(trans, root, mid);
+               clean_tree_block(trans, root->fs_info, mid);
                btrfs_tree_unlock(mid);
                /* once for the path */
                free_extent_buffer(mid);
                if (wret < 0 && wret != -ENOSPC)
                        ret = wret;
                if (btrfs_header_nritems(right) == 0) {
-                       clean_tree_block(trans, root, right);
+                       clean_tree_block(trans, root->fs_info, right);
                        btrfs_tree_unlock(right);
                        del_ptr(root, path, level + 1, pslot + 1);
                        root_sub_used(root, right->len);
                BUG_ON(wret == 1);
        }
        if (btrfs_header_nritems(mid) == 0) {
-               clean_tree_block(trans, root, mid);
+               clean_tree_block(trans, root->fs_info, mid);
                btrfs_tree_unlock(mid);
                del_ptr(root, path, level + 1, pslot);
                root_sub_used(root, mid->len);
@@@ -2258,7 -2259,7 +2258,7 @@@ static void reada_for_search(struct btr
  
        search = btrfs_node_blockptr(node, slot);
        blocksize = root->nodesize;
-       eb = btrfs_find_tree_block(root, search);
+       eb = btrfs_find_tree_block(root->fs_info, search);
        if (eb) {
                free_extent_buffer(eb);
                return;
@@@ -2318,7 -2319,7 +2318,7 @@@ static noinline void reada_for_balance(
        if (slot > 0) {
                block1 = btrfs_node_blockptr(parent, slot - 1);
                gen = btrfs_node_ptr_generation(parent, slot - 1);
-               eb = btrfs_find_tree_block(root, block1);
+               eb = btrfs_find_tree_block(root->fs_info, block1);
                /*
                 * if we get -eagain from btrfs_buffer_uptodate, we
                 * don't want to return eagain here.  That will loop
        if (slot + 1 < nritems) {
                block2 = btrfs_node_blockptr(parent, slot + 1);
                gen = btrfs_node_ptr_generation(parent, slot + 1);
-               eb = btrfs_find_tree_block(root, block2);
+               eb = btrfs_find_tree_block(root->fs_info, block2);
                if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
                        block2 = 0;
                free_extent_buffer(eb);
@@@ -2449,7 -2450,7 +2449,7 @@@ read_block_for_search(struct btrfs_tran
        blocknr = btrfs_node_blockptr(b, slot);
        gen = btrfs_node_ptr_generation(b, slot);
  
-       tmp = btrfs_find_tree_block(root, blocknr);
+       tmp = btrfs_find_tree_block(root->fs_info, blocknr);
        if (tmp) {
                /* first we do an atomic uptodate check */
                if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
@@@ -3125,7 -3126,8 +3125,8 @@@ again
   * higher levels
   *
   */
- static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
+ static void fixup_low_keys(struct btrfs_fs_info *fs_info,
+                          struct btrfs_path *path,
                           struct btrfs_disk_key *key, int level)
  {
        int i;
                if (!path->nodes[i])
                        break;
                t = path->nodes[i];
-               tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
+               tree_mod_log_set_node_key(fs_info, t, tslot, 1);
                btrfs_set_node_key(t, key, tslot);
                btrfs_mark_buffer_dirty(path->nodes[i]);
                if (tslot != 0)
   * This function isn't completely safe. It's the caller's responsibility
   * that the new key won't break the order
   */
- void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
+ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
+                            struct btrfs_path *path,
                             struct btrfs_key *new_key)
  {
        struct btrfs_disk_key disk_key;
        btrfs_set_item_key(eb, &disk_key, slot);
        btrfs_mark_buffer_dirty(eb);
        if (slot == 0)
-               fixup_low_keys(root, path, &disk_key, 1);
+               fixup_low_keys(fs_info, path, &disk_key, 1);
  }
  
  /*
@@@ -3691,7 -3694,7 +3693,7 @@@ static noinline int __push_leaf_right(s
        if (left_nritems)
                btrfs_mark_buffer_dirty(left);
        else
-               clean_tree_block(trans, root, left);
+               clean_tree_block(trans, root->fs_info, left);
  
        btrfs_mark_buffer_dirty(right);
  
        if (path->slots[0] >= left_nritems) {
                path->slots[0] -= left_nritems;
                if (btrfs_header_nritems(path->nodes[0]) == 0)
-                       clean_tree_block(trans, root, path->nodes[0]);
+                       clean_tree_block(trans, root->fs_info, path->nodes[0]);
                btrfs_tree_unlock(path->nodes[0]);
                free_extent_buffer(path->nodes[0]);
                path->nodes[0] = right;
@@@ -3927,10 -3930,10 +3929,10 @@@ static noinline int __push_leaf_left(st
        if (right_nritems)
                btrfs_mark_buffer_dirty(right);
        else
-               clean_tree_block(trans, root, right);
+               clean_tree_block(trans, root->fs_info, right);
  
        btrfs_item_key(right, &disk_key, 0);
-       fixup_low_keys(root, path, &disk_key, 1);
+       fixup_low_keys(root->fs_info, path, &disk_key, 1);
  
        /* then fixup the leaf pointer in the path */
        if (path->slots[0] < push_items) {
@@@ -4167,6 -4170,7 +4169,7 @@@ static noinline int split_leaf(struct b
        int mid;
        int slot;
        struct extent_buffer *right;
+       struct btrfs_fs_info *fs_info = root->fs_info;
        int ret = 0;
        int wret;
        int split;
@@@ -4270,10 -4274,10 +4273,10 @@@ again
        btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
        btrfs_set_header_owner(right, root->root_key.objectid);
        btrfs_set_header_level(right, 0);
-       write_extent_buffer(right, root->fs_info->fsid,
+       write_extent_buffer(right, fs_info->fsid,
                            btrfs_header_fsid(), BTRFS_FSID_SIZE);
  
-       write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
+       write_extent_buffer(right, fs_info->chunk_tree_uuid,
                            btrfs_header_chunk_tree_uuid(right),
                            BTRFS_UUID_SIZE);
  
                        path->nodes[0] = right;
                        path->slots[0] = 0;
                        if (path->slots[1] == 0)
-                               fixup_low_keys(root, path, &disk_key, 1);
+                               fixup_low_keys(fs_info, path, &disk_key, 1);
                }
                btrfs_mark_buffer_dirty(right);
                return ret;
@@@ -4614,7 -4618,7 +4617,7 @@@ void btrfs_truncate_item(struct btrfs_r
                btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
                btrfs_set_item_key(leaf, &disk_key, slot);
                if (slot == 0)
-                       fixup_low_keys(root, path, &disk_key, 1);
+                       fixup_low_keys(root->fs_info, path, &disk_key, 1);
        }
  
        item = btrfs_item_nr(slot);
@@@ -4715,7 -4719,7 +4718,7 @@@ void setup_items_for_insert(struct btrf
  
        if (path->slots[0] == 0) {
                btrfs_cpu_key_to_disk(&disk_key, cpu_key);
-               fixup_low_keys(root, path, &disk_key, 1);
+               fixup_low_keys(root->fs_info, path, &disk_key, 1);
        }
        btrfs_unlock_up_safe(path, 1);
  
@@@ -4887,7 -4891,7 +4890,7 @@@ static void del_ptr(struct btrfs_root *
                struct btrfs_disk_key disk_key;
  
                btrfs_node_key(parent, &disk_key, 0);
-               fixup_low_keys(root, path, &disk_key, level + 1);
+               fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
        }
        btrfs_mark_buffer_dirty(parent);
  }
@@@ -4980,7 -4984,7 +4983,7 @@@ int btrfs_del_items(struct btrfs_trans_
                        btrfs_set_header_level(leaf, 0);
                } else {
                        btrfs_set_path_blocking(path);
-                       clean_tree_block(trans, root, leaf);
+                       clean_tree_block(trans, root->fs_info, leaf);
                        btrfs_del_leaf(trans, root, path, leaf);
                }
        } else {
                        struct btrfs_disk_key disk_key;
  
                        btrfs_item_key(leaf, &disk_key, 0);
-                       fixup_low_keys(root, path, &disk_key, 1);
+                       fixup_low_keys(root->fs_info, path, &disk_key, 1);
                }
  
                /* delete the leaf if it is mostly empty */
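
Most of the ctree.c churn above is one mechanical interface change: helpers
such as clean_tree_block(), fixup_low_keys(), btrfs_find_tree_block() and
btrfs_set_item_key_safe() now take the btrfs_fs_info they actually use
instead of a btrfs_root they only dereferenced. A sketch of the shape, with
hypothetical helper names:

    struct btrfs_fs_info;
    struct root_like { struct btrfs_fs_info *fs_info; };

    /* before: the root parameter was only ever read as root->fs_info */
    void helper_old(struct root_like *root);

    /*
     * after: callers that have no root at hand, or several roots that
     * share one fs_info, can call the helper directly
     */
    void helper_new(struct btrfs_fs_info *fs_info);

Call sites convert one-for-one, helper_old(root) becoming
helper_new(root->fs_info), which is exactly what the hunks above show.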
diff --combined fs/btrfs/ctree.h
index f8548d669eb6a094c010cfecad5e1c5680b0ea5f,216056c37940a6e7a088cca2f225bbc17d3a3456..95944b81ed5cbc65752599cf4027e286e54f1438
@@@ -1176,7 -1176,6 +1176,7 @@@ struct btrfs_space_info 
        struct percpu_counter total_bytes_pinned;
  
        struct list_head list;
 +      /* Protected by the spinlock 'lock'. */
        struct list_head ro_bgs;
  
        struct rw_semaphore groups_sem;
@@@ -1329,8 -1328,6 +1329,8 @@@ struct seq_list 
        u64 seq;
  };
  
 +#define SEQ_LIST_INIT(name)   { .list = LIST_HEAD_INIT((name).list), .seq = 0 }
 +
  enum btrfs_orphan_cleanup_state {
        ORPHAN_CLEANUP_STARTED  = 1,
        ORPHAN_CLEANUP_DONE     = 2,
@@@ -3389,8 -3386,6 +3389,8 @@@ int btrfs_inc_extent_ref(struct btrfs_t
  
  int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root);
 +int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
 +                          struct btrfs_root *root);
  int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
  int btrfs_free_block_groups(struct btrfs_fs_info *info);
  int btrfs_read_block_groups(struct btrfs_root *root);
@@@ -3488,7 -3483,8 +3488,8 @@@ int btrfs_previous_item(struct btrfs_ro
                        int type);
  int btrfs_previous_extent_item(struct btrfs_root *root,
                        struct btrfs_path *path, u64 min_objectid);
- void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
+ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
+                            struct btrfs_path *path,
                             struct btrfs_key *new_key);
  struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
  struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
@@@ -3913,9 -3909,6 +3914,9 @@@ int btrfs_prealloc_file_range_trans(str
                                    loff_t actual_len, u64 *alloc_hint);
  int btrfs_inode_check_errors(struct inode *inode);
  extern const struct dentry_operations btrfs_dentry_operations;
 +#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 +void btrfs_test_inode_set_ops(struct inode *inode);
 +#endif
  
  /* ioctl.c */
  long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
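
Alongside the new declarations, the header adds a SEQ_LIST_INIT()
initializer for struct seq_list. A one-line usage sketch, on the assumption
that it is used like other LIST_HEAD-style initializers:

    /* initializes both members: .list points at itself, .seq is 0 */
    struct seq_list elem = SEQ_LIST_INIT(elem);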
diff --combined fs/btrfs/disk-io.c
index 6aaaf987fd31f2571769ccc6927e30f7d07b04ba,f770e8b5cb8674fdce509f7b58125400d88e9e20..23c49ab2de4c0cf14dff74daf60e1d1669182e6f
@@@ -54,7 -54,7 +54,7 @@@
  #include <asm/cpufeature.h>
  #endif
  
- static struct extent_io_ops btree_extent_io_ops;
+ static const struct extent_io_ops btree_extent_io_ops;
  static void end_workqueue_fn(struct btrfs_work *work);
  static void free_fs_root(struct btrfs_root *root);
  static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
@@@ -274,10 -274,11 +274,11 @@@ void btrfs_csum_final(u32 crc, char *re
   * compute the csum for a btree block, and either verify it or write it
   * into the csum field of the block.
   */
- static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
+ static int csum_tree_block(struct btrfs_fs_info *fs_info,
+                          struct extent_buffer *buf,
                           int verify)
  {
-       u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+       u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        char *result = NULL;
        unsigned long len;
        unsigned long cur_len;
                offset += cur_len;
        }
        if (csum_size > sizeof(inline_result)) {
 -              result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
 +              result = kzalloc(csum_size, GFP_NOFS);
                if (!result)
                        return 1;
        } else {
                        printk_ratelimited(KERN_WARNING
                                "BTRFS: %s checksum verify failed on %llu wanted %X found %X "
                                "level %d\n",
-                               root->fs_info->sb->s_id, buf->start,
+                               fs_info->sb->s_id, buf->start,
                                val, found, btrfs_header_level(buf));
                        if (result != (char *)&inline_result)
                                kfree(result);
@@@ -501,7 -502,7 +502,7 @@@ static int btree_read_extent_buffer_pag
   * we only fill in the checksum field in the first page of a multi-page block
   */
  
- static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
+ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
  {
        u64 start = page_offset(page);
        u64 found_start;
        found_start = btrfs_header_bytenr(eb);
        if (WARN_ON(found_start != start || !PageUptodate(page)))
                return 0;
-       csum_tree_block(root, eb, 0);
+       csum_tree_block(fs_info, eb, 0);
        return 0;
  }
  
- static int check_tree_block_fsid(struct btrfs_root *root,
+ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
                                 struct extent_buffer *eb)
  {
-       struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+       struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        u8 fsid[BTRFS_UUID_SIZE];
        int ret = 1;
  
@@@ -640,7 -641,7 +641,7 @@@ static int btree_readpage_end_io_hook(s
                ret = -EIO;
                goto err;
        }
-       if (check_tree_block_fsid(root, eb)) {
+       if (check_tree_block_fsid(root->fs_info, eb)) {
                printk_ratelimited(KERN_ERR "BTRFS (device %s): bad fsid on block %llu\n",
                               eb->fs_info->sb->s_id, eb->start);
                ret = -EIO;
        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
                                       eb, found_level);
  
-       ret = csum_tree_block(root, eb, 1);
+       ret = csum_tree_block(root->fs_info, eb, 1);
        if (ret) {
                ret = -EIO;
                goto err;
@@@ -882,7 -883,7 +883,7 @@@ static int btree_csum_one_bio(struct bi
  
        bio_for_each_segment_all(bvec, bio, i) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
-               ret = csum_dirty_buffer(root, bvec->bv_page);
+               ret = csum_dirty_buffer(root->fs_info, bvec->bv_page);
                if (ret)
                        break;
        }
@@@ -1119,10 -1120,10 +1120,10 @@@ int reada_tree_block_flagged(struct btr
        return 0;
  }
  
- struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
+ struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
                                            u64 bytenr)
  {
-       return find_extent_buffer(root->fs_info, bytenr);
+       return find_extent_buffer(fs_info, bytenr);
  }
  
  struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
@@@ -1165,11 -1166,10 +1166,10 @@@ struct extent_buffer *read_tree_block(s
  
  }
  
- void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ void clean_tree_block(struct btrfs_trans_handle *trans,
+                     struct btrfs_fs_info *fs_info,
                      struct extent_buffer *buf)
  {
-       struct btrfs_fs_info *fs_info = root->fs_info;
        if (btrfs_header_generation(buf) ==
            fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);
@@@ -1724,11 -1724,12 +1724,11 @@@ static int setup_bdi(struct btrfs_fs_in
  {
        int err;
  
 -      bdi->capabilities = BDI_CAP_MAP_COPY;
 -      err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
 +      err = bdi_setup_and_register(bdi, "btrfs");
        if (err)
                return err;
  
 -      bdi->ra_pages   = default_backing_dev_info.ra_pages;
 +      bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
        bdi->congested_fn       = btrfs_congested_fn;
        bdi->congested_data     = info;
        return 0;
@@@ -2146,6 -2147,268 +2146,267 @@@ void btrfs_free_fs_roots(struct btrfs_f
        }
  }
  
 -      fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
+ static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
+ {
+       mutex_init(&fs_info->scrub_lock);
+       atomic_set(&fs_info->scrubs_running, 0);
+       atomic_set(&fs_info->scrub_pause_req, 0);
+       atomic_set(&fs_info->scrubs_paused, 0);
+       atomic_set(&fs_info->scrub_cancel_req, 0);
+       init_waitqueue_head(&fs_info->scrub_pause_wait);
+       fs_info->scrub_workers_refcnt = 0;
+ }
+ static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
+ {
+       spin_lock_init(&fs_info->balance_lock);
+       mutex_init(&fs_info->balance_mutex);
+       atomic_set(&fs_info->balance_running, 0);
+       atomic_set(&fs_info->balance_pause_req, 0);
+       atomic_set(&fs_info->balance_cancel_req, 0);
+       fs_info->balance_ctl = NULL;
+       init_waitqueue_head(&fs_info->balance_wait_q);
+ }
+ static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info,
+                                  struct btrfs_root *tree_root)
+ {
+       fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
+       set_nlink(fs_info->btree_inode, 1);
+       /*
+        * we set the i_size on the btree inode to the max possible int.
+        * the real end of the address space is determined by all of
+        * the devices in the system
+        */
+       fs_info->btree_inode->i_size = OFFSET_MAX;
+       fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
+       RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
+       extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
+                            fs_info->btree_inode->i_mapping);
+       BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
+       extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
+       BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
+       BTRFS_I(fs_info->btree_inode)->root = tree_root;
+       memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
+              sizeof(struct btrfs_key));
+       set_bit(BTRFS_INODE_DUMMY,
+               &BTRFS_I(fs_info->btree_inode)->runtime_flags);
+       btrfs_insert_inode_hash(fs_info->btree_inode);
+ }
+ static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
+ {
+       fs_info->dev_replace.lock_owner = 0;
+       atomic_set(&fs_info->dev_replace.nesting_level, 0);
+       mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
+       mutex_init(&fs_info->dev_replace.lock_management_lock);
+       mutex_init(&fs_info->dev_replace.lock);
+       init_waitqueue_head(&fs_info->replace_wait);
+ }
+ static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
+ {
+       spin_lock_init(&fs_info->qgroup_lock);
+       mutex_init(&fs_info->qgroup_ioctl_lock);
+       fs_info->qgroup_tree = RB_ROOT;
+       fs_info->qgroup_op_tree = RB_ROOT;
+       INIT_LIST_HEAD(&fs_info->dirty_qgroups);
+       fs_info->qgroup_seq = 1;
+       fs_info->quota_enabled = 0;
+       fs_info->pending_quota_state = 0;
+       fs_info->qgroup_ulist = NULL;
+       mutex_init(&fs_info->qgroup_rescan_lock);
+ }
+ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
+               struct btrfs_fs_devices *fs_devices)
+ {
+       int max_active = fs_info->thread_pool_size;
+       unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
+       fs_info->workers =
+               btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
+                                     max_active, 16);
+       fs_info->delalloc_workers =
+               btrfs_alloc_workqueue("delalloc", flags, max_active, 2);
+       fs_info->flush_workers =
+               btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);
+       fs_info->caching_workers =
+               btrfs_alloc_workqueue("cache", flags, max_active, 0);
+       /*
+        * a higher idle thresh on the submit workers makes it much more
+        * likely that bios will be sent down in a sane order to the
+        * devices
+        */
+       fs_info->submit_workers =
+               btrfs_alloc_workqueue("submit", flags,
+                                     min_t(u64, fs_devices->num_devices,
+                                           max_active), 64);
+       fs_info->fixup_workers =
+               btrfs_alloc_workqueue("fixup", flags, 1, 0);
+       /*
+        * endios are largely parallel and should have a very
+        * low idle thresh
+        */
+       fs_info->endio_workers =
+               btrfs_alloc_workqueue("endio", flags, max_active, 4);
+       fs_info->endio_meta_workers =
+               btrfs_alloc_workqueue("endio-meta", flags, max_active, 4);
+       fs_info->endio_meta_write_workers =
+               btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2);
+       fs_info->endio_raid56_workers =
+               btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4);
+       fs_info->endio_repair_workers =
+               btrfs_alloc_workqueue("endio-repair", flags, 1, 0);
+       fs_info->rmw_workers =
+               btrfs_alloc_workqueue("rmw", flags, max_active, 2);
+       fs_info->endio_write_workers =
+               btrfs_alloc_workqueue("endio-write", flags, max_active, 2);
+       fs_info->endio_freespace_worker =
+               btrfs_alloc_workqueue("freespace-write", flags, max_active, 0);
+       fs_info->delayed_workers =
+               btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0);
+       fs_info->readahead_workers =
+               btrfs_alloc_workqueue("readahead", flags, max_active, 2);
+       fs_info->qgroup_rescan_workers =
+               btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0);
+       fs_info->extent_workers =
+               btrfs_alloc_workqueue("extent-refs", flags,
+                                     min_t(u64, fs_devices->num_devices,
+                                           max_active), 8);
+       if (!(fs_info->workers && fs_info->delalloc_workers &&
+             fs_info->submit_workers && fs_info->flush_workers &&
+             fs_info->endio_workers && fs_info->endio_meta_workers &&
+             fs_info->endio_meta_write_workers &&
+             fs_info->endio_repair_workers &&
+             fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
+             fs_info->endio_freespace_worker && fs_info->rmw_workers &&
+             fs_info->caching_workers && fs_info->readahead_workers &&
+             fs_info->fixup_workers && fs_info->delayed_workers &&
+             fs_info->extent_workers &&
+             fs_info->qgroup_rescan_workers)) {
+               return -ENOMEM;
+       }
+       return 0;
+ }
+ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
+                           struct btrfs_fs_devices *fs_devices)
+ {
+       int ret;
+       struct btrfs_root *tree_root = fs_info->tree_root;
+       struct btrfs_root *log_tree_root;
+       struct btrfs_super_block *disk_super = fs_info->super_copy;
+       u64 bytenr = btrfs_super_log_root(disk_super);
+       if (fs_devices->rw_devices == 0) {
+               printk(KERN_WARNING "BTRFS: log replay required "
+                      "on RO media\n");
+               return -EIO;
+       }
+       log_tree_root = btrfs_alloc_root(fs_info);
+       if (!log_tree_root)
+               return -ENOMEM;
+       __setup_root(tree_root->nodesize, tree_root->sectorsize,
+                       tree_root->stripesize, log_tree_root, fs_info,
+                       BTRFS_TREE_LOG_OBJECTID);
+       log_tree_root->node = read_tree_block(tree_root, bytenr,
+                       fs_info->generation + 1);
+       if (!log_tree_root->node ||
+           !extent_buffer_uptodate(log_tree_root->node)) {
+               printk(KERN_ERR "BTRFS: failed to read log tree\n");
+               free_extent_buffer(log_tree_root->node);
+               kfree(log_tree_root);
+               return -EIO;
+       }
+       /* returns with log_tree_root freed on success */
+       ret = btrfs_recover_log_trees(log_tree_root);
+       if (ret) {
+               btrfs_error(tree_root->fs_info, ret,
+                           "Failed to recover log tree");
+               free_extent_buffer(log_tree_root->node);
+               kfree(log_tree_root);
+               return ret;
+       }
+       if (fs_info->sb->s_flags & MS_RDONLY) {
+               ret = btrfs_commit_super(tree_root);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+ }
+ static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
+                           struct btrfs_root *tree_root)
+ {
+       struct btrfs_root *root;
+       struct btrfs_key location;
+       int ret;
+       location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
+       location.type = BTRFS_ROOT_ITEM_KEY;
+       location.offset = 0;
+       root = btrfs_read_tree_root(tree_root, &location);
+       if (IS_ERR(root))
+               return PTR_ERR(root);
+       set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+       fs_info->extent_root = root;
+       location.objectid = BTRFS_DEV_TREE_OBJECTID;
+       root = btrfs_read_tree_root(tree_root, &location);
+       if (IS_ERR(root))
+               return PTR_ERR(root);
+       set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+       fs_info->dev_root = root;
+       btrfs_init_devices_late(fs_info);
+       location.objectid = BTRFS_CSUM_TREE_OBJECTID;
+       root = btrfs_read_tree_root(tree_root, &location);
+       if (IS_ERR(root))
+               return PTR_ERR(root);
+       set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+       fs_info->csum_root = root;
+       location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
+       root = btrfs_read_tree_root(tree_root, &location);
+       if (!IS_ERR(root)) {
+               set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+               fs_info->quota_enabled = 1;
+               fs_info->pending_quota_state = 1;
+               fs_info->quota_root = root;
+       }
+       location.objectid = BTRFS_UUID_TREE_OBJECTID;
+       root = btrfs_read_tree_root(tree_root, &location);
+       if (IS_ERR(root)) {
+               ret = PTR_ERR(root);
+               if (ret != -ENOENT)
+                       return ret;
+       } else {
+               set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+               fs_info->uuid_root = root;
+       }
+       return 0;
+ }
  int open_ctree(struct super_block *sb,
               struct btrfs_fs_devices *fs_devices,
               char *options)
        struct btrfs_super_block *disk_super;
        struct btrfs_fs_info *fs_info = btrfs_sb(sb);
        struct btrfs_root *tree_root;
-       struct btrfs_root *extent_root;
-       struct btrfs_root *csum_root;
        struct btrfs_root *chunk_root;
-       struct btrfs_root *dev_root;
-       struct btrfs_root *quota_root;
-       struct btrfs_root *uuid_root;
-       struct btrfs_root *log_tree_root;
        int ret;
        int err = -EINVAL;
        int num_backups_tried = 0;
        int backup_index = 0;
        int max_active;
-       int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
-       bool create_uuid_tree;
-       bool check_uuid_tree;
  
        tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
        chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
        fs_info->free_chunk_space = 0;
        fs_info->tree_mod_log = RB_ROOT;
        fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
 -      fs_info->avg_delayed_ref_runtime = div64_u64(NSEC_PER_SEC, 64);
 +      fs_info->avg_delayed_ref_runtime = NSEC_PER_SEC >> 6; /* div by 64 */
        /* readahead state */
        INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
        spin_lock_init(&fs_info->reada_lock);
        }
        btrfs_init_delayed_root(fs_info->delayed_root);
  
-       mutex_init(&fs_info->scrub_lock);
-       atomic_set(&fs_info->scrubs_running, 0);
-       atomic_set(&fs_info->scrub_pause_req, 0);
-       atomic_set(&fs_info->scrubs_paused, 0);
-       atomic_set(&fs_info->scrub_cancel_req, 0);
-       init_waitqueue_head(&fs_info->replace_wait);
-       init_waitqueue_head(&fs_info->scrub_pause_wait);
-       fs_info->scrub_workers_refcnt = 0;
+       btrfs_init_scrub(fs_info);
  #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
        fs_info->check_integrity_print_mask = 0;
  #endif
-       spin_lock_init(&fs_info->balance_lock);
-       mutex_init(&fs_info->balance_mutex);
-       atomic_set(&fs_info->balance_running, 0);
-       atomic_set(&fs_info->balance_pause_req, 0);
-       atomic_set(&fs_info->balance_cancel_req, 0);
-       fs_info->balance_ctl = NULL;
-       init_waitqueue_head(&fs_info->balance_wait_q);
+       btrfs_init_balance(fs_info);
        btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
  
        sb->s_blocksize = 4096;
        sb->s_blocksize_bits = blksize_bits(4096);
        sb->s_bdi = &fs_info->bdi;
  
-       fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
-       set_nlink(fs_info->btree_inode, 1);
-       /*
-        * we set the i_size on the btree inode to the max possible int.
-        * the real end of the address space is determined by all of
-        * the devices in the system
-        */
-       fs_info->btree_inode->i_size = OFFSET_MAX;
-       fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
-       RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
-       extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
-                            fs_info->btree_inode->i_mapping);
-       BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
-       extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
-       BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
-       BTRFS_I(fs_info->btree_inode)->root = tree_root;
-       memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
-              sizeof(struct btrfs_key));
-       set_bit(BTRFS_INODE_DUMMY,
-               &BTRFS_I(fs_info->btree_inode)->runtime_flags);
-       btrfs_insert_inode_hash(fs_info->btree_inode);
+       btrfs_init_btree_inode(fs_info, tree_root);
  
        spin_lock_init(&fs_info->block_group_cache_lock);
        fs_info->block_group_cache_tree = RB_ROOT;
        init_rwsem(&fs_info->cleanup_work_sem);
        init_rwsem(&fs_info->subvol_sem);
        sema_init(&fs_info->uuid_tree_rescan_sem, 1);
-       fs_info->dev_replace.lock_owner = 0;
-       atomic_set(&fs_info->dev_replace.nesting_level, 0);
-       mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
-       mutex_init(&fs_info->dev_replace.lock_management_lock);
-       mutex_init(&fs_info->dev_replace.lock);
  
-       spin_lock_init(&fs_info->qgroup_lock);
-       mutex_init(&fs_info->qgroup_ioctl_lock);
-       fs_info->qgroup_tree = RB_ROOT;
-       fs_info->qgroup_op_tree = RB_ROOT;
-       INIT_LIST_HEAD(&fs_info->dirty_qgroups);
-       fs_info->qgroup_seq = 1;
-       fs_info->quota_enabled = 0;
-       fs_info->pending_quota_state = 0;
-       fs_info->qgroup_ulist = NULL;
-       mutex_init(&fs_info->qgroup_rescan_lock);
+       btrfs_init_dev_replace_locks(fs_info);
+       btrfs_init_qgroup(fs_info);
  
        btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
        btrfs_init_free_cluster(&fs_info->data_alloc_cluster);
  
        max_active = fs_info->thread_pool_size;
  
-       fs_info->workers =
-               btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
-                                     max_active, 16);
-       fs_info->delalloc_workers =
-               btrfs_alloc_workqueue("delalloc", flags, max_active, 2);
-       fs_info->flush_workers =
-               btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);
-       fs_info->caching_workers =
-               btrfs_alloc_workqueue("cache", flags, max_active, 0);
-       /*
-        * a higher idle thresh on the submit workers makes it much more
-        * likely that bios will be sent down in a sane order to the
-        * devices
-        */
-       fs_info->submit_workers =
-               btrfs_alloc_workqueue("submit", flags,
-                                     min_t(u64, fs_devices->num_devices,
-                                           max_active), 64);
-       fs_info->fixup_workers =
-               btrfs_alloc_workqueue("fixup", flags, 1, 0);
-       /*
-        * endios are largely parallel and should have a very
-        * low idle thresh
-        */
-       fs_info->endio_workers =
-               btrfs_alloc_workqueue("endio", flags, max_active, 4);
-       fs_info->endio_meta_workers =
-               btrfs_alloc_workqueue("endio-meta", flags, max_active, 4);
-       fs_info->endio_meta_write_workers =
-               btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2);
-       fs_info->endio_raid56_workers =
-               btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4);
-       fs_info->endio_repair_workers =
-               btrfs_alloc_workqueue("endio-repair", flags, 1, 0);
-       fs_info->rmw_workers =
-               btrfs_alloc_workqueue("rmw", flags, max_active, 2);
-       fs_info->endio_write_workers =
-               btrfs_alloc_workqueue("endio-write", flags, max_active, 2);
-       fs_info->endio_freespace_worker =
-               btrfs_alloc_workqueue("freespace-write", flags, max_active, 0);
-       fs_info->delayed_workers =
-               btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0);
-       fs_info->readahead_workers =
-               btrfs_alloc_workqueue("readahead", flags, max_active, 2);
-       fs_info->qgroup_rescan_workers =
-               btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0);
-       fs_info->extent_workers =
-               btrfs_alloc_workqueue("extent-refs", flags,
-                                     min_t(u64, fs_devices->num_devices,
-                                           max_active), 8);
-       if (!(fs_info->workers && fs_info->delalloc_workers &&
-             fs_info->submit_workers && fs_info->flush_workers &&
-             fs_info->endio_workers && fs_info->endio_meta_workers &&
-             fs_info->endio_meta_write_workers &&
-             fs_info->endio_repair_workers &&
-             fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
-             fs_info->endio_freespace_worker && fs_info->rmw_workers &&
-             fs_info->caching_workers && fs_info->readahead_workers &&
-             fs_info->fixup_workers && fs_info->delayed_workers &&
-             fs_info->extent_workers &&
-             fs_info->qgroup_rescan_workers)) {
-               err = -ENOMEM;
+       ret = btrfs_init_workqueues(fs_info, fs_devices);
+       if (ret) {
+               err = ret;
                goto fail_sb_buffer;
        }
  
         * keep the device that is marked to be the target device for the
         * dev_replace procedure
         */
-       btrfs_close_extra_devices(fs_info, fs_devices, 0);
+       btrfs_close_extra_devices(fs_devices, 0);
  
        if (!fs_devices->latest_bdev) {
                printk(KERN_ERR "BTRFS: failed to read devices on %s\n",
@@@ -2714,61 -2852,9 +2850,9 @@@ retry_root_backup
        tree_root->commit_root = btrfs_root_node(tree_root);
        btrfs_set_root_refs(&tree_root->root_item, 1);
  
-       location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
-       location.type = BTRFS_ROOT_ITEM_KEY;
-       location.offset = 0;
-       extent_root = btrfs_read_tree_root(tree_root, &location);
-       if (IS_ERR(extent_root)) {
-               ret = PTR_ERR(extent_root);
-               goto recovery_tree_root;
-       }
-       set_bit(BTRFS_ROOT_TRACK_DIRTY, &extent_root->state);
-       fs_info->extent_root = extent_root;
-       location.objectid = BTRFS_DEV_TREE_OBJECTID;
-       dev_root = btrfs_read_tree_root(tree_root, &location);
-       if (IS_ERR(dev_root)) {
-               ret = PTR_ERR(dev_root);
-               goto recovery_tree_root;
-       }
-       set_bit(BTRFS_ROOT_TRACK_DIRTY, &dev_root->state);
-       fs_info->dev_root = dev_root;
-       btrfs_init_devices_late(fs_info);
-       location.objectid = BTRFS_CSUM_TREE_OBJECTID;
-       csum_root = btrfs_read_tree_root(tree_root, &location);
-       if (IS_ERR(csum_root)) {
-               ret = PTR_ERR(csum_root);
+       ret = btrfs_read_roots(fs_info, tree_root);
+       if (ret)
                goto recovery_tree_root;
-       }
-       set_bit(BTRFS_ROOT_TRACK_DIRTY, &csum_root->state);
-       fs_info->csum_root = csum_root;
-       location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
-       quota_root = btrfs_read_tree_root(tree_root, &location);
-       if (!IS_ERR(quota_root)) {
-               set_bit(BTRFS_ROOT_TRACK_DIRTY, &quota_root->state);
-               fs_info->quota_enabled = 1;
-               fs_info->pending_quota_state = 1;
-               fs_info->quota_root = quota_root;
-       }
-       location.objectid = BTRFS_UUID_TREE_OBJECTID;
-       uuid_root = btrfs_read_tree_root(tree_root, &location);
-       if (IS_ERR(uuid_root)) {
-               ret = PTR_ERR(uuid_root);
-               if (ret != -ENOENT)
-                       goto recovery_tree_root;
-               create_uuid_tree = true;
-               check_uuid_tree = false;
-       } else {
-               set_bit(BTRFS_ROOT_TRACK_DIRTY, &uuid_root->state);
-               fs_info->uuid_root = uuid_root;
-               create_uuid_tree = false;
-               check_uuid_tree =
-                   generation != btrfs_super_uuid_tree_generation(disk_super);
-       }
  
        fs_info->generation = generation;
        fs_info->last_trans_committed = generation;
                goto fail_block_groups;
        }
  
-       btrfs_close_extra_devices(fs_info, fs_devices, 1);
+       btrfs_close_extra_devices(fs_devices, 1);
  
        ret = btrfs_sysfs_add_one(fs_info);
        if (ret) {
                goto fail_sysfs;
        }
  
-       ret = btrfs_read_block_groups(extent_root);
+       ret = btrfs_read_block_groups(fs_info->extent_root);
        if (ret) {
                printk(KERN_ERR "BTRFS: Failed to read block groups: %d\n", ret);
                goto fail_sysfs;
  
        /* do not make disk changes in broken FS */
        if (btrfs_super_log_root(disk_super) != 0) {
-               u64 bytenr = btrfs_super_log_root(disk_super);
-               if (fs_devices->rw_devices == 0) {
-                       printk(KERN_WARNING "BTRFS: log replay required "
-                              "on RO media\n");
-                       err = -EIO;
-                       goto fail_qgroup;
-               }
-               log_tree_root = btrfs_alloc_root(fs_info);
-               if (!log_tree_root) {
-                       err = -ENOMEM;
-                       goto fail_qgroup;
-               }
-               __setup_root(nodesize, sectorsize, stripesize,
-                            log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
-               log_tree_root->node = read_tree_block(tree_root, bytenr,
-                                                     generation + 1);
-               if (!log_tree_root->node ||
-                   !extent_buffer_uptodate(log_tree_root->node)) {
-                       printk(KERN_ERR "BTRFS: failed to read log tree\n");
-                       free_extent_buffer(log_tree_root->node);
-                       kfree(log_tree_root);
-                       goto fail_qgroup;
-               }
-               /* returns with log_tree_root freed on success */
-               ret = btrfs_recover_log_trees(log_tree_root);
+               ret = btrfs_replay_log(fs_info, fs_devices);
                if (ret) {
-                       btrfs_error(tree_root->fs_info, ret,
-                                   "Failed to recover log tree");
-                       free_extent_buffer(log_tree_root->node);
-                       kfree(log_tree_root);
+                       err = ret;
                        goto fail_qgroup;
                }
-               if (sb->s_flags & MS_RDONLY) {
-                       ret = btrfs_commit_super(tree_root);
-                       if (ret)
-                               goto fail_qgroup;
-               }
        }
  
        ret = btrfs_find_orphan_roots(tree_root);
  
        btrfs_qgroup_rescan_resume(fs_info);
  
-       if (create_uuid_tree) {
+       if (!fs_info->uuid_root) {
                pr_info("BTRFS: creating UUID tree\n");
                ret = btrfs_create_uuid_tree(fs_info);
                if (ret) {
                        close_ctree(tree_root);
                        return ret;
                }
-       } else if (check_uuid_tree ||
-                  btrfs_test_opt(tree_root, RESCAN_UUID_TREE)) {
+       } else if (btrfs_test_opt(tree_root, RESCAN_UUID_TREE) ||
+                  fs_info->generation !=
+                               btrfs_super_uuid_tree_generation(disk_super)) {
                pr_info("BTRFS: checking UUID tree\n");
                ret = btrfs_check_uuid_tree(fs_info);
                if (ret) {
@@@ -3668,7 -3718,7 +3716,7 @@@ void close_ctree(struct btrfs_root *roo
        if (!(fs_info->sb->s_flags & MS_RDONLY)) {
                ret = btrfs_commit_super(root);
                if (ret)
-                       btrfs_err(root->fs_info, "commit super ret %d", ret);
+                       btrfs_err(fs_info, "commit super ret %d", ret);
        }
  
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
        fs_info->closing = 2;
        smp_mb();
  
-       btrfs_free_qgroup_config(root->fs_info);
+       btrfs_free_qgroup_config(fs_info);
  
        if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
-               btrfs_info(root->fs_info, "at unmount delalloc count %lld",
+               btrfs_info(fs_info, "at unmount delalloc count %lld",
                       percpu_counter_sum(&fs_info->delalloc_bytes));
        }
  
@@@ -4134,7 -4184,7 +4182,7 @@@ static int btrfs_destroy_marked_extents
  
                clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
                while (start <= end) {
-                       eb = btrfs_find_tree_block(root, start);
+                       eb = btrfs_find_tree_block(root->fs_info, start);
                        start += root->nodesize;
                        if (!eb)
                                continue;
@@@ -4285,7 -4335,7 +4333,7 @@@ static int btrfs_cleanup_transaction(st
        return 0;
  }
  
- static struct extent_io_ops btree_extent_io_ops = {
+ static const struct extent_io_ops btree_extent_io_ops = {
        .readpage_end_io_hook = btree_readpage_end_io_hook,
        .readpage_io_failed_hook = btree_io_failed_hook,
        .submit_bio_hook = btree_submit_bio_hook,
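
The bulk of the disk-io.c diff is pulling open_ctree()'s initialization
blocks out into static helpers (btrfs_init_scrub(), btrfs_init_qgroup(),
btrfs_init_workqueues(), btrfs_replay_log(), btrfs_read_roots(), ...).
A minimal sketch of the refactor shape, with hypothetical names:

    /* infallible setup becomes a void helper ... */
    static void init_foo(struct btrfs_fs_info *fs_info) { /* ... */ }

    /* ... and setup that can fail hands an errno back to the caller */
    static int init_bar(struct btrfs_fs_info *fs_info) { return 0; }

    static int open_ctree_like(struct btrfs_fs_info *fs_info)
    {
        int ret;

        init_foo(fs_info);        /* was a dozen inline lines */
        ret = init_bar(fs_info);
        if (ret)
            goto fail;            /* existing unwind labels unchanged */
        return 0;
    fail:
        return ret;
    }

Only the happy-path body of open_ctree() shrinks; the goto-label error
unwinding it already had stays in place, as the fail_sb_buffer and
fail_qgroup paths above show.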
diff --combined fs/btrfs/extent-tree.c
index d2158c4d6b24e691594b51b1b32dc654177d4df7,262e16fe27d8af99a67d3b8895c44946cec8bbd7..d8ce3cfe7ae0285d8dab8be60a2ab1e2f85613d6
@@@ -2561,7 -2561,8 +2561,7 @@@ static noinline int __btrfs_run_delayed
                 */
                spin_lock(&delayed_refs->lock);
                avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
 -              avg = div64_u64(avg, 4);
 -              fs_info->avg_delayed_ref_runtime = avg;
 +              fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
                spin_unlock(&delayed_refs->lock);
        }
        return 0;
@@@ -2623,7 -2624,7 +2623,7 @@@ static inline u64 heads_to_leaves(struc
         * We don't ever fill up leaves all the way so multiply by 2 just to be
         * closer to what we're really going to want to use.
         */
 -      return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
 +      return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
  }
  
  int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
@@@ -3192,7 -3193,7 +3192,7 @@@ static int cache_save_setup(struct btrf
        struct inode *inode = NULL;
        u64 alloc_hint = 0;
        int dcs = BTRFS_DC_ERROR;
 -      int num_pages = 0;
 +      u64 num_pages = 0;
        int retries = 0;
        int ret = 0;
  
                return 0;
        }
  
 +      if (trans->aborted)
 +              return 0;
  again:
        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
         */
        BTRFS_I(inode)->generation = 0;
        ret = btrfs_update_inode(trans, root, inode);
 +      if (ret) {
 +              /*
 +               * So theoretically we could recover from this, simply set the
 +               * super cache generation to 0 so we know to invalidate the
 +               * cache, but then we'd have to keep track of the block groups
 +               * that fail this way so we know we _have_ to reset this cache
 +               * before the next commit or risk reading stale cache.  So to
 +               * limit our exposure to horrible edge cases lets just abort the
 +               * transaction, this only happens in really bad situations
 +               * anyway.
 +               */
 +              btrfs_abort_transaction(trans, root, ret);
 +              goto out_put;
 +      }
        WARN_ON(ret);
  
        if (i_size_read(inode) > 0) {
         * taking up quite a bit since it's not folded into the other space
         * cache.
         */
 -      num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
 +      num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
        if (!num_pages)
                num_pages = 1;
  
        return ret;
  }
  
 +int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
 +                          struct btrfs_root *root)
 +{
 +      struct btrfs_block_group_cache *cache, *tmp;
 +      struct btrfs_transaction *cur_trans = trans->transaction;
 +      struct btrfs_path *path;
 +
 +      if (list_empty(&cur_trans->dirty_bgs) ||
 +          !btrfs_test_opt(root, SPACE_CACHE))
 +              return 0;
 +
 +      path = btrfs_alloc_path();
 +      if (!path)
 +              return -ENOMEM;
 +
 +      /* Could add new block groups, use _safe just in case */
 +      list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
 +                               dirty_list) {
 +              if (cache->disk_cache_state == BTRFS_DC_CLEAR)
 +                      cache_save_setup(cache, trans, path);
 +      }
 +
 +      btrfs_free_path(path);
 +      return 0;
 +}
 +
  int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
  {
@@@ -3640,7 -3599,7 +3640,7 @@@ int btrfs_check_data_free_space(struct 
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_fs_info *fs_info = root->fs_info;
        u64 used;
-       int ret = 0, committed = 0, alloc_chunk = 1;
+       int ret = 0, committed = 0;
  
        /* make sure bytes are sectorsize aligned */
        bytes = ALIGN(bytes, root->sectorsize);
@@@ -3668,7 -3627,7 +3668,7 @@@ again
                 * if we don't have enough free bytes in this space then we need
                 * to alloc a new chunk.
                 */
-               if (!data_sinfo->full && alloc_chunk) {
+               if (!data_sinfo->full) {
                        u64 alloc_target;
  
                        data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
@@@ -4811,10 -4770,10 +4811,10 @@@ static u64 calc_global_metadata_size(st
  
        num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
                    csum_size * 2;
 -      num_bytes += div64_u64(data_used + meta_used, 50);
 +      num_bytes += div_u64(data_used + meta_used, 50);
  
        if (num_bytes * 3 > meta_used)
 -              num_bytes = div64_u64(meta_used, 3);
 +              num_bytes = div_u64(meta_used, 3);
  
        return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
  }
@@@ -5074,16 -5033,16 +5074,16 @@@ static u64 calc_csum_metadata_size(stru
            BTRFS_I(inode)->csum_bytes == 0)
                return 0;
  
 -      old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
 +      old_csums = (int)div_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
        if (reserve)
                BTRFS_I(inode)->csum_bytes += num_bytes;
        else
                BTRFS_I(inode)->csum_bytes -= num_bytes;
        csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
 -      num_csums_per_leaf = (int)div64_u64(csum_size,
 +      num_csums_per_leaf = (int)div_u64(csum_size,
                                            sizeof(struct btrfs_csum_item) +
                                            sizeof(struct btrfs_disk_key));
 -      num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
 +      num_csums = (int)div_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
        num_csums = num_csums + num_csums_per_leaf - 1;
        num_csums = num_csums / num_csums_per_leaf;
  
@@@ -5135,11 -5094,7 +5135,11 @@@ int btrfs_delalloc_reserve_metadata(str
        num_bytes = ALIGN(num_bytes, root->sectorsize);
  
        spin_lock(&BTRFS_I(inode)->lock);
 -      BTRFS_I(inode)->outstanding_extents++;
 +      nr_extents = (unsigned)div64_u64(num_bytes +
 +                                       BTRFS_MAX_EXTENT_SIZE - 1,
 +                                       BTRFS_MAX_EXTENT_SIZE);
 +      BTRFS_I(inode)->outstanding_extents += nr_extents;
 +      nr_extents = 0;
  
        if (BTRFS_I(inode)->outstanding_extents >
            BTRFS_I(inode)->reserved_extents)
@@@ -5284,9 -5239,6 +5284,9 @@@ void btrfs_delalloc_release_metadata(st
        if (dropped > 0)
                to_free += btrfs_calc_trans_metadata_size(root, dropped);
  
 +      if (btrfs_test_is_dummy_root(root))
 +              return;
 +
        trace_btrfs_space_reservation(root->fs_info, "delalloc",
                                      btrfs_ino(inode), to_free, 0);
        if (root->fs_info->quota_enabled) {
@@@ -7216,7 -7168,7 +7216,7 @@@ btrfs_init_new_buffer(struct btrfs_tran
        btrfs_set_header_generation(buf, trans->transid);
        btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
        btrfs_tree_lock(buf);
-       clean_tree_block(trans, root, buf);
+       clean_tree_block(trans, root->fs_info, buf);
        clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
  
        btrfs_set_lock_blocking(buf);
@@@ -7814,7 -7766,7 +7814,7 @@@ static noinline int do_walk_down(struc
        bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
        blocksize = root->nodesize;
  
-       next = btrfs_find_tree_block(root, bytenr);
+       next = btrfs_find_tree_block(root->fs_info, bytenr);
        if (!next) {
                next = btrfs_find_create_tree_block(root, bytenr);
                if (!next)
@@@ -8015,7 -7967,7 +8015,7 @@@ static noinline int walk_up_proc(struc
                        btrfs_set_lock_blocking(eb);
                        path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
                }
-               clean_tree_block(trans, root, eb);
+               clean_tree_block(trans, root->fs_info, eb);
        }
  
        if (eb == root->node) {
@@@ -8719,7 -8671,7 +8719,7 @@@ int btrfs_can_relocate(struct btrfs_roo
                min_free <<= 1;
        } else if (index == BTRFS_RAID_RAID0) {
                dev_min = fs_devices->rw_devices;
 -              do_div(min_free, dev_min);
 +              min_free = div64_u64(min_free, dev_min);
        }
  
        /* We need to do this so that we can look at pending chunks */
@@@ -9407,6 -9359,7 +9407,6 @@@ int btrfs_remove_block_group(struct btr
         * are still on the list after taking the semaphore
         */
        list_del_init(&block_group->list);
 -      list_del_init(&block_group->ro_list);
        if (list_empty(&block_group->space_info->block_groups[index])) {
                kobj = block_group->space_info->block_group_kobjs[index];
                block_group->space_info->block_group_kobjs[index] = NULL;
        btrfs_remove_free_space_cache(block_group);
  
        spin_lock(&block_group->space_info->lock);
 +      list_del_init(&block_group->ro_list);
        block_group->space_info->total_bytes -= block_group->key.offset;
        block_group->space_info->bytes_readonly -= block_group->key.offset;
        block_group->space_info->disk_total -= block_group->key.offset * factor;
diff --combined fs/btrfs/file-item.c
index 3a4a7924fa38ca292a6547e1d0c218ee793ee4f7,fc003321bdd4432591f36e25b59da9c89159d2fd..58ece6558430094969d7dcea7e477f6949c2eac6
@@@ -185,8 -185,8 +185,8 @@@ static int __btrfs_lookup_bio_sums(stru
        nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
        if (!dst) {
                if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
 -                      btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size,
 -                                                          GFP_NOFS);
 +                      btrfs_bio->csum_allocated = kmalloc_array(nblocks,
 +                                      csum_size, GFP_NOFS);
                        if (!btrfs_bio->csum_allocated) {
                                btrfs_free_path(path);
                                return -ENOMEM;
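
[editor's note] The kmalloc -> kmalloc_array and kzalloc -> kcalloc conversions scattered through this merge all target the same hazard: an unchecked n * size multiplication can wrap. A minimal sketch of the pattern:

#include <linux/slab.h>

static void *alloc_csums_demo(size_t nblocks, size_t csum_size)
{
	/*
	 * kmalloc(nblocks * csum_size, ...) wraps silently on overflow
	 * and hands back a short buffer; kmalloc_array() returns NULL
	 * instead. kcalloc() adds zeroing on top of the same check.
	 */
	return kmalloc_array(nblocks, csum_size, GFP_NOFS);
}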
@@@ -553,7 -553,7 +553,7 @@@ static noinline void truncate_one_csum(
                btrfs_truncate_item(root, path, new_size, 0);
  
                key->offset = end_byte;
-               btrfs_set_item_key_safe(root, path, key);
+               btrfs_set_item_key_safe(root->fs_info, path, key);
        } else {
                BUG();
        }
diff --combined fs/btrfs/file.c
index ac26a917df7bd22efdeaf7a959bb7b2e8dd4dad1,6b796f03de1016da5a89ebb3f8515276de5f9980..7d4bb3b6fbc23cd8df679dae0b0aa89a1b78c794
@@@ -273,7 -273,11 +273,7 @@@ void btrfs_cleanup_defrag_inodes(struc
                defrag = rb_entry(node, struct inode_defrag, rb_node);
                kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
  
 -              if (need_resched()) {
 -                      spin_unlock(&fs_info->defrag_inodes_lock);
 -                      cond_resched();
 -                      spin_lock(&fs_info->defrag_inodes_lock);
 -              }
 +              cond_resched_lock(&fs_info->defrag_inodes_lock);
  
                node = rb_first(&fs_info->defrag_inodes);
        }
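
[editor's note] cond_resched_lock() collapses the open-coded unlock/resched/relock sequence removed above into one call, and it only drops the lock when a reschedule (or lock contention) is actually pending. A sketch of the equivalence:

#include <linux/sched.h>
#include <linux/spinlock.h>

static void drain_list_demo(spinlock_t *lock, int work_left)
{
	spin_lock(lock);
	while (work_left--) {
		/* ... free one entry ... */

		/* Drops, reschedules, and reacquires only if needed. */
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
}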
@@@ -864,7 -868,7 +864,7 @@@ next_slot
  
                        memcpy(&new_key, &key, sizeof(new_key));
                        new_key.offset = end;
-                       btrfs_set_item_key_safe(root, path, &new_key);
+                       btrfs_set_item_key_safe(root->fs_info, path, &new_key);
  
                        extent_offset += end - key.offset;
                        btrfs_set_file_extent_offset(leaf, fi, extent_offset);
@@@ -1122,7 -1126,7 +1122,7 @@@ again
                                     ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
-                       btrfs_set_item_key_safe(root, path, &new_key);
+                       btrfs_set_item_key_safe(root->fs_info, path, &new_key);
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        btrfs_set_file_extent_generation(leaf, fi,
                                                         trans->transid);
                        path->slots[0]++;
                        new_key.offset = start;
-                       btrfs_set_item_key_safe(root, path, &new_key);
+                       btrfs_set_item_key_safe(root->fs_info, path, &new_key);
  
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
@@@ -1481,7 -1485,7 +1481,7 @@@ static noinline ssize_t __btrfs_buffere
                        PAGE_CACHE_SIZE / (sizeof(struct page *)));
        nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
        nrptrs = max(nrptrs, 8);
 -      pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
 +      pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;
  
@@@ -1631,8 -1635,8 +1631,8 @@@ again
                        btrfs_end_write_no_snapshoting(root);
  
                if (only_release_metadata && copied > 0) {
 -                      u64 lockstart = round_down(pos, root->sectorsize);
 -                      u64 lockend = lockstart +
 +                      lockstart = round_down(pos, root->sectorsize);
 +                      lockend = lockstart +
                                (dirty_pages << PAGE_CACHE_SHIFT) - 1;
  
                        set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
@@@ -1742,7 -1746,7 +1742,7 @@@ static ssize_t btrfs_file_write_iter(st
  
        mutex_lock(&inode->i_mutex);
  
 -      current->backing_dev_info = inode->i_mapping->backing_dev_info;
 +      current->backing_dev_info = inode_to_bdi(inode);
        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err) {
                mutex_unlock(&inode->i_mutex);
        mutex_unlock(&inode->i_mutex);
  
        /*
 -       * we want to make sure fsync finds this change
 -       * but we haven't joined a transaction running right now.
 -       *
 -       * Later on, someone is sure to update the inode and get the
 -       * real transid recorded.
 -       *
 -       * We set last_trans now to the fs_info generation + 1,
 -       * this will either be one more than the running transaction
 -       * or the generation used for the next transaction if there isn't
 -       * one running right now.
 -       *
         * We also have to set last_sub_trans to the current log transid,
         * otherwise subsequent syncs to a file that's been synced in this
         * transaction will appear to have already occurred.
         */
 -      BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
        BTRFS_I(inode)->last_sub_trans = root->log_transid;
        if (num_written > 0) {
                err = generic_write_sync(file, pos, num_written);
@@@ -1943,37 -1959,25 +1943,37 @@@ int btrfs_sync_file(struct file *file, 
        atomic_inc(&root->log_batch);
  
        /*
 -       * check the transaction that last modified this inode
 -       * and see if its already been committed
 -       */
 -      if (!BTRFS_I(inode)->last_trans) {
 -              mutex_unlock(&inode->i_mutex);
 -              goto out;
 -      }
 -
 -      /*
 -       * if the last transaction that changed this file was before
 -       * the current transaction, we can bail out now without any
 -       * syncing
 +       * If the last transaction that changed this file was before the current
 +       * transaction and we have the full sync flag set in our inode, we can
 +       * bail out now without any syncing.
 +       *
 +       * Note that we can't bail out if the full sync flag isn't set. This is
 +       * because when the full sync flag is set we start all ordered extents
 +       * and wait for them to fully complete - when they complete they update
 +       * the inode's last_trans field through:
 +       *
 +       *     btrfs_finish_ordered_io() ->
 +       *         btrfs_update_inode_fallback() ->
 +       *             btrfs_update_inode() ->
 +       *                 btrfs_set_inode_last_trans()
 +       *
 +       * So we are sure that last_trans is up to date and can do this check to
 +       * bail out safely. For the fast path, when the full sync flag is not
 +       * set in our inode, we cannot do it because we start only our ordered
 +       * extents and don't wait for them to complete (that is when
 +       * btrfs_finish_ordered_io runs), so here at this point their last_trans
 +       * value might be less than or equal to fs_info->last_trans_committed,
 +       * and setting a speculative last_trans for an inode when a buffered
 +       * write is made (such as fs_info->generation + 1 for example) would not
 +       * be reliable since after setting the value and before fsync is called
 +       * any number of transactions can start and commit (transaction kthread
 +       * commits the current transaction periodically), and a transaction
 +       * commit does not start nor wait for ordered extents to complete.
         */
        smp_mb();
        if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
 -          BTRFS_I(inode)->last_trans <=
 -          root->fs_info->last_trans_committed) {
 -              BTRFS_I(inode)->last_trans = 0;
 -
 +          (full_sync && BTRFS_I(inode)->last_trans <=
 +           root->fs_info->last_trans_committed)) {
                /*
                * We've had everything committed since the last time we were
                * modified, so clear this flag in case it was set for whatever
@@@ -2077,6 -2081,7 +2077,6 @@@ static const struct vm_operations_struc
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = btrfs_page_mkwrite,
 -      .remap_pages    = generic_file_remap_pages,
  };
  
  static int btrfs_file_mmap(struct file        *filp, struct vm_area_struct *vma)
@@@ -2164,7 -2169,7 +2164,7 @@@ static int fill_holes(struct btrfs_tran
                u64 num_bytes;
  
                key.offset = offset;
-               btrfs_set_item_key_safe(root, path, &key);
+               btrfs_set_item_key_safe(root->fs_info, path, &key);
                fi = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_file_extent_item);
                num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
@@@ -2271,8 -2276,6 +2271,8 @@@ static int btrfs_punch_hole(struct inod
        bool same_page;
        bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
        u64 ino_size;
 +      bool truncated_page = false;
 +      bool updated_inode = false;
  
        ret = btrfs_wait_ordered_range(inode, offset, len);
        if (ret)
         * entire page.
         */
        if (same_page && len < PAGE_CACHE_SIZE) {
 -              if (offset < ino_size)
 +              if (offset < ino_size) {
 +                      truncated_page = true;
                        ret = btrfs_truncate_page(inode, offset, len, 0);
 +              } else {
 +                      ret = 0;
 +              }
                goto out_only_mutex;
        }
  
        /* zero back part of the first page */
        if (offset < ino_size) {
 +              truncated_page = true;
                ret = btrfs_truncate_page(inode, offset, 0, 0);
                if (ret) {
                        mutex_unlock(&inode->i_mutex);
                if (!ret) {
                        /* zero the front end of the last page */
                        if (tail_start + tail_len < ino_size) {
 +                              truncated_page = true;
                                ret = btrfs_truncate_page(inode,
                                                tail_start + tail_len, 0, 1);
                                if (ret)
        }
  
        if (lockend < lockstart) {
 -              mutex_unlock(&inode->i_mutex);
 -              return 0;
 +              ret = 0;
 +              goto out_only_mutex;
        }
  
        while (1) {
@@@ -2510,7 -2507,6 +2510,7 @@@ out_trans
  
        trans->block_rsv = &root->fs_info->trans_block_rsv;
        ret = btrfs_update_inode(trans, root, inode);
 +      updated_inode = true;
        btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root);
  out_free:
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                             &cached_state, GFP_NOFS);
  out_only_mutex:
 +      if (!updated_inode && truncated_page && !ret && !err) {
 +              /*
 +               * If we only end up zeroing part of a page, we still need to
 +               * update the inode item, so that all the time fields are
 +               * updated, as well as the necessary in-memory btrfs inode fields
 +               * for detecting, at fsync time, if the inode isn't yet in the
 +               * log tree or it's there but not up to date.
 +               */
 +              trans = btrfs_start_transaction(root, 1);
 +              if (IS_ERR(trans)) {
 +                      err = PTR_ERR(trans);
 +              } else {
 +                      err = btrfs_update_inode(trans, root, inode);
 +                      ret = btrfs_end_transaction(trans, root);
 +              }
 +      }
        mutex_unlock(&inode->i_mutex);
        if (ret && !err)
                err = ret;
diff --combined fs/btrfs/qgroup.c
index e08d26aa017d632aa246c60c777fe395416f6146,50ee58614f720d8ac61fd2b4fd2741db37c38bd7..33f31375259abc6adacbbe72703ef990d7b5bc08
@@@ -982,7 -982,7 +982,7 @@@ int btrfs_quota_disable(struct btrfs_tr
        list_del(&quota_root->dirty_list);
  
        btrfs_tree_lock(quota_root->node);
-       clean_tree_block(trans, tree_root, quota_root->node);
+       clean_tree_block(trans, tree_root->fs_info, quota_root->node);
        btrfs_tree_unlock(quota_root->node);
        btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
  
@@@ -1259,7 -1259,7 +1259,7 @@@ static int comp_oper(struct btrfs_qgrou
        if (oper1->seq < oper2->seq)
                return -1;
        if (oper1->seq > oper2->seq)
 -              return -1;
 +              return 1;
        if (oper1->ref_root < oper2->ref_root)
                return -1;
        if (oper1->ref_root > oper2->ref_root)
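
[editor's note] The one-character fix above matters: returning -1 for both seq orderings made comp_oper() claim a < b and b < a simultaneously, so qgroup operations could be inserted out of order. The corrected shape is the standard three-way compare:

/* Standard three-way compare: negative, zero, or positive. */
static int cmp_seq_demo(u64 a, u64 b)
{
	if (a < b)
		return -1;
	if (a > b)
		return 1;	/* previously -1, breaking the ordering */
	return 0;
}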
@@@ -1845,7 -1845,7 +1845,7 @@@ static int qgroup_shared_accounting(str
        struct ulist *roots = NULL;
        struct ulist *qgroups, *tmp;
        struct btrfs_qgroup *qgroup;
 -      struct seq_list elem = {};
 +      struct seq_list elem = SEQ_LIST_INIT(elem);
        u64 seq;
        int old_roots = 0;
        int new_roots = 0;
@@@ -1967,7 -1967,7 +1967,7 @@@ static int qgroup_subtree_accounting(st
        int err;
        struct btrfs_qgroup *qg;
        u64 root_obj = 0;
 -      struct seq_list elem = {};
 +      struct seq_list elem = SEQ_LIST_INIT(elem);
  
        parents = ulist_alloc(GFP_NOFS);
        if (!parents)
@@@ -2522,7 -2522,7 +2522,7 @@@ qgroup_rescan_leaf(struct btrfs_fs_inf
  {
        struct btrfs_key found;
        struct ulist *roots = NULL;
 -      struct seq_list tree_mod_seq_elem = {};
 +      struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
        u64 num_bytes;
        u64 seq;
        int new_roots;
diff --combined fs/btrfs/raid56.c
index b4634c30981eb3966323af9473356502f984225c,1ccc40bb61f5956c169c7285cf57aa574cccd2f2..fa72068bd256018e27a13fbc8326b82ce49b207b
@@@ -237,12 -237,8 +237,8 @@@ int btrfs_alloc_stripe_hash_table(struc
        }
  
        x = cmpxchg(&info->stripe_hash_table, NULL, table);
-       if (x) {
-               if (is_vmalloc_addr(x))
-                       vfree(x);
-               else
-                       kfree(x);
-       }
+       if (x)
+               kvfree(x);
        return 0;
  }
  
@@@ -453,10 -449,7 +449,7 @@@ void btrfs_free_stripe_hash_table(struc
        if (!info->stripe_hash_table)
                return;
        btrfs_clear_rbio_cache(info);
-       if (is_vmalloc_addr(info->stripe_hash_table))
-               vfree(info->stripe_hash_table);
-       else
-               kfree(info->stripe_hash_table);
+       kvfree(info->stripe_hash_table);
        info->stripe_hash_table = NULL;
  }
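
[editor's note] kvfree() performs exactly the is_vmalloc_addr() dispatch being deleted here, which is why every open-coded vfree-or-kfree site in this merge shrinks to one line. A sketch of the alloc/free pairing it is meant for:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *table_alloc_demo(size_t size)
{
	/* Try kmalloc() first, fall back to vmalloc() for large sizes. */
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	return p ? p : vzalloc(size);
}

static void table_free_demo(void *p)
{
	/* Routes to vfree() or kfree() based on the address. */
	kvfree(p);
}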
  
@@@ -1807,7 -1800,8 +1800,7 @@@ static void __raid_recover_end_io(struc
        int err;
        int i;
  
 -      pointers = kzalloc(rbio->real_stripes * sizeof(void *),
 -                         GFP_NOFS);
 +      pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
        if (!pointers) {
                err = -ENOMEM;
                goto cleanup_io;
diff --combined fs/btrfs/scrub.c
index 34e6499ba5a43c70d5ed2f641ece1720592801e6,4af5f49a310594dd37b6b920010afac3755cc78f..ab5811545a988edf685ef4acce607cbff1ac7b81
@@@ -964,8 -964,9 +964,8 @@@ static int scrub_handle_errored_block(s
         * the statistics.
         */
  
 -      sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
 -                                   sizeof(*sblocks_for_recheck),
 -                                   GFP_NOFS);
 +      sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
 +                                    sizeof(*sblocks_for_recheck), GFP_NOFS);
        if (!sblocks_for_recheck) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
@@@ -2318,7 -2319,7 +2318,7 @@@ static inline void __scrub_mark_bitmap(
                                       unsigned long *bitmap,
                                       u64 start, u64 len)
  {
 -      int offset;
 +      u32 offset;
        int nsectors;
        int sectorsize = sparity->sctx->dev_root->sectorsize;
  
        }
  
        start -= sparity->logic_start;
 -      offset = (int)do_div(start, sparity->stripe_len);
 +      start = div_u64_rem(start, sparity->stripe_len, &offset);
        offset /= sectorsize;
        nsectors = (int)len / sectorsize;
  
@@@ -2611,8 -2612,8 +2611,8 @@@ static int get_raid56_logic_offset(u64 
        int j = 0;
        u64 stripe_nr;
        u64 last_offset;
 -      int stripe_index;
 -      int rot;
 +      u32 stripe_index;
 +      u32 rot;
  
        last_offset = (physical - map->stripes[num].physical) *
                      nr_data_stripes(map);
        for (i = 0; i < nr_data_stripes(map); i++) {
                *offset = last_offset + i * map->stripe_len;
  
 -              stripe_nr = *offset;
 -              do_div(stripe_nr, map->stripe_len);
 -              do_div(stripe_nr, nr_data_stripes(map));
 +              stripe_nr = div_u64(*offset, map->stripe_len);
 +              stripe_nr = div_u64(stripe_nr, nr_data_stripes(map));
  
                /* Work out the disk rotation on this stripe-set */
 -              rot = do_div(stripe_nr, map->num_stripes);
 +              stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot);
                /* calculate which stripe this data locates */
                rot += i;
                stripe_index = rot % map->num_stripes;
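
[editor's note] The do_div() -> div_u64_rem() conversions in this hunk keep the same quotient/remainder split but make both operands' widths explicit. A worked example of the rotation above, with a hypothetical 3-stripe map:

#include <linux/math64.h>

static u32 stripe_rotation_demo(void)
{
	u64 stripe_nr = 7;	/* hypothetical stripe number */
	u32 rot, i = 0;		/* first data stripe */

	/* Quotient 2 stays in stripe_nr; remainder 1 lands in rot. */
	stripe_nr = div_u64_rem(stripe_nr, 3, &rot);

	/* Data stripe i of this stripe-set sits on disk (rot + i) % 3. */
	return (rot + i) % 3;	/* == 1 */
}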
@@@ -2993,9 -2995,10 +2993,9 @@@ static noinline_for_stack int scrub_str
        int extent_mirror_num;
        int stop_loop = 0;
  
 -      nstripes = length;
        physical = map->stripes[num].physical;
        offset = 0;
 -      do_div(nstripes, map->stripe_len);
 +      nstripes = div_u64(length, map->stripe_len);
        if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
                offset = map->stripe_len * num;
                increment = map->stripe_len * map->num_stripes;
  
        ppath = btrfs_alloc_path();
        if (!ppath) {
 -              btrfs_free_path(ppath);
 +              btrfs_free_path(path);
                return -ENOMEM;
        }
  
        path->search_commit_root = 1;
        path->skip_locking = 1;
  
 +      ppath->search_commit_root = 1;
 +      ppath->skip_locking = 1;
        /*
        * trigger the readahead for the extent tree and csum tree and wait for
         * completion. During readahead, the scrub is officially paused
@@@ -3560,7 -3561,7 +3560,7 @@@ static noinline_for_stack int scrub_wor
                                                int is_dev_replace)
  {
        int ret = 0;
-       int flags = WQ_FREEZABLE | WQ_UNBOUND;
+       unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
        int max_active = fs_info->thread_pool_size;
  
        if (fs_info->scrub_workers_refcnt == 0) {
diff --combined fs/btrfs/transaction.c
index fae816b6671d875beae95fb40d4a5d3d652a7c7f,dde9d285308ecb3a0f67ac192d150654e76d67fa..91c303ac40b638b9ea0dfef326d62d9759440b33
@@@ -35,7 -35,7 +35,7 @@@
  
  #define BTRFS_ROOT_TRANS_TAG 0
  
- static unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
+ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
        [TRANS_STATE_RUNNING]           = 0U,
        [TRANS_STATE_BLOCKED]           = (__TRANS_USERSPACE |
                                           __TRANS_START),
@@@ -93,8 -93,11 +93,8 @@@ static void clear_btree_io_tree(struct 
                 */
                ASSERT(!waitqueue_active(&state->wq));
                free_extent_state(state);
 -              if (need_resched()) {
 -                      spin_unlock(&tree->lock);
 -                      cond_resched();
 -                      spin_lock(&tree->lock);
 -              }
 +
 +              cond_resched_lock(&tree->lock);
        }
        spin_unlock(&tree->lock);
  }
@@@ -1020,13 -1023,17 +1020,13 @@@ static int update_cowonly_root(struct b
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;
 -      bool extent_root = (root->objectid == BTRFS_EXTENT_TREE_OBJECTID);
  
        old_root_used = btrfs_root_used(&root->root_item);
 -      btrfs_write_dirty_block_groups(trans, root);
  
        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
 -                  old_root_used == btrfs_root_used(&root->root_item) &&
 -                  (!extent_root ||
 -                   list_empty(&trans->transaction->dirty_bgs)))
 +                  old_root_used == btrfs_root_used(&root->root_item))
                        break;
  
                btrfs_set_root_node(&root->root_item, root->node);
                        return ret;
  
                old_root_used = btrfs_root_used(&root->root_item);
 -              if (extent_root) {
 -                      ret = btrfs_write_dirty_block_groups(trans, root);
 -                      if (ret)
 -                              return ret;
 -              }
 -              ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 -              if (ret)
 -                      return ret;
 -              ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 -              if (ret)
 -                      return ret;
        }
  
        return 0;
@@@ -1053,7 -1071,6 +1053,7 @@@ static noinline int commit_cowonly_root
                                         struct btrfs_root *root)
  {
        struct btrfs_fs_info *fs_info = root->fs_info;
 +      struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;
        if (ret)
                return ret;
  
 +      ret = btrfs_setup_space_cache(trans, root);
 +      if (ret)
 +              return ret;
 +
        /* run_qgroups might have added some more refs */
        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;
 -
 +again:
        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
 +              ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      while (!list_empty(dirty_bgs)) {
 +              ret = btrfs_write_dirty_block_groups(trans, root);
 +              if (ret)
 +                      return ret;
 +              ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 +              if (ret)
 +                      return ret;
        }
  
 +      if (!list_empty(&fs_info->dirty_cowonly_roots))
 +              goto again;
 +
        list_add_tail(&fs_info->extent_root->dirty_list,
                      &trans->transaction->switch_commits);
        btrfs_after_dev_replace_commit(fs_info);
@@@ -1816,9 -1814,6 +1816,9 @@@ int btrfs_commit_transaction(struct btr
  
                wait_for_commit(root, cur_trans);
  
 +              if (unlikely(cur_trans->aborted))
 +                      ret = cur_trans->aborted;
 +
                btrfs_put_transaction(cur_trans);
  
                return ret;
@@@ -2138,7 -2133,7 +2138,7 @@@ void btrfs_apply_pending_changes(struc
        unsigned long prev;
        unsigned long bit;
  
 -      prev = cmpxchg(&fs_info->pending_changes, 0, 0);
 +      prev = xchg(&fs_info->pending_changes, 0);
        if (!prev)
                return;
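
[editor's note] On the xchg() change just above: cmpxchg(&word, 0, 0) only stores when the old value is already zero, so it behaved as a bare atomic read here; xchg() fetches the old value and leaves zero behind in one atomic step. In miniature:

/* Atomically consume all pending bits: read them and clear the word. */
static unsigned long take_pending_demo(unsigned long *word)
{
	return xchg(word, 0);
}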
  
diff --combined fs/btrfs/tree-log.c
index c5b8ba37f88e31fa188258af9c6e13036c2df18f,4f59140c8c006ca0b91be6bee478a4d6ebd681bb..066e754b1294e367fe5d5314f43f44f41b37c401
@@@ -1012,7 -1012,7 +1012,7 @@@ again
                base = btrfs_item_ptr_offset(leaf, path->slots[0]);
  
                while (cur_offset < item_size) {
 -                      extref = (struct btrfs_inode_extref *)base + cur_offset;
 +                      extref = (struct btrfs_inode_extref *)(base + cur_offset);
  
                        victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
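
[editor's note] The added parentheses above fix a pointer-arithmetic bug: a cast binds tighter than '+', so the old expression stepped base in sizeof(struct btrfs_inode_extref) units instead of bytes. In miniature, with a stand-in type:

struct demo_extref { unsigned char payload[24]; };	/* stand-in type */

static struct demo_extref *at_offset_demo(unsigned long base,
					   unsigned long cur_offset)
{
	/*
	 * WRONG: (struct demo_extref *)base + cur_offset advances by
	 * cur_offset * sizeof(struct demo_extref) bytes.
	 * RIGHT: add the byte offset first, then cast:
	 */
	return (struct demo_extref *)(base + cur_offset);
}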
  
@@@ -2230,7 -2230,8 +2230,8 @@@ static noinline int walk_down_log_tree(
                                if (trans) {
                                        btrfs_tree_lock(next);
                                        btrfs_set_lock_blocking(next);
-                                       clean_tree_block(trans, root, next);
+                                       clean_tree_block(trans, root->fs_info,
+                                                       next);
                                        btrfs_wait_tree_block_writeback(next);
                                        btrfs_tree_unlock(next);
                                }
@@@ -2308,7 -2309,8 +2309,8 @@@ static noinline int walk_up_log_tree(st
                                if (trans) {
                                        btrfs_tree_lock(next);
                                        btrfs_set_lock_blocking(next);
-                                       clean_tree_block(trans, root, next);
+                                       clean_tree_block(trans, root->fs_info,
+                                                       next);
                                        btrfs_wait_tree_block_writeback(next);
                                        btrfs_tree_unlock(next);
                                }
@@@ -2384,7 -2386,7 +2386,7 @@@ static int walk_log_tree(struct btrfs_t
                        if (trans) {
                                btrfs_tree_lock(next);
                                btrfs_set_lock_blocking(next);
-                               clean_tree_block(trans, log, next);
+                               clean_tree_block(trans, log->fs_info, next);
                                btrfs_wait_tree_block_writeback(next);
                                btrfs_tree_unlock(next);
                        }
@@@ -2635,7 -2637,6 +2637,7 @@@ int btrfs_sync_log(struct btrfs_trans_h
        }
  
        if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
 +              blk_finish_plug(&plug);
                mutex_unlock(&log_root_tree->log_mutex);
                ret = root_log_ctx.log_ret;
                goto out;
diff --combined fs/btrfs/volumes.c
index 64ec2fd624da03e17f83c3daf8be687243bd7b00,6d4fb246eb7e8d3ea557be3db7f8dacb5ae06d11..a73acf496e10fe798a1594cafbb5660736460f81
@@@ -366,8 -366,8 +366,8 @@@ loop_lock
                btrfsic_submit_bio(cur->bi_rw, cur);
                num_run++;
                batch_run++;
 -              if (need_resched())
 -                      cond_resched();
 +
 +              cond_resched();
  
                /*
                 * we made progress, there is more work to do and the bdi
                                 * against it before looping
                                 */
                                last_waited = ioc->last_waited;
 -                              if (need_resched())
 -                                      cond_resched();
 +                              cond_resched();
                                continue;
                        }
                        spin_lock(&device->io_lock);
@@@ -608,8 -609,7 +608,7 @@@ error
        return ERR_PTR(-ENOMEM);
  }
  
- void btrfs_close_extra_devices(struct btrfs_fs_info *fs_info,
-                              struct btrfs_fs_devices *fs_devices, int step)
+ void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step)
  {
        struct btrfs_device *device, *next;
        struct btrfs_device *latest_dev = NULL;
@@@ -2486,8 -2486,7 +2485,7 @@@ int btrfs_grow_device(struct btrfs_tran
  }
  
  static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
-                           struct btrfs_root *root,
-                           u64 chunk_tree, u64 chunk_objectid,
+                           struct btrfs_root *root, u64 chunk_objectid,
                            u64 chunk_offset)
  {
        int ret;
@@@ -2579,7 -2578,6 +2577,6 @@@ int btrfs_remove_chunk(struct btrfs_tra
        struct map_lookup *map;
        u64 dev_extent_len = 0;
        u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
-       u64 chunk_tree = root->fs_info->chunk_root->objectid;
        int i, ret = 0;
  
        /* Just in case */
                        }
                }
        }
-       ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
-                              chunk_offset);
+       ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto out;
@@@ -2663,8 -2660,8 +2659,8 @@@ out
  }
  
  static int btrfs_relocate_chunk(struct btrfs_root *root,
-                        u64 chunk_tree, u64 chunk_objectid,
-                        u64 chunk_offset)
+                               u64 chunk_objectid,
+                               u64 chunk_offset)
  {
        struct btrfs_root *extent_root;
        struct btrfs_trans_handle *trans;
@@@ -2706,7 -2703,6 +2702,6 @@@ static int btrfs_relocate_sys_chunks(st
        struct btrfs_chunk *chunk;
        struct btrfs_key key;
        struct btrfs_key found_key;
-       u64 chunk_tree = chunk_root->root_key.objectid;
        u64 chunk_type;
        bool retried = false;
        int failed = 0;
@@@ -2743,7 -2739,7 +2738,7 @@@ again
                btrfs_release_path(path);
  
                if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
-                       ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
+                       ret = btrfs_relocate_chunk(chunk_root,
                                                   found_key.objectid,
                                                   found_key.offset);
                        if (ret == -ENOSPC)
@@@ -3021,7 -3017,7 +3016,7 @@@ static int chunk_drange_filter(struct e
  
                stripe_offset = btrfs_stripe_offset(leaf, stripe);
                stripe_length = btrfs_chunk_length(leaf, chunk);
 -              do_div(stripe_length, factor);
 +              stripe_length = div_u64(stripe_length, factor);
  
                if (stripe_offset < bargs->pend &&
                    stripe_offset + stripe_length > bargs->pstart)
@@@ -3254,7 -3250,6 +3249,6 @@@ again
                }
  
                ret = btrfs_relocate_chunk(chunk_root,
-                                          chunk_root->root_key.objectid,
                                           found_key.objectid,
                                           found_key.offset);
                if (ret && ret != -ENOSPC)
@@@ -3956,7 -3951,6 +3950,6 @@@ int btrfs_shrink_device(struct btrfs_de
        struct btrfs_dev_extent *dev_extent = NULL;
        struct btrfs_path *path;
        u64 length;
-       u64 chunk_tree;
        u64 chunk_objectid;
        u64 chunk_offset;
        int ret;
@@@ -4026,13 -4020,11 +4019,11 @@@ again
                        break;
                }
  
-               chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
                chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
                chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
                btrfs_release_path(path);
  
-               ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
-                                          chunk_offset);
+               ret = btrfs_relocate_chunk(root, chunk_objectid, chunk_offset);
                if (ret && ret != -ENOSPC)
                        goto done;
                if (ret == -ENOSPC)
@@@ -4130,7 -4122,7 +4121,7 @@@ static int btrfs_cmp_device_info(const 
        return 0;
  }
  
- static struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
+ static const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
        [BTRFS_RAID_RAID10] = {
                .sub_stripes    = 2,
                .dev_stripes    = 1,
@@@ -4288,7 -4280,7 +4279,7 @@@ static int __btrfs_alloc_chunk(struct b
        max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
                             max_chunk_size);
  
 -      devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
 +      devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
                               GFP_NOFS);
        if (!devices_info)
                return -ENOMEM;
         */
        if (stripe_size * data_stripes > max_chunk_size) {
                u64 mask = (1ULL << 24) - 1;
 -              stripe_size = max_chunk_size;
 -              do_div(stripe_size, data_stripes);
 +
 +              stripe_size = div_u64(max_chunk_size, data_stripes);
  
                /* bump the answer up to a 16MB boundary */
                stripe_size = (stripe_size + mask) & ~mask;
                        stripe_size = devices_info[ndevs-1].max_avail;
        }
  
 -      do_div(stripe_size, dev_stripes);
 +      stripe_size = div_u64(stripe_size, dev_stripes);
  
        /* align to BTRFS_STRIPE_LEN */
 -      do_div(stripe_size, raid_stripe_len);
 +      stripe_size = div_u64(stripe_size, raid_stripe_len);
        stripe_size *= raid_stripe_len;
  
        map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
@@@ -4902,17 -4894,10 +4893,17 @@@ static void sort_parity_stripes(struct 
  static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
  {
        struct btrfs_bio *bbio = kzalloc(
 +               /* the size of the btrfs_bio */
                sizeof(struct btrfs_bio) +
 +              /* plus the variable array for the stripes */
                sizeof(struct btrfs_bio_stripe) * (total_stripes) +
 +              /* plus the variable array for the tgt dev */
                sizeof(int) * (real_stripes) +
 -              sizeof(u64) * (real_stripes),
 +              /*
 +               * plus the raid_map, which includes both the tgt dev
 +               * and the stripes
 +               */
 +              sizeof(u64) * (total_stripes),
                GFP_NOFS);
        if (!bbio)
                return NULL;
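
[editor's note] The real_stripes -> total_stripes change in the raid_map term matches the annotated layout: one allocation carries the fixed struct plus three trailing arrays, and the raid_map needs a slot for every stripe, target-device stripes included. A sketch of the size computation (assuming the btrfs headers for the two struct types):

static size_t btrfs_bio_size_demo(int total_stripes, int real_stripes)
{
	return sizeof(struct btrfs_bio) +
	       sizeof(struct btrfs_bio_stripe) * total_stripes + /* stripes[] */
	       sizeof(int) * real_stripes +	/* target-device index map */
	       sizeof(u64) * total_stripes;	/* raid_map */
}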
@@@ -4953,7 -4938,7 +4944,7 @@@ static int __btrfs_map_block(struct btr
        u64 stripe_nr_orig;
        u64 stripe_nr_end;
        u64 stripe_len;
 -      int stripe_index;
 +      u32 stripe_index;
        int i;
        int ret = 0;
        int num_stripes;
         * stripe_nr counts the total number of stripes we have to stride
         * to get to this block
         */
 -      do_div(stripe_nr, stripe_len);
 +      stripe_nr = div64_u64(stripe_nr, stripe_len);
  
        stripe_offset = stripe_nr * stripe_len;
        BUG_ON(offset < stripe_offset);
                /* allow a write of a full stripe, but make sure we don't
                 * allow straddling of stripes
                 */
 -              do_div(raid56_full_stripe_start, full_stripe_len);
 +              raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
 +                              full_stripe_len);
                raid56_full_stripe_start *= full_stripe_len;
        }
  
        stripe_index = 0;
        stripe_nr_orig = stripe_nr;
        stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
 -      do_div(stripe_nr_end, map->stripe_len);
 +      stripe_nr_end = div_u64(stripe_nr_end, map->stripe_len);
        stripe_end_offset = stripe_nr_end * map->stripe_len -
                            (offset + *length);
  
                if (rw & REQ_DISCARD)
                        num_stripes = min_t(u64, map->num_stripes,
                                            stripe_nr_end - stripe_nr_orig);
 -              stripe_index = do_div(stripe_nr, map->num_stripes);
 +              stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
 +                              &stripe_index);
                if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
                        mirror_num = 1;
        } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
                }
  
        } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
 -              int factor = map->num_stripes / map->sub_stripes;
 +              u32 factor = map->num_stripes / map->sub_stripes;
  
 -              stripe_index = do_div(stripe_nr, factor);
 +              stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
                stripe_index *= map->sub_stripes;
  
                if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
                    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
                     mirror_num > 1)) {
                        /* push stripe_nr back to the start of the full stripe */
 -                      stripe_nr = raid56_full_stripe_start;
 -                      do_div(stripe_nr, stripe_len * nr_data_stripes(map));
 +                      stripe_nr = div_u64(raid56_full_stripe_start,
 +                                      stripe_len * nr_data_stripes(map));
  
                        /* RAID[56] write or recovery. Return all stripes */
                        num_stripes = map->num_stripes;
                        stripe_index = 0;
                        stripe_offset = 0;
                } else {
 -                      u64 tmp;
 -
                        /*
                         * Mirror #0 or #1 means the original data block.
                         * Mirror #2 is RAID5 parity block.
                         * Mirror #3 is RAID6 Q block.
                         */
 -                      stripe_index = do_div(stripe_nr, nr_data_stripes(map));
 +                      stripe_nr = div_u64_rem(stripe_nr,
 +                                      nr_data_stripes(map), &stripe_index);
                        if (mirror_num > 1)
                                stripe_index = nr_data_stripes(map) +
                                                mirror_num - 2;
  
                        /* We distribute the parity blocks across stripes */
 -                      tmp = stripe_nr + stripe_index;
 -                      stripe_index = do_div(tmp, map->num_stripes);
 +                      div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
 +                                      &stripe_index);
                        if (!(rw & (REQ_WRITE | REQ_DISCARD |
                                    REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
                                mirror_num = 1;
                }
        } else {
                /*
 -               * after this do_div call, stripe_nr is the number of stripes
 -               * on this device we have to walk to find the data, and
 -               * stripe_index is the number of our device in the stripe array
 +               * after this, stripe_nr is the number of stripes on this
 +               * device we have to walk to find the data, and stripe_index is
 +               * the number of our device in the stripe array
                 */
 -              stripe_index = do_div(stripe_nr, map->num_stripes);
 +              stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
 +                              &stripe_index);
                mirror_num = stripe_index + 1;
        }
        BUG_ON(stripe_index >= map->num_stripes);
            need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
            mirror_num > 1)) {
                u64 tmp;
 -              int i, rot;
 +              unsigned rot;
  
                bbio->raid_map = (u64 *)((void *)bbio->stripes +
                                 sizeof(struct btrfs_bio_stripe) *
                                 sizeof(int) * tgtdev_indexes);
  
                /* Work out the disk rotation on this stripe-set */
 -              tmp = stripe_nr;
 -              rot = do_div(tmp, num_stripes);
 +              div_u64_rem(stripe_nr, num_stripes, &rot);
  
                /* Fill in the logical address of each stripe */
                tmp = stripe_nr * nr_data_stripes(map);
        }
  
        if (rw & REQ_DISCARD) {
 -              int factor = 0;
 -              int sub_stripes = 0;
 +              u32 factor = 0;
 +              u32 sub_stripes = 0;
                u64 stripes_per_dev = 0;
                u32 remaining_stripes = 0;
                u32 last_stripe = 0;
                        }
                }
                if (found) {
 -                      u64 length = map->stripe_len;
 -
 -                      if (physical_of_found + length <=
 +                      if (physical_of_found + map->stripe_len <=
                            dev_replace->cursor_left) {
                                struct btrfs_bio_stripe *tgtdev_stripe =
                                        bbio->stripes + num_stripes;
@@@ -5533,15 -5519,15 +5524,15 @@@ int btrfs_rmap_block(struct btrfs_mappi
        rmap_len = map->stripe_len;
  
        if (map->type & BTRFS_BLOCK_GROUP_RAID10)
 -              do_div(length, map->num_stripes / map->sub_stripes);
 +              length = div_u64(length, map->num_stripes / map->sub_stripes);
        else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
 -              do_div(length, map->num_stripes);
 +              length = div_u64(length, map->num_stripes);
        else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
 -              do_div(length, nr_data_stripes(map));
 +              length = div_u64(length, nr_data_stripes(map));
                rmap_len = map->stripe_len * nr_data_stripes(map);
        }
  
 -      buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
 +      buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
        BUG_ON(!buf); /* -ENOMEM */
  
        for (i = 0; i < map->num_stripes; i++) {
                        continue;
  
                stripe_nr = physical - map->stripes[i].physical;
 -              do_div(stripe_nr, map->stripe_len);
 +              stripe_nr = div_u64(stripe_nr, map->stripe_len);
  
                if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
                        stripe_nr = stripe_nr * map->num_stripes + i;
 -                      do_div(stripe_nr, map->sub_stripes);
 +                      stripe_nr = div_u64(stripe_nr, map->sub_stripes);
                } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
                        stripe_nr = stripe_nr * map->num_stripes + i;
                } /* else if RAID[56], multiply by nr_data_stripes().
@@@ -5833,8 -5819,8 +5824,8 @@@ int btrfs_map_bio(struct btrfs_root *ro
        u64 length = 0;
        u64 map_length;
        int ret;
-       int dev_nr = 0;
-       int total_devs = 1;
+       int dev_nr;
+       int total_devs;
        struct btrfs_bio *bbio = NULL;
  
        length = bio->bi_iter.bi_size;
                BUG();
        }
  
-       while (dev_nr < total_devs) {
+       for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
                dev = bbio->stripes[dev_nr].dev;
                if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
                        bbio_error(bbio, first_bio, logical);
-                       dev_nr++;
                        continue;
                }
  
                        ret = breakup_stripe_bio(root, bbio, first_bio, dev,
                                                 dev_nr, rw, async_submit);
                        BUG_ON(ret);
-                       dev_nr++;
                        continue;
                }
  
                submit_stripe_bio(root, bbio, bio,
                                  bbio->stripes[dev_nr].physical, dev_nr, rw,
                                  async_submit);
-               dev_nr++;
        }
        btrfs_bio_counter_dec(root->fs_info);
        return 0;