Merge tag 'for-f2fs-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk...
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 319eda511c4ff6e4bd869aaf2e1b711ecdc6470c..b91b0e10678eb1adcb87e45628f7794caa0dcc9b 100644 (file)
@@ -25,6 +25,9 @@
 #include "trace.h"
 #include <trace/events/f2fs.h>
 
+static struct kmem_cache *extent_tree_slab;
+static struct kmem_cache *extent_node_slab;
+
 static void f2fs_read_end_io(struct bio *bio, int err)
 {
        struct bio_vec *bvec;
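
These two slab caches back the rb-tree extent cache added below: extent_tree_slab holds one tree descriptor per inode, extent_node_slab one node per cached extent. Their definitions live in f2fs.h, which this diff does not show; a rough model, reconstructed only from how the fields are used in this file, is:

    /* Reconstructed sketch -- the authoritative definitions are in
     * fs/f2fs/f2fs.h, which this diff does not touch. */
    struct extent_info {
            unsigned int fofs;              /* start offset in the file, in blocks */
            unsigned int blk;               /* start block address on disk */
            unsigned int len;               /* extent length, in blocks */
    };

    struct extent_node {
            struct rb_node rb_node;         /* link in extent_tree.root */
            struct extent_info ei;          /* the cached mapping */
            struct list_head list;          /* link in sbi->extent_list (LRU) */
    };

    struct extent_tree {
            nid_t ino;                      /* inode number of the owner */
            struct rb_root root;            /* disjoint extents, sorted by fofs */
            struct extent_node *cached_en;  /* last node hit, a one-entry cache */
            rwlock_t lock;                  /* protects the tree */
            atomic_t refcount;              /* in-flight users of this tree */
            unsigned int count;             /* number of nodes in the tree */
    };
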
@@ -197,7 +200,7 @@ alloc_new:
  *  ->node_page
  *    update block addresses in the node page
  */
-static void __set_data_blkaddr(struct dnode_of_data *dn)
+void set_data_blkaddr(struct dnode_of_data *dn)
 {
        struct f2fs_node *rn;
        __le32 *addr_array;
@@ -226,7 +229,7 @@ int reserve_new_block(struct dnode_of_data *dn)
        trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
 
        dn->data_blkaddr = NEW_ADDR;
-       __set_data_blkaddr(dn);
+       set_data_blkaddr(dn);
        mark_inode_dirty(dn->inode);
        sync_inode_page(dn);
        return 0;
@@ -248,73 +251,62 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
        return err;
 }
 
-static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
-                                       struct buffer_head *bh_result)
+static void f2fs_map_bh(struct super_block *sb, pgoff_t pgofs,
+                       struct extent_info *ei, struct buffer_head *bh_result)
+{
+       unsigned int blkbits = sb->s_blocksize_bits;
+       size_t max_size = bh_result->b_size;
+       size_t mapped_size;
+
+       clear_buffer_new(bh_result);
+       map_bh(bh_result, sb, ei->blk + pgofs - ei->fofs);
+       mapped_size = (ei->fofs + ei->len - pgofs) << blkbits;
+       bh_result->b_size = min(max_size, mapped_size);
+}
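
f2fs_map_bh() translates a page offset through a cached extent: the backing block is ei->blk plus the offset's distance into the extent, and the mapped length is whatever remains of the extent from pgofs onward, clamped to the caller's original b_size. The arithmetic in isolation (a standalone userspace sketch; the toy types here are reused by the later sketches):

    #include <stddef.h>

    /* Toy stand-ins for the kernel types. */
    typedef unsigned long pgoff_t;
    typedef unsigned int block_t;
    struct extent_info { pgoff_t fofs; block_t blk; unsigned int len; };

    /* For an extent mapping [fofs, fofs+len) to [blk, blk+len), return
     * the block backing pgofs and store the byte count available from
     * pgofs to the end of the extent (the caller clamps it against its
     * own b_size, as f2fs_map_bh() does with min()). */
    static block_t extent_map(const struct extent_info *ei, pgoff_t pgofs,
                              unsigned int blkbits, size_t *mapped_size)
    {
            *mapped_size = (size_t)(ei->fofs + ei->len - pgofs) << blkbits;
            return ei->blk + (pgofs - ei->fofs);
    }
    /* Example: ei = {fofs=8, blk=1000, len=4}, pgofs=9, blkbits=12
     * -> block 1001, *mapped_size = 3 << 12 = 12288 bytes. */
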
+
+static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
+                                                       struct extent_info *ei)
 {
        struct f2fs_inode_info *fi = F2FS_I(inode);
        pgoff_t start_fofs, end_fofs;
        block_t start_blkaddr;
 
-       if (is_inode_flag_set(fi, FI_NO_EXTENT))
-               return 0;
-
-       read_lock(&fi->ext.ext_lock);
+       read_lock(&fi->ext_lock);
        if (fi->ext.len == 0) {
-               read_unlock(&fi->ext.ext_lock);
-               return 0;
+               read_unlock(&fi->ext_lock);
+               return false;
        }
 
        stat_inc_total_hit(inode->i_sb);
 
        start_fofs = fi->ext.fofs;
        end_fofs = fi->ext.fofs + fi->ext.len - 1;
-       start_blkaddr = fi->ext.blk_addr;
+       start_blkaddr = fi->ext.blk;
 
        if (pgofs >= start_fofs && pgofs <= end_fofs) {
-               unsigned int blkbits = inode->i_sb->s_blocksize_bits;
-               size_t count;
-
-               set_buffer_new(bh_result);
-               map_bh(bh_result, inode->i_sb,
-                               start_blkaddr + pgofs - start_fofs);
-               count = end_fofs - pgofs + 1;
-               if (count < (UINT_MAX >> blkbits))
-                       bh_result->b_size = (count << blkbits);
-               else
-                       bh_result->b_size = UINT_MAX;
-
+               *ei = fi->ext;
                stat_inc_read_hit(inode->i_sb);
-               read_unlock(&fi->ext.ext_lock);
-               return 1;
+               read_unlock(&fi->ext_lock);
+               return true;
        }
-       read_unlock(&fi->ext.ext_lock);
-       return 0;
+       read_unlock(&fi->ext_lock);
+       return false;
 }
 
-void update_extent_cache(struct dnode_of_data *dn)
+static bool update_extent_info(struct inode *inode, pgoff_t fofs,
+                                                               block_t blkaddr)
 {
-       struct f2fs_inode_info *fi = F2FS_I(dn->inode);
-       pgoff_t fofs, start_fofs, end_fofs;
+       struct f2fs_inode_info *fi = F2FS_I(inode);
+       pgoff_t start_fofs, end_fofs;
        block_t start_blkaddr, end_blkaddr;
        int need_update = true;
 
-       f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
-
-       /* Update the page address in the parent node */
-       __set_data_blkaddr(dn);
-
-       if (is_inode_flag_set(fi, FI_NO_EXTENT))
-               return;
-
-       fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
-                                                       dn->ofs_in_node;
-
-       write_lock(&fi->ext.ext_lock);
+       write_lock(&fi->ext_lock);
 
        start_fofs = fi->ext.fofs;
        end_fofs = fi->ext.fofs + fi->ext.len - 1;
-       start_blkaddr = fi->ext.blk_addr;
-       end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;
+       start_blkaddr = fi->ext.blk;
+       end_blkaddr = fi->ext.blk + fi->ext.len - 1;
 
        /* Drop and initialize the matched extent */
        if (fi->ext.len == 1 && fofs == start_fofs)
@@ -322,24 +314,24 @@ void update_extent_cache(struct dnode_of_data *dn)
 
        /* Initial extent */
        if (fi->ext.len == 0) {
-               if (dn->data_blkaddr != NULL_ADDR) {
+               if (blkaddr != NULL_ADDR) {
                        fi->ext.fofs = fofs;
-                       fi->ext.blk_addr = dn->data_blkaddr;
+                       fi->ext.blk = blkaddr;
                        fi->ext.len = 1;
                }
                goto end_update;
        }
 
        /* Front merge */
-       if (fofs == start_fofs - 1 && dn->data_blkaddr == start_blkaddr - 1) {
+       if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {
                fi->ext.fofs--;
-               fi->ext.blk_addr--;
+               fi->ext.blk--;
                fi->ext.len++;
                goto end_update;
        }
 
        /* Back merge */
-       if (fofs == end_fofs + 1 && dn->data_blkaddr == end_blkaddr + 1) {
+       if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {
                fi->ext.len++;
                goto end_update;
        }
@@ -351,8 +343,7 @@ void update_extent_cache(struct dnode_of_data *dn)
                        fi->ext.len = fofs - start_fofs;
                } else {
                        fi->ext.fofs = fofs + 1;
-                       fi->ext.blk_addr = start_blkaddr +
-                                       fofs - start_fofs + 1;
+                       fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
                        fi->ext.len -= fofs - start_fofs + 1;
                }
        } else {
@@ -366,27 +357,583 @@ void update_extent_cache(struct dnode_of_data *dn)
                need_update = true;
        }
 end_update:
-       write_unlock(&fi->ext.ext_lock);
-       if (need_update)
-               sync_inode_page(dn);
+       write_unlock(&fi->ext_lock);
+       return need_update;
+}
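
update_extent_info() is the legacy path kept for mounts without the rb-tree cache: each inode caches exactly one extent, so an update either grows it by one block at an end, starts a new one, or shrinks/splits it when a block inside it changes, and the caller syncs the inode page when anything moved. The two merge tests reduce to plain contiguity checks (sketch, reusing the toy types above):

    #include <stdbool.h>

    /* The one-block merge cases from update_extent_info(), in
     * isolation. ext maps [fofs, fofs+len) to [blk, blk+len). */
    static bool try_merge_one(struct extent_info *ext, pgoff_t fofs,
                              block_t blkaddr)
    {
            /* front merge: new block sits immediately before the extent */
            if (fofs + 1 == ext->fofs && blkaddr + 1 == ext->blk) {
                    ext->fofs--;
                    ext->blk--;
                    ext->len++;
                    return true;
            }
            /* back merge: new block sits immediately after the extent */
            if (fofs == ext->fofs + ext->len && blkaddr == ext->blk + ext->len) {
                    ext->len++;
                    return true;
            }
            return false;
    }
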
+
+static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
+                               struct extent_tree *et, struct extent_info *ei,
+                               struct rb_node *parent, struct rb_node **p)
+{
+       struct extent_node *en;
+
+       en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
+       if (!en)
+               return NULL;
+
+       en->ei = *ei;
+       INIT_LIST_HEAD(&en->list);
+
+       rb_link_node(&en->rb_node, parent, p);
+       rb_insert_color(&en->rb_node, &et->root);
+       et->count++;
+       atomic_inc(&sbi->total_ext_node);
+       return en;
+}
+
+static void __detach_extent_node(struct f2fs_sb_info *sbi,
+                               struct extent_tree *et, struct extent_node *en)
+{
+       rb_erase(&en->rb_node, &et->root);
+       et->count--;
+       atomic_dec(&sbi->total_ext_node);
+
+       if (et->cached_en == en)
+               et->cached_en = NULL;
+}
+
+static struct extent_tree *__find_extent_tree(struct f2fs_sb_info *sbi,
+                                                       nid_t ino)
+{
+       struct extent_tree *et;
+
+       down_read(&sbi->extent_tree_lock);
+       et = radix_tree_lookup(&sbi->extent_tree_root, ino);
+       if (!et) {
+               up_read(&sbi->extent_tree_lock);
+               return NULL;
+       }
+       atomic_inc(&et->refcount);
+       up_read(&sbi->extent_tree_lock);
+
+       return et;
+}
+
+static struct extent_tree *__grab_extent_tree(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct extent_tree *et;
+       nid_t ino = inode->i_ino;
+
+       down_write(&sbi->extent_tree_lock);
+       et = radix_tree_lookup(&sbi->extent_tree_root, ino);
+       if (!et) {
+               et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
+               f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
+               memset(et, 0, sizeof(struct extent_tree));
+               et->ino = ino;
+               et->root = RB_ROOT;
+               et->cached_en = NULL;
+               rwlock_init(&et->lock);
+               atomic_set(&et->refcount, 0);
+               et->count = 0;
+               sbi->total_ext_tree++;
+       }
+       atomic_inc(&et->refcount);
+       up_write(&sbi->extent_tree_lock);
+
+       return et;
+}
+
+static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
+                                                       unsigned int fofs)
+{
+       struct rb_node *node = et->root.rb_node;
+       struct extent_node *en;
+
+       if (et->cached_en) {
+               struct extent_info *cei = &et->cached_en->ei;
+
+               if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
+                       return et->cached_en;
+       }
+
+       while (node) {
+               en = rb_entry(node, struct extent_node, rb_node);
+
+               if (fofs < en->ei.fofs) {
+                       node = node->rb_left;
+               } else if (fofs >= en->ei.fofs + en->ei.len) {
+                       node = node->rb_right;
+               } else {
+                       et->cached_en = en;
+                       return en;
+               }
+       }
+       return NULL;
+}
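
__lookup_extent_tree() relies on the invariant that the tree holds disjoint extents ordered by fofs, so a point query is an ordinary binary descent, and cached_en short-circuits the common case of consecutive hits landing in the same extent. The same decision procedure over a sorted array (a runnable analogue, not the kernel code; toy types as above):

    /* Binary search for the extent containing fofs in a sorted,
     * disjoint array -- the same three-way decision the rb-tree walk
     * above performs node by node. */
    static struct extent_info *lookup_sorted(struct extent_info *v, int n,
                                             pgoff_t fofs)
    {
            int lo = 0, hi = n - 1;

            while (lo <= hi) {
                    int mid = lo + (hi - lo) / 2;

                    if (fofs < v[mid].fofs)
                            hi = mid - 1;                   /* go left */
                    else if (fofs >= v[mid].fofs + v[mid].len)
                            lo = mid + 1;                   /* go right */
                    else
                            return &v[mid];                 /* containment */
            }
            return NULL;
    }
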
+
+static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
+                               struct extent_tree *et, struct extent_node *en)
+{
+       struct extent_node *prev;
+       struct rb_node *node;
+
+       node = rb_prev(&en->rb_node);
+       if (!node)
+               return NULL;
+
+       prev = rb_entry(node, struct extent_node, rb_node);
+       if (__is_back_mergeable(&en->ei, &prev->ei)) {
+               en->ei.fofs = prev->ei.fofs;
+               en->ei.blk = prev->ei.blk;
+               en->ei.len += prev->ei.len;
+               __detach_extent_node(sbi, et, prev);
+               return prev;
+       }
+       return NULL;
+}
+
+static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
+                               struct extent_tree *et, struct extent_node *en)
+{
+       struct extent_node *next;
+       struct rb_node *node;
+
+       node = rb_next(&en->rb_node);
+       if (!node)
+               return NULL;
+
+       next = rb_entry(node, struct extent_node, rb_node);
+       if (__is_front_mergeable(&en->ei, &next->ei)) {
+               en->ei.len += next->ei.len;
+               __detach_extent_node(sbi, et, next);
+               return next;
+       }
+       return NULL;
+}
+
+static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
+                               struct extent_tree *et, struct extent_info *ei,
+                               struct extent_node **den)
+{
+       struct rb_node **p = &et->root.rb_node;
+       struct rb_node *parent = NULL;
+       struct extent_node *en;
+
+       while (*p) {
+               parent = *p;
+               en = rb_entry(parent, struct extent_node, rb_node);
+
+               if (ei->fofs < en->ei.fofs) {
+                       if (__is_front_mergeable(ei, &en->ei)) {
+                               f2fs_bug_on(sbi, !den);
+                               en->ei.fofs = ei->fofs;
+                               en->ei.blk = ei->blk;
+                               en->ei.len += ei->len;
+                               *den = __try_back_merge(sbi, et, en);
+                               return en;
+                       }
+                       p = &(*p)->rb_left;
+               } else if (ei->fofs >= en->ei.fofs + en->ei.len) {
+                       if (__is_back_mergeable(ei, &en->ei)) {
+                               f2fs_bug_on(sbi, !den);
+                               en->ei.len += ei->len;
+                               *den = __try_front_merge(sbi, et, en);
+                               return en;
+                       }
+                       p = &(*p)->rb_right;
+               } else {
+                       f2fs_bug_on(sbi, 1);
+               }
+       }
+
+       return __attach_extent_node(sbi, et, ei, parent, p);
+}
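
__insert_extent_tree() keeps the tree canonical: a new extent that touches an existing node on either side is folded into it rather than inserted, and __try_back_merge()/__try_front_merge() then check whether the grown node now also meets its tree neighbour; a node absorbed that way is handed back through *den so the caller can unlink and free it. The mergeable predicates themselves are defined in f2fs.h, not in this diff; from their use here they must amount to contiguity in both the file-offset and block spaces, roughly:

    /* Presumed shape of the f2fs.h helpers (not shown in this diff):
     * two extents merge when both address spaces are contiguous. */
    static inline bool __is_extent_mergeable(struct extent_info *back,
                                             struct extent_info *front)
    {
            return back->fofs + back->len == front->fofs &&
                   back->blk + back->len == front->blk;
    }
    /* __is_front_mergeable(cur, front): cur ends exactly where front
     * begins; __is_back_mergeable(cur, back): back ends exactly where
     * cur begins. */
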
+
+static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
+                                       struct extent_tree *et, bool free_all)
+{
+       struct rb_node *node, *next;
+       struct extent_node *en;
+       unsigned int count = et->count;
+
+       node = rb_first(&et->root);
+       while (node) {
+               next = rb_next(node);
+               en = rb_entry(node, struct extent_node, rb_node);
+
+               if (free_all) {
+                       spin_lock(&sbi->extent_lock);
+                       if (!list_empty(&en->list))
+                               list_del_init(&en->list);
+                       spin_unlock(&sbi->extent_lock);
+               }
+
+               if (free_all || list_empty(&en->list)) {
+                       __detach_extent_node(sbi, et, en);
+                       kmem_cache_free(extent_node_slab, en);
+               }
+               node = next;
+       }
+
+       return count - et->count;
+}
+
+static void f2fs_init_extent_tree(struct inode *inode,
+                                               struct f2fs_extent *i_ext)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct extent_tree *et;
+       struct extent_node *en;
+       struct extent_info ei;
+
+       if (le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
+               return;
+
+       et = __grab_extent_tree(inode);
+
+       write_lock(&et->lock);
+       if (et->count)
+               goto out;
+
+       set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
+               le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
+
+       en = __insert_extent_tree(sbi, et, &ei, NULL);
+       if (en) {
+               et->cached_en = en;
+
+               spin_lock(&sbi->extent_lock);
+               list_add_tail(&en->list, &sbi->extent_list);
+               spin_unlock(&sbi->extent_lock);
+       }
+out:
+       write_unlock(&et->lock);
+       atomic_dec(&et->refcount);
+}
+
+static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+                                                       struct extent_info *ei)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct extent_tree *et;
+       struct extent_node *en;
+
+       trace_f2fs_lookup_extent_tree_start(inode, pgofs);
+
+       et = __find_extent_tree(sbi, inode->i_ino);
+       if (!et)
+               return false;
+
+       read_lock(&et->lock);
+       en = __lookup_extent_tree(et, pgofs);
+       if (en) {
+               *ei = en->ei;
+               spin_lock(&sbi->extent_lock);
+               if (!list_empty(&en->list))
+                       list_move_tail(&en->list, &sbi->extent_list);
+               spin_unlock(&sbi->extent_lock);
+               stat_inc_read_hit(sbi->sb);
+       }
+       stat_inc_total_hit(sbi->sb);
+       read_unlock(&et->lock);
+
+       trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);
+
+       atomic_dec(&et->refcount);
+       return en ? true : false;
+}
+
+static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
+                                                       block_t blkaddr)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct extent_tree *et;
+       struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
+       struct extent_node *den = NULL;
+       struct extent_info ei, dei;
+       unsigned int endofs;
+
+       trace_f2fs_update_extent_tree(inode, fofs, blkaddr);
+
+       et = __grab_extent_tree(inode);
+
+       write_lock(&et->lock);
+
+       /* 1. lookup and remove existing extent info in cache */
+       en = __lookup_extent_tree(et, fofs);
+       if (!en)
+               goto update_extent;
+
+       dei = en->ei;
+       __detach_extent_node(sbi, et, en);
+
+       /* 2. if the extent can be split, insert the left and right parts */
+       if (dei.len > 1) {
+               /*  insert left part of split extent into cache */
+               if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
+                       set_extent_info(&ei, dei.fofs, dei.blk,
+                                                       fofs - dei.fofs);
+                       en1 = __insert_extent_tree(sbi, et, &ei, NULL);
+               }
+
+               /* insert right part of split extent into cache */
+               endofs = dei.fofs + dei.len - 1;
+               if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
+                       set_extent_info(&ei, fofs + 1,
+                               fofs - dei.fofs + dei.blk + 1, endofs - fofs);
+                       en2 = __insert_extent_tree(sbi, et, &ei, NULL);
+               }
+       }
+
+update_extent:
+       /* 3. update extent in extent cache */
+       if (blkaddr) {
+               set_extent_info(&ei, fofs, blkaddr, 1);
+               en3 = __insert_extent_tree(sbi, et, &ei, &den);
+       }
+
+       /* 4. update in global extent list */
+       spin_lock(&sbi->extent_lock);
+       if (en && !list_empty(&en->list))
+               list_del(&en->list);
+       /*
+        * en1 and en2 were split out of en; they get smaller with every
+        * further split, so once a part is shorter than F2FS_MIN_EXTENT_LEN
+        * it is no longer added back into the extent tree.
+        */
+       if (en1)
+               list_add_tail(&en1->list, &sbi->extent_list);
+       if (en2)
+               list_add_tail(&en2->list, &sbi->extent_list);
+       if (en3) {
+               if (list_empty(&en3->list))
+                       list_add_tail(&en3->list, &sbi->extent_list);
+               else
+                       list_move_tail(&en3->list, &sbi->extent_list);
+       }
+       if (den && !list_empty(&den->list))
+               list_del(&den->list);
+       spin_unlock(&sbi->extent_lock);
+
+       /* 5. release extent node */
+       if (en)
+               kmem_cache_free(extent_node_slab, en);
+       if (den)
+               kmem_cache_free(extent_node_slab, den);
+
+       write_unlock(&et->lock);
+       atomic_dec(&et->refcount);
+}
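
The split in step 2 turns one extent into up to three pieces around the updated offset: a left remainder [dei.fofs, fofs), the single updated block at fofs, and a right remainder [fofs+1, dei.fofs+dei.len). Either remainder survives only if it is at least F2FS_MIN_EXTENT_LEN blocks long. The arithmetic as a standalone check (toy types from the earlier sketches; min_len plays the role of F2FS_MIN_EXTENT_LEN):

    /* Split dei around an update at fofs. Surviving parts come back
     * with len != 0. */
    static void split_extent(const struct extent_info *dei, pgoff_t fofs,
                             unsigned int min_len,
                             struct extent_info *left,
                             struct extent_info *right)
    {
            pgoff_t endofs = dei->fofs + dei->len - 1;

            left->len = 0;
            right->len = 0;

            if (fofs - dei->fofs >= min_len) {
                    left->fofs = dei->fofs;
                    left->blk = dei->blk;
                    left->len = fofs - dei->fofs;
            }
            if (endofs - fofs >= min_len) {
                    right->fofs = fofs + 1;
                    right->blk = dei->blk + (fofs + 1 - dei->fofs);
                    right->len = endofs - fofs;
            }
    }
    /* Example: dei = {fofs=0, blk=100, len=9}, update at fofs=4,
     * min_len=2 -> left = {0,100,4}, right = {5,105,4}. */
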
+
+void f2fs_preserve_extent_tree(struct inode *inode)
+{
+       struct extent_tree *et;
+       struct extent_info *ext = &F2FS_I(inode)->ext;
+       bool sync = false;
+
+       if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
+               return;
+
+       et = __find_extent_tree(F2FS_I_SB(inode), inode->i_ino);
+       if (!et) {
+               if (ext->len) {
+                       ext->len = 0;
+                       update_inode_page(inode);
+               }
+               return;
+       }
+
+       read_lock(&et->lock);
+       if (et->count) {
+               struct extent_node *en;
+
+               if (et->cached_en) {
+                       en = et->cached_en;
+               } else {
+                       struct rb_node *node = rb_first(&et->root);
+
+                       if (!node)
+                               node = rb_last(&et->root);
+                       en = rb_entry(node, struct extent_node, rb_node);
+               }
+
+               if (__is_extent_same(ext, &en->ei))
+                       goto out;
+
+               *ext = en->ei;
+               sync = true;
+       } else if (ext->len) {
+               ext->len = 0;
+               sync = true;
+       }
+out:
+       read_unlock(&et->lock);
+       atomic_dec(&et->refcount);
+
+       if (sync)
+               update_inode_page(inode);
+}
+
+void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+       struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
+       struct extent_node *en, *tmp;
+       unsigned long ino = F2FS_ROOT_INO(sbi);
+       struct radix_tree_iter iter;
+       void **slot;
+       unsigned int found;
+       unsigned int node_cnt = 0, tree_cnt = 0;
+
+       if (!test_opt(sbi, EXTENT_CACHE))
+               return;
+
+       if (available_free_memory(sbi, EXTENT_CACHE))
+               return;
+
+       spin_lock(&sbi->extent_lock);
+       list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
+               if (!nr_shrink--)
+                       break;
+               list_del_init(&en->list);
+       }
+       spin_unlock(&sbi->extent_lock);
+
+       down_read(&sbi->extent_tree_lock);
+       while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
+                               (void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
+               unsigned i;
+
+               ino = treevec[found - 1]->ino + 1;
+               for (i = 0; i < found; i++) {
+                       struct extent_tree *et = treevec[i];
+
+                       atomic_inc(&et->refcount);
+                       write_lock(&et->lock);
+                       node_cnt += __free_extent_tree(sbi, et, false);
+                       write_unlock(&et->lock);
+                       atomic_dec(&et->refcount);
+               }
+       }
+       up_read(&sbi->extent_tree_lock);
+
+       down_write(&sbi->extent_tree_lock);
+       radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
+                                                       F2FS_ROOT_INO(sbi)) {
+               struct extent_tree *et = (struct extent_tree *)*slot;
+
+               if (!atomic_read(&et->refcount) && !et->count) {
+                       radix_tree_delete(&sbi->extent_tree_root, et->ino);
+                       kmem_cache_free(extent_tree_slab, et);
+                       sbi->total_ext_tree--;
+                       tree_cnt++;
+               }
+       }
+       up_write(&sbi->extent_tree_lock);
+
+       trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
+}
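
f2fs_shrink_extent_tree() runs in three passes: detach up to nr_shrink nodes from the head of the global LRU (hits and fresh inserts list_move_tail()/list_add_tail() to the tail, so the head is the cold end), then walk every tree freeing nodes that are no longer on the list, and finally reap trees that ended up empty and unreferenced. The tree walk uses the classic gang-lookup batching idiom; its shape, with a hypothetical gang_lookup() standing in for radix_tree_gang_lookup():

    #define VEC_SIZE 64     /* plays the role of EXT_TREE_VEC_SIZE */

    struct tree_entry { unsigned long ino; };

    /* Hypothetical batched lookup: fills vec with up to nr entries
     * whose index is >= start, in ascending order; returns the count. */
    extern unsigned int gang_lookup(struct tree_entry **vec,
                                    unsigned long start, unsigned int nr);

    static void for_each_tree(void (*visit)(struct tree_entry *))
    {
            struct tree_entry *vec[VEC_SIZE];
            unsigned long next = 0;
            unsigned int found, i;

            while ((found = gang_lookup(vec, next, VEC_SIZE))) {
                    /* resume the following batch just past the last hit */
                    next = vec[found - 1]->ino + 1;
                    for (i = 0; i < found; i++)
                            visit(vec[i]);
            }
    }
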
+
+void f2fs_destroy_extent_tree(struct inode *inode)
+{
+       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       struct extent_tree *et;
+       unsigned int node_cnt = 0;
+
+       if (!test_opt(sbi, EXTENT_CACHE))
+               return;
+
+       et = __find_extent_tree(sbi, inode->i_ino);
+       if (!et)
+               goto out;
+
+       /* free all extent info belonging to this extent tree */
+       write_lock(&et->lock);
+       node_cnt = __free_extent_tree(sbi, et, true);
+       write_unlock(&et->lock);
+
+       atomic_dec(&et->refcount);
+
+       /* try to find and delete extent tree entry in radix tree */
+       down_write(&sbi->extent_tree_lock);
+       et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
+       if (!et) {
+               up_write(&sbi->extent_tree_lock);
+               goto out;
+       }
+       f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
+       radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
+       kmem_cache_free(extent_tree_slab, et);
+       sbi->total_ext_tree--;
+       up_write(&sbi->extent_tree_lock);
+out:
+       trace_f2fs_destroy_extent_tree(inode, node_cnt);
        return;
 }
 
+void f2fs_init_extent_cache(struct inode *inode, struct f2fs_extent *i_ext)
+{
+       if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
+               f2fs_init_extent_tree(inode, i_ext);
+
+       write_lock(&F2FS_I(inode)->ext_lock);
+       get_extent_info(&F2FS_I(inode)->ext, *i_ext);
+       write_unlock(&F2FS_I(inode)->ext_lock);
+}
+
+static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
+                                                       struct extent_info *ei)
+{
+       if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+               return false;
+
+       if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
+               return f2fs_lookup_extent_tree(inode, pgofs, ei);
+
+       return lookup_extent_info(inode, pgofs, ei);
+}
+
+void f2fs_update_extent_cache(struct dnode_of_data *dn)
+{
+       struct f2fs_inode_info *fi = F2FS_I(dn->inode);
+       pgoff_t fofs;
+
+       f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
+
+       if (is_inode_flag_set(fi, FI_NO_EXTENT))
+               return;
+
+       fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+                                                       dn->ofs_in_node;
+
+       if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE))
+               return f2fs_update_extent_tree(dn->inode, fofs,
+                                                       dn->data_blkaddr);
+
+       if (update_extent_info(dn->inode, fofs, dn->data_blkaddr))
+               sync_inode_page(dn);
+}
+
 struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
 {
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
+       struct extent_info ei;
        int err;
        struct f2fs_io_info fio = {
                .type = DATA,
                .rw = sync ? READ_SYNC : READA,
        };
 
+       /*
+        * If sync is false, it needs to check its block allocation.
+        * This is needed and triggered by two flows:
+        *   gc and truncate_partial_data_page.
+        */
+       if (!sync)
+               goto search;
+
        page = find_get_page(mapping, index);
        if (page && PageUptodate(page))
                return page;
        f2fs_put_page(page, 0);
+search:
+       if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+               dn.data_blkaddr = ei.blk + index - ei.fofs;
+               goto got_it;
+       }
 
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
@@ -401,6 +948,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
        if (unlikely(dn.data_blkaddr == NEW_ADDR))
                return ERR_PTR(-EINVAL);
 
+got_it:
        page = grab_cache_page(mapping, index);
        if (!page)
                return ERR_PTR(-ENOMEM);
@@ -435,6 +983,7 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
        struct address_space *mapping = inode->i_mapping;
        struct dnode_of_data dn;
        struct page *page;
+       struct extent_info ei;
        int err;
        struct f2fs_io_info fio = {
                .type = DATA,
@@ -445,6 +994,11 @@ repeat:
        if (!page)
                return ERR_PTR(-ENOMEM);
 
+       if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+               dn.data_blkaddr = ei.blk + index - ei.fofs;
+               goto got_it;
+       }
+
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err) {
@@ -458,6 +1012,7 @@ repeat:
                return ERR_PTR(-ENOENT);
        }
 
+got_it:
        if (PageUptodate(page))
                return page;
 
@@ -569,19 +1124,26 @@ static int __allocate_data_block(struct dnode_of_data *dn)
 
        if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
                return -EPERM;
+
+       dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
+       if (dn->data_blkaddr == NEW_ADDR)
+               goto alloc;
+
        if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
                return -ENOSPC;
 
+alloc:
        get_node_info(sbi, dn->nid, &ni);
        set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
 
        if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
                seg = CURSEG_DIRECT_IO;
 
-       allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg);
+       allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
+                                                               &sum, seg);
 
        /* direct IO doesn't use extent cache to maximize performance */
-       __set_data_blkaddr(dn);
+       set_data_blkaddr(dn);
 
        /* update i_size */
        fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
@@ -615,7 +1177,10 @@ static void __allocate_data_blocks(struct inode *inode, loff_t offset,
                end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
 
                while (dn.ofs_in_node < end_offset && len) {
-                       if (dn.data_blkaddr == NULL_ADDR) {
+                       block_t blkaddr;
+
+                       blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+                       if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
                                if (__allocate_data_block(&dn))
                                        goto sync_out;
                                allocated = true;
@@ -659,13 +1224,16 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
        int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
        pgoff_t pgofs, end_offset;
        int err = 0, ofs = 1;
+       struct extent_info ei;
        bool allocated = false;
 
        /* Get the page offset from the block offset(iblock) */
        pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));
 
-       if (check_extent_cache(inode, pgofs, bh_result))
+       if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+               f2fs_map_bh(inode->i_sb, pgofs, &ei, bh_result);
                goto out;
+       }
 
        if (create)
                f2fs_lock_op(F2FS_I_SB(inode));
@@ -682,7 +1250,7 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
                goto put_out;
 
        if (dn.data_blkaddr != NULL_ADDR) {
-               set_buffer_new(bh_result);
+               clear_buffer_new(bh_result);
                map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
        } else if (create) {
                err = __allocate_data_block(&dn);
@@ -727,6 +1295,7 @@ get_next:
                        if (err)
                                goto sync_out;
                        allocated = true;
+                       set_buffer_new(bh_result);
                        blkaddr = dn.data_blkaddr;
                }
                /* Give more consecutive addresses for the readahead */
@@ -813,8 +1382,10 @@ int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
        fio->blk_addr = dn.data_blkaddr;
 
        /* This page is already truncated */
-       if (fio->blk_addr == NULL_ADDR)
+       if (fio->blk_addr == NULL_ADDR) {
+               ClearPageUptodate(page);
                goto out_writepage;
+       }
 
        set_page_writeback(page);
 
@@ -827,10 +1398,15 @@ int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
                        need_inplace_update(inode))) {
                rewrite_data_page(page, fio);
                set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
+               trace_f2fs_do_write_data_page(page, IPU);
        } else {
                write_data_page(page, &dn, fio);
-               update_extent_cache(&dn);
+               set_data_blkaddr(&dn);
+               f2fs_update_extent_cache(&dn);
+               trace_f2fs_do_write_data_page(page, OPU);
                set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
+               if (page->index == 0)
+                       set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
        }
 out_writepage:
        f2fs_put_dnode(&dn);
@@ -909,6 +1485,8 @@ done:
        clear_cold_data(page);
 out:
        inode_dec_dirty_pages(inode);
+       if (err)
+               ClearPageUptodate(page);
        unlock_page(page);
        if (need_balance_fs)
                f2fs_balance_fs(sbi);
@@ -935,7 +1513,6 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 {
        struct inode *inode = mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-       bool locked = false;
        int ret;
        long diff;
 
@@ -950,15 +1527,13 @@ static int f2fs_write_data_pages(struct address_space *mapping,
                        available_free_memory(sbi, DIRTY_DENTS))
                goto skip_write;
 
+       /* during POR, we don't need to trigger writepage at all. */
+       if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+               goto skip_write;
+
        diff = nr_pages_to_write(sbi, DATA, wbc);
 
-       if (!S_ISDIR(inode->i_mode)) {
-               mutex_lock(&sbi->writepages);
-               locked = true;
-       }
        ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
-       if (locked)
-               mutex_unlock(&sbi->writepages);
 
        f2fs_submit_merged_bio(sbi, DATA, WRITE);
 
@@ -1236,6 +1811,37 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
        return generic_block_bmap(mapping, block, get_data_block);
 }
 
+void init_extent_cache_info(struct f2fs_sb_info *sbi)
+{
+       INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
+       init_rwsem(&sbi->extent_tree_lock);
+       INIT_LIST_HEAD(&sbi->extent_list);
+       spin_lock_init(&sbi->extent_lock);
+       sbi->total_ext_tree = 0;
+       atomic_set(&sbi->total_ext_node, 0);
+}
+
+int __init create_extent_cache(void)
+{
+       extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
+                       sizeof(struct extent_tree));
+       if (!extent_tree_slab)
+               return -ENOMEM;
+       extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
+                       sizeof(struct extent_node));
+       if (!extent_node_slab) {
+               kmem_cache_destroy(extent_tree_slab);
+               return -ENOMEM;
+       }
+       return 0;
+}
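
create_extent_cache() follows the standard unwind-on-failure pairing: when the second allocation fails, the first must be destroyed before the error propagates, and destroy_extent_cache() tears both down in reverse at module exit. The idiom in general form (a sketch with hypothetical init_a/init_b/teardown_a helpers):

    /* Generic unwind-on-failure shape: release everything acquired so
     * far, newest first, then report the error. */
    extern int init_a(void), init_b(void);
    extern void teardown_a(void);

    static int init_all(void)
    {
            if (init_a())
                    return -1;
            if (init_b()) {
                    teardown_a();   /* undo the earlier step */
                    return -1;
            }
            return 0;
    }
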
+
+void destroy_extent_cache(void)
+{
+       kmem_cache_destroy(extent_node_slab);
+       kmem_cache_destroy(extent_tree_slab);
+}
+
 const struct address_space_operations f2fs_dblock_aops = {
        .readpage       = f2fs_read_data_page,
        .readpages      = f2fs_read_data_pages,