ext4: add some tracepoints in ext4/extents.c
author    Aditya Kali <adityakali@google.com>
          Fri, 9 Sep 2011 23:18:51 +0000 (19:18 -0400)
committer Theodore Ts'o <tytso@mit.edu>
          Fri, 9 Sep 2011 23:18:51 +0000 (19:18 -0400)
This patch adds some tracepoints in ext4/extents.c and updates a tracepoint in
ext4/inode.c.

Tested: Built and ran the kernel and verified that these tracepoints work.
Also ran xfstests.

Signed-off-by: Aditya Kali <adityakali@google.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/migrate.c
fs/ext4/move_extent.c
include/trace/events/ext4.h

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 21ea65d8bd4692a42c1b1b9ab40b65a8a947a491..751277a4890c59e1b0c17675e92e5835aa52c039 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2339,4 +2339,6 @@ extern void ext4_resize_end(struct super_block *sb);
 
 #endif /* __KERNEL__ */
 
+#include "ext4_extents.h"
+
 #endif /* _EXT4_H */
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c4e0058645345f1b1655431fc28826f31006ffe6..9b119308daea78b3313ac92e01f3a641c43c5887 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -42,7 +42,6 @@
 #include <asm/uaccess.h>
 #include <linux/fiemap.h>
 #include "ext4_jbd2.h"
-#include "ext4_extents.h"
 
 #include <trace/events/ext4.h>
 
@@ -1969,6 +1968,7 @@ ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
        struct ext4_ext_cache *cex;
        BUG_ON(len == 0);
        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+       trace_ext4_ext_put_in_cache(inode, block, len, start);
        cex = &EXT4_I(inode)->i_cached_extent;
        cex->ec_block = block;
        cex->ec_len = len;
@@ -2070,6 +2070,7 @@ errout:
                sbi->extent_cache_misses++;
        else
                sbi->extent_cache_hits++;
+       trace_ext4_ext_in_cache(inode, block, ret);
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
        return ret;
 }
@@ -2137,6 +2138,8 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
        if (err)
                return err;
        ext_debug("index is empty, remove it, free block %llu\n", leaf);
+       trace_ext4_ext_rm_idx(inode, leaf);
+
        ext4_free_blocks(handle, inode, NULL, leaf, 1,
                         EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
        return err;
@@ -2222,6 +2225,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
         */
        flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
 
+       trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
        /*
         * If we have a partial cluster, and it's different from the
         * cluster of the last block, we need to explicitly free the
@@ -2336,6 +2340,8 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
        ex_ee_block = le32_to_cpu(ex->ee_block);
        ex_ee_len = ext4_ext_get_actual_len(ex);
 
+       trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
+
        while (ex >= EXT_FIRST_EXTENT(eh) &&
                        ex_ee_block + ex_ee_len > start) {
 
@@ -2591,6 +2597,8 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
 again:
        ext4_ext_invalidate_cache(inode);
 
+       trace_ext4_ext_remove_space(inode, start, depth);
+
        /*
         * We start scanning from right side, freeing all the blocks
         * after i_size and walking into the tree depth-wise.
@@ -2686,6 +2694,9 @@ again:
                }
        }
 
+       trace_ext4_ext_remove_space_done(inode, start, depth, partial_cluster,
+                       path->p_hdr->eh_entries);
+
        /* If we still have something in the partial cluster and we have removed
         * even the first extent, then we should free the blocks in the partial
         * cluster as well. */
@@ -3300,6 +3311,10 @@ static int ext4_find_delalloc_range(struct inode *inode,
                         * detect that here.
                         */
                        page_cache_release(page);
+                       trace_ext4_find_delalloc_range(inode,
+                                       lblk_start, lblk_end,
+                                       search_hint_reverse,
+                                       0, i);
                        return 0;
                }
 
@@ -3327,6 +3342,10 @@ static int ext4_find_delalloc_range(struct inode *inode,
 
                        if (buffer_delay(bh)) {
                                page_cache_release(page);
+                               trace_ext4_find_delalloc_range(inode,
+                                               lblk_start, lblk_end,
+                                               search_hint_reverse,
+                                               1, i);
                                return 1;
                        }
                        if (search_hint_reverse)
@@ -3349,6 +3368,8 @@ nextpage:
                i = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
        }
 
+       trace_ext4_find_delalloc_range(inode, lblk_start, lblk_end,
+                                       search_hint_reverse, 0, 0);
        return 0;
 }
 
@@ -3414,6 +3435,8 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
        /* max possible clusters for this allocation */
        allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
 
+       trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
+
        /* Check towards left side */
        c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
        if (c_offset) {
@@ -3453,6 +3476,9 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
                  flags, allocated);
        ext4_ext_show_leaf(inode, path);
 
+       trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
+                                                   newblock);
+
        /* get_block() before submit the IO, split the extent */
        if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
                ret = ext4_split_unwritten_extents(handle, inode, map,
@@ -3572,7 +3598,7 @@ out2:
  * get_implied_cluster_alloc - check to see if the requested
  * allocation (in the map structure) overlaps with a cluster already
  * allocated in an extent.
- *     @sbi    The ext4-specific superblock structure
+ *     @sb     The filesystem superblock structure
  *     @map    The requested lblk->pblk mapping
  *     @ex     The extent structure which might contain an implied
  *                     cluster allocation
@@ -3609,11 +3635,12 @@ out2:
  * ext4_ext_map_blocks() will then allocate one or more new clusters
  * by calling ext4_mb_new_blocks().
  */
-static int get_implied_cluster_alloc(struct ext4_sb_info *sbi,
+static int get_implied_cluster_alloc(struct super_block *sb,
                                     struct ext4_map_blocks *map,
                                     struct ext4_extent *ex,
                                     struct ext4_ext_path *path)
 {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
        ext4_lblk_t ex_cluster_start, ex_cluster_end;
        ext4_lblk_t rr_cluster_start, rr_cluster_end;
@@ -3662,8 +3689,12 @@ static int get_implied_cluster_alloc(struct ext4_sb_info *sbi,
                        ext4_lblk_t next = ext4_ext_next_allocated_block(path);
                        map->m_len = min(map->m_len, next - map->m_lblk);
                }
+
+               trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
                return 1;
        }
+
+       trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
        return 0;
 }
 
@@ -3772,6 +3803,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                 * we split out initialized portions during a write.
                 */
                ee_len = ext4_ext_get_actual_len(ex);
+
+               trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
+
                /* if found extent covers block, simply return it */
                if (in_range(map->m_lblk, ee_block, ee_len)) {
                        ext4_fsblk_t partial_cluster = 0;
@@ -3912,7 +3946,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
         * by ext4_ext_find_extent() implies a cluster we can use.
         */
        if (cluster_offset && ex &&
-           get_implied_cluster_alloc(sbi, map, ex, path)) {
+           get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
                ar.len = allocated = map->m_len;
                newblock = map->m_pblk;
                map->m_flags |= EXT4_MAP_FROM_CLUSTER;
@@ -3933,7 +3967,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
        /* Check if the extent after searching to the right implies a
         * cluster we can use. */
        if ((sbi->s_cluster_ratio > 1) && ex2 &&
-           get_implied_cluster_alloc(sbi, map, ex2, path)) {
+           get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
                ar.len = allocated = map->m_len;
                newblock = map->m_pblk;
                map->m_flags |= EXT4_MAP_FROM_CLUSTER;
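
Each trace_ext4_*() call added above compiles down to a static tracepoint that
is paired with a TRACE_EVENT() definition added to include/trace/events/ext4.h
later in this diff. For readers unfamiliar with that machinery, the pairing
looks roughly like the following (a hypothetical "foo_bar" event, illustrative
only and not part of this patch):

/* include/trace/events/foo.h */
TRACE_EVENT(foo_bar,
	TP_PROTO(struct inode *inode, unsigned int len),
	TP_ARGS(inode, len),
	TP_STRUCT__entry(
		__field(	ino_t,		ino	)
		__field(	unsigned int,	len	)
	),
	TP_fast_assign(
		__entry->ino	= inode->i_ino;
		__entry->len	= len;
	),
	TP_printk("ino %lu len %u",
		  (unsigned long) __entry->ino, __entry->len)
);

/* fs/foo/bar.c: essentially free unless the event is enabled */
trace_foo_bar(inode, len);
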
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 88dc63a01756fdc38469fc4f14ec3fc03762b339..2dcd4fed96ec43c5d295eb72b50779462bdf7c17 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -42,7 +42,6 @@
 #include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
-#include "ext4_extents.h"
 #include "truncate.h"
 
 #include <trace/events/ext4.h>
@@ -268,7 +267,7 @@ void ext4_da_update_reserve_space(struct inode *inode,
        struct ext4_inode_info *ei = EXT4_I(inode);
 
        spin_lock(&ei->i_block_reservation_lock);
-       trace_ext4_da_update_reserve_space(inode, used);
+       trace_ext4_da_update_reserve_space(inode, used, quota_claim);
        if (unlikely(used > ei->i_reserved_data_blocks)) {
                ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
                         "with only %d reserved data blocks\n",
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index b57b98fb44d1457ec9f98e60290d6da3a90fb6c8..6f07a06f2437feedf458f45ec33159735ac9a671 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -15,7 +15,6 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include "ext4_jbd2.h"
-#include "ext4_extents.h"
 
 /*
  * The contiguous blocks details which can be
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index f57455a1b1b281bdf21e12f63bc46087abe58194..c5826c623e7af8e19aa0baf5829ce5e25fcc1f71 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -17,7 +17,6 @@
 #include <linux/quotaops.h>
 #include <linux/slab.h>
 #include "ext4_jbd2.h"
-#include "ext4_extents.h"
 #include "ext4.h"
 
 /**
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index b50a54736242ca21d19a814ab37b27783216397a..c9a341e385a38779b3621e36e37bd1f19ec1ea71 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -12,6 +12,8 @@ struct ext4_allocation_request;
 struct ext4_prealloc_space;
 struct ext4_inode_info;
 struct mpage_da_data;
+struct ext4_map_blocks;
+struct ext4_extent;
 
 #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
 
@@ -1032,9 +1034,9 @@ TRACE_EVENT(ext4_forget,
 );
 
 TRACE_EVENT(ext4_da_update_reserve_space,
-       TP_PROTO(struct inode *inode, int used_blocks),
+       TP_PROTO(struct inode *inode, int used_blocks, int quota_claim),
 
-       TP_ARGS(inode, used_blocks),
+       TP_ARGS(inode, used_blocks, quota_claim),
 
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
@@ -1045,6 +1047,7 @@ TRACE_EVENT(ext4_da_update_reserve_space,
                __field(        int,    reserved_data_blocks    )
                __field(        int,    reserved_meta_blocks    )
                __field(        int,    allocated_meta_blocks   )
+               __field(        int,    quota_claim             )
        ),
 
        TP_fast_assign(
@@ -1053,19 +1056,24 @@ TRACE_EVENT(ext4_da_update_reserve_space,
                __entry->mode   = inode->i_mode;
                __entry->i_blocks = inode->i_blocks;
                __entry->used_blocks = used_blocks;
-               __entry->reserved_data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
-               __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
-               __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
+               __entry->reserved_data_blocks =
+                               EXT4_I(inode)->i_reserved_data_blocks;
+               __entry->reserved_meta_blocks =
+                               EXT4_I(inode)->i_reserved_meta_blocks;
+               __entry->allocated_meta_blocks =
+                               EXT4_I(inode)->i_allocated_meta_blocks;
+               __entry->quota_claim = quota_claim;
        ),
 
        TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
                  "reserved_data_blocks %d reserved_meta_blocks %d "
-                 "allocated_meta_blocks %d",
+                 "allocated_meta_blocks %d quota_claim %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  (unsigned long) __entry->ino,
                  __entry->mode, __entry->i_blocks,
                  __entry->used_blocks, __entry->reserved_data_blocks,
-                 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
+                 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks,
+                 __entry->quota_claim)
 );
 
 TRACE_EVENT(ext4_da_reserve_space,
@@ -1589,6 +1597,382 @@ DEFINE_EVENT(ext4__trim, ext4_trim_all_free,
        TP_ARGS(sb, group, start, len)
 );
 
+TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
+       TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+                unsigned int allocated, ext4_fsblk_t newblock),
+
+       TP_ARGS(inode, map, allocated, newblock),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,          ino             )
+               __field(        dev_t,          dev             )
+               __field(        ext4_lblk_t,    lblk            )
+               __field(        ext4_fsblk_t,   pblk            )
+               __field(        unsigned int,   len             )
+               __field(        int,            flags           )
+               __field(        unsigned int,   allocated       )
+               __field(        ext4_fsblk_t,   newblk          )
+       ),
+
+       TP_fast_assign(
+               __entry->ino            = inode->i_ino;
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->lblk           = map->m_lblk;
+               __entry->pblk           = map->m_pblk;
+               __entry->len            = map->m_len;
+               __entry->flags          = map->m_flags;
+               __entry->allocated      = allocated;
+               __entry->newblk         = newblock;
+       ),
+
+       TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %d"
+                 "allocated %d newblock %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
+                 __entry->len, __entry->flags,
+                 (unsigned int) __entry->allocated,
+                 (unsigned long long) __entry->newblk)
+);
+
+TRACE_EVENT(ext4_get_implied_cluster_alloc_exit,
+       TP_PROTO(struct super_block *sb, struct ext4_map_blocks *map, int ret),
+
+       TP_ARGS(sb, map, ret),
+
+       TP_STRUCT__entry(
+               __field(        dev_t,          dev     )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        ext4_fsblk_t,   pblk    )
+               __field(        unsigned int,   len     )
+               __field(        unsigned int,   flags   )
+               __field(        int,            ret     )
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = sb->s_dev;
+               __entry->lblk   = map->m_lblk;
+               __entry->pblk   = map->m_pblk;
+               __entry->len    = map->m_len;
+               __entry->flags  = map->m_flags;
+               __entry->ret    = ret;
+       ),
+
+       TP_printk("dev %d,%d m_lblk %u m_pblk %llu m_len %u m_flags %u ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 __entry->lblk, (unsigned long long) __entry->pblk,
+                 __entry->len, __entry->flags, __entry->ret)
+);
+
+TRACE_EVENT(ext4_ext_put_in_cache,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len,
+                ext4_fsblk_t start),
+
+       TP_ARGS(inode, lblk, len, start),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,          ino     )
+               __field(        dev_t,          dev     )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        unsigned int,   len     )
+               __field(        ext4_fsblk_t,   start   )
+       ),
+
+       TP_fast_assign(
+               __entry->ino    = inode->i_ino;
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->lblk   = lblk;
+               __entry->len    = len;
+               __entry->start  = start;
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u len %u start %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk,
+                 __entry->len,
+                 (unsigned long long) __entry->start)
+);
+
+TRACE_EVENT(ext4_ext_in_cache,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, int ret),
+
+       TP_ARGS(inode, lblk, ret),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,          ino     )
+               __field(        dev_t,          dev     )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        int,            ret     )
+       ),
+
+       TP_fast_assign(
+               __entry->ino    = inode->i_ino;
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->lblk   = lblk;
+               __entry->ret    = ret;
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u ret %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk,
+                 __entry->ret)
+
+);
+
+TRACE_EVENT(ext4_find_delalloc_range,
+       TP_PROTO(struct inode *inode, ext4_lblk_t from, ext4_lblk_t to,
+               int reverse, int found, ext4_lblk_t found_blk),
+
+       TP_ARGS(inode, from, to, reverse, found, found_blk),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,          ino             )
+               __field(        dev_t,          dev             )
+               __field(        ext4_lblk_t,    from            )
+               __field(        ext4_lblk_t,    to              )
+               __field(        int,            reverse         )
+               __field(        int,            found           )
+               __field(        ext4_lblk_t,    found_blk       )
+       ),
+
+       TP_fast_assign(
+               __entry->ino            = inode->i_ino;
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->from           = from;
+               __entry->to             = to;
+               __entry->reverse        = reverse;
+               __entry->found          = found;
+               __entry->found_blk      = found_blk;
+       ),
+
+       TP_printk("dev %d,%d ino %lu from %u to %u reverse %d found %d "
+                 "(blk = %u)",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->from, (unsigned) __entry->to,
+                 __entry->reverse, __entry->found,
+                 (unsigned) __entry->found_blk)
+);
+
+TRACE_EVENT(ext4_get_reserved_cluster_alloc,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, unsigned int len),
+
+       TP_ARGS(inode, lblk, len),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,          ino     )
+               __field(        dev_t,          dev     )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        unsigned int,   len     )
+       ),
+
+       TP_fast_assign(
+               __entry->ino    = inode->i_ino;
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->lblk   = lblk;
+               __entry->len    = len;
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u len %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk,
+                 __entry->len)
+);
+
+TRACE_EVENT(ext4_ext_show_extent,
+       TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
+                unsigned short len),
+
+       TP_ARGS(inode, lblk, pblk, len),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,          ino     )
+               __field(        dev_t,          dev     )
+               __field(        ext4_lblk_t,    lblk    )
+               __field(        ext4_fsblk_t,   pblk    )
+               __field(        unsigned short, len     )
+       ),
+
+       TP_fast_assign(
+               __entry->ino    = inode->i_ino;
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->lblk   = lblk;
+               __entry->pblk   = pblk;
+               __entry->len    = len;
+       ),
+
+       TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->lblk,
+                 (unsigned long long) __entry->pblk,
+                 (unsigned short) __entry->len)
+);
+
+TRACE_EVENT(ext4_remove_blocks,
+           TP_PROTO(struct inode *inode, struct ext4_extent *ex,
+               ext4_lblk_t from, ext4_fsblk_t to,
+               ext4_fsblk_t partial_cluster),
+
+       TP_ARGS(inode, ex, from, to, partial_cluster),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,          ino     )
+               __field(        dev_t,          dev     )
+               __field(        ext4_lblk_t,    ee_lblk )
+               __field(        ext4_fsblk_t,   ee_pblk )
+               __field(        unsigned short, ee_len  )
+               __field(        ext4_lblk_t,    from    )
+               __field(        ext4_lblk_t,    to      )
+               __field(        ext4_fsblk_t,   partial )
+       ),
+
+       TP_fast_assign(
+               __entry->ino            = inode->i_ino;
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->ee_lblk        = le32_to_cpu(ex->ee_block);
+               __entry->ee_pblk        = ext4_ext_pblock(ex);
+               __entry->ee_len         = ext4_ext_get_actual_len(ex);
+               __entry->from           = from;
+               __entry->to             = to;
+               __entry->partial        = partial_cluster;
+       ),
+
+       TP_printk("dev %d,%d ino %lu extent [%u(%llu), %u]"
+                 "from %u to %u partial_cluster %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->ee_lblk,
+                 (unsigned long long) __entry->ee_pblk,
+                 (unsigned short) __entry->ee_len,
+                 (unsigned) __entry->from,
+                 (unsigned) __entry->to,
+                 (unsigned) __entry->partial)
+);
+
+TRACE_EVENT(ext4_ext_rm_leaf,
+       TP_PROTO(struct inode *inode, ext4_lblk_t start,
+                struct ext4_extent *ex, ext4_fsblk_t partial_cluster),
+
+       TP_ARGS(inode, start, ex, partial_cluster),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,          ino     )
+               __field(        dev_t,          dev     )
+               __field(        ext4_lblk_t,    start   )
+               __field(        ext4_lblk_t,    ee_lblk )
+               __field(        ext4_fsblk_t,   ee_pblk )
+               __field(        short,          ee_len  )
+               __field(        ext4_fsblk_t,   partial )
+       ),
+
+       TP_fast_assign(
+               __entry->ino            = inode->i_ino;
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->start          = start;
+               __entry->ee_lblk        = le32_to_cpu(ex->ee_block);
+               __entry->ee_pblk        = ext4_ext_pblock(ex);
+               __entry->ee_len         = ext4_ext_get_actual_len(ex);
+               __entry->partial        = partial_cluster;
+       ),
+
+       TP_printk("dev %d,%d ino %lu start_lblk %u last_extent [%u(%llu), %u]"
+                 "partial_cluster %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->start,
+                 (unsigned) __entry->ee_lblk,
+                 (unsigned long long) __entry->ee_pblk,
+                 (unsigned short) __entry->ee_len,
+                 (unsigned) __entry->partial)
+);
+
+TRACE_EVENT(ext4_ext_rm_idx,
+       TP_PROTO(struct inode *inode, ext4_fsblk_t pblk),
+
+       TP_ARGS(inode, pblk),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,          ino     )
+               __field(        dev_t,          dev     )
+               __field(        ext4_fsblk_t,   pblk    )
+       ),
+
+       TP_fast_assign(
+               __entry->ino    = inode->i_ino;
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->pblk   = pblk;
+       ),
+
+       TP_printk("dev %d,%d ino %lu index_pblk %llu",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned long long) __entry->pblk)
+);
+
+TRACE_EVENT(ext4_ext_remove_space,
+       TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth),
+
+       TP_ARGS(inode, start, depth),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,          ino     )
+               __field(        dev_t,          dev     )
+               __field(        ext4_lblk_t,    start   )
+               __field(        int,            depth   )
+       ),
+
+       TP_fast_assign(
+               __entry->ino    = inode->i_ino;
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->start  = start;
+               __entry->depth  = depth;
+       ),
+
+       TP_printk("dev %d,%d ino %lu since %u depth %d",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->start,
+                 __entry->depth)
+);
+
+TRACE_EVENT(ext4_ext_remove_space_done,
+       TP_PROTO(struct inode *inode, ext4_lblk_t start, int depth,
+               ext4_lblk_t partial, unsigned short eh_entries),
+
+       TP_ARGS(inode, start, depth, partial, eh_entries),
+
+       TP_STRUCT__entry(
+               __field(        ino_t,          ino             )
+               __field(        dev_t,          dev             )
+               __field(        ext4_lblk_t,    start           )
+               __field(        int,            depth           )
+               __field(        ext4_lblk_t,    partial         )
+               __field(        unsigned short, eh_entries      )
+       ),
+
+       TP_fast_assign(
+               __entry->ino            = inode->i_ino;
+               __entry->dev            = inode->i_sb->s_dev;
+               __entry->start          = start;
+               __entry->depth          = depth;
+               __entry->partial        = partial;
+               __entry->eh_entries     = eh_entries;
+       ),
+
+       TP_printk("dev %d,%d ino %lu since %u depth %d partial %u "
+                 "remaining_entries %u",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 (unsigned) __entry->start,
+                 __entry->depth,
+                 (unsigned) __entry->partial,
+                 (unsigned short) __entry->eh_entries)
+);
+
 #endif /* _TRACE_EXT4_H */
 
 /* This part must be outside protection */