flags, allocated);
ext4_ext_show_leaf(inode, path);
- trace_ext4_ext_handle_uninitialized_extents(inode, map, allocated,
- newblock);
+ trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
+ allocated, newblock);
/* get_block() before submit the IO, split the extent */
if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
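(Illustration only, not part of the patch.) With the flags argument plumbed through to the tracepoint and the format-string fix in the tracepoint definition below, the event body reports the get_blocks flags in hex instead of map->m_flags, and the previously missing space before "allocated" is restored. A rendered trace line would look roughly like the following; the device, inode, and block numbers here are invented:

ext4_ext_handle_uninitialized_extents: dev 8,1 ino 12 m_lblk 0 m_pblk 34304 m_len 8 flags 8 allocated 8 newblock 34304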
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
);
TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
- TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
+ TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int flags,
unsigned int allocated, ext4_fsblk_t newblock),
- TP_ARGS(inode, map, allocated, newblock),
+ TP_ARGS(inode, map, flags, allocated, newblock),
TP_STRUCT__entry(
__field( dev_t, dev )
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->flags = map->m_flags;
+ __entry->flags = flags;
__entry->lblk = map->m_lblk;
__entry->pblk = map->m_pblk;
__entry->len = map->m_len;
__entry->allocated = allocated;
__entry->newblk = newblock;
),
- TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %d"
+ TP_printk("dev %d,%d ino %lu m_lblk %u m_pblk %llu m_len %u flags %x "
"allocated %d newblock %llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long) __entry->ino,