/* components used during write (step 1): */
atomic_t rs_sizehint; /* hint of the write size */
- /* components used during get_local_rgrp (step 3): */
- struct gfs2_rbm rs_rbm;
struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */
struct rb_node rs_node; /* link to other block reservations */
-
- /* components used during block searches and assignments (step 4): */
+ struct gfs2_rbm rs_rbm; /* Start of reservation */
u32 rs_free; /* how many blocks are still free */
+ u64 rs_inum; /* Inode number for reservation */
/* ancillary quota stuff */
struct gfs2_quota_data *rs_qa_qd[2 * MAXQUOTAS];
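For context, rs_rbm pins the reservation to a resource group, a bitmap buffer and an offset; the rb-tree key, dump_rs and the trace point below all derive the starting disk block from it. A minimal sketch of that mapping, consistent with how the fields are used in this series (GFS2_NBBY being the number of blocks described by one bitmap byte):

static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
{
	/* first data block of the rgrp, plus the blocks covered by
	   earlier bitmap buffers, plus the offset into this bitmap */
	return rbm->rgd->rd_data0 + (rbm->bi->bi_start * GFS2_NBBY) +
	       rbm->offset;
}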
return error;
}
-static void dump_rs(struct seq_file *seq, struct gfs2_blkreserv *rs)
+static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs)
{
- gfs2_print_dbg(seq, " r: %llu s:%llu b:%u f:%u\n",
- rs->rs_rbm.rgd->rd_addr, gfs2_rbm_to_block(&rs->rs_rbm),
+ gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n",
+ (unsigned long long)rs->rs_inum,
+ (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm),
rs->rs_rbm.offset, rs->rs_free);
}
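dump_rs() prints one reservation; a sketch of how a caller such as gfs2_rgrp_dump might walk the per-rgrp tree under rd_rsspin and emit one line per reservation (assuming the rd_rstree/rd_rsspin fields referenced elsewhere in this patch):

struct rb_node *n;

spin_lock(&rgd->rd_rsspin);
for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(n)) {
	const struct gfs2_blkreserv *trs =
		rb_entry(n, struct gfs2_blkreserv, rs_node);
	dump_rs(seq, trs);	/* now takes a const pointer */
}
spin_unlock(&rgd->rd_rsspin);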
return;
rgd = rs->rs_rbm.rgd;
- trace_gfs2_rs(ip, rs, TRACE_RS_TREEDEL);
+ trace_gfs2_rs(rs, TRACE_RS_TREEDEL);
rb_erase(&rs->rs_node, &rgd->rd_rstree);
RB_CLEAR_NODE(&rs->rs_node);
BUG_ON(!rgd->rd_rs_cnt);
down_write(&ip->i_rw_mutex);
if (ip->i_res) {
gfs2_rs_deltree(ip, ip->i_res);
- trace_gfs2_rs(ip, ip->i_res, TRACE_RS_DELETE);
BUG_ON(ip->i_res->rs_free);
kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
ip->i_res = NULL;
rs->rs_free = amount;
rs->rs_rbm.offset = biblk;
rs->rs_rbm.bi = bi;
+ rs->rs_inum = ip->i_no_addr;
rb_link_node(&rs->rs_node, parent, newn);
rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
rgd->rd_reserved += amount; /* blocks reserved */
rgd->rd_rs_cnt++; /* number of in-tree reservations */
spin_unlock(&rgd->rd_rsspin);
- trace_gfs2_rs(ip, rs, TRACE_RS_INSERT);
+ trace_gfs2_rs(rs, TRACE_RS_INSERT);
return rs;
}
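The rb_link_node()/rb_insert_color() pair above relies on a parent/newn pair produced by a standard rbtree descent earlier in the function. A simplified sketch of that search, keyed on the reservation's starting disk block (the real search must additionally reject reservations whose block ranges overlap; this sketch orders purely by start block):

struct rb_node **newn = &rgd->rd_rstree.rb_node, *parent = NULL;
u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm);

while (*newn) {
	struct gfs2_blkreserv *cur =
		rb_entry(*newn, struct gfs2_blkreserv, rs_node);

	parent = *newn;
	if (fsblock > gfs2_rbm_to_block(&cur->rs_rbm))
		newn = &((*newn)->rb_right);
	else
		newn = &((*newn)->rb_left);
}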
rlen = min(rs->rs_free, len);
rs->rs_free -= rlen;
rgd->rd_reserved -= rlen;
- trace_gfs2_rs(ip, rs, TRACE_RS_CLAIM);
+ trace_gfs2_rs(rs, TRACE_RS_CLAIM);
if (rs->rs_free && !ret)
goto out;
}
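Worked example with hypothetical numbers: a reservation holding rs_free = 32 blocks satisfying an allocation of len = 8 claims rlen = min(32, 8) = 8, leaving rs_free = 24 and shrinking rgd->rd_reserved by the same 8 blocks; since rs_free is still non-zero (and ret is clear), the reservation stays in the tree and control jumps to out rather than deleting it.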
else
goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
- if ((goal < rbm.rgd->rd_data0) ||
- (goal >= rbm.rgd->rd_data0 + rbm.rgd->rd_data))
- rbm.rgd = gfs2_blk2rgrpd(sdp, goal, 1);
-
gfs2_rbm_from_block(&rbm, goal);
error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, ip, false);
/* Since all blocks are reserved in advance, this shouldn't happen */
if (error) {
- fs_warn(sdp, "error=%d, nblocks=%u, full=%d\n", error, *nblocks,
+ fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d\n",
+ (unsigned long long)ip->i_no_addr, error, *nblocks,
test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags));
goto rgrp_error;
}
/* Keep track of multi-block reservations as they are allocated/freed */
TRACE_EVENT(gfs2_rs,
- TP_PROTO(const struct gfs2_inode *ip, const struct gfs2_blkreserv *rs,
- u8 func),
+ TP_PROTO(const struct gfs2_blkreserv *rs, u8 func),
- TP_ARGS(ip, rs, func),
+ TP_ARGS(rs, func),
TP_STRUCT__entry(
	__field( dev_t, dev )
	__field( u64, rd_addr )
	__field( u32, rd_free_clone )
	__field( u32, rd_reserved )
	__field( u64, inum )
	__field( u64, start )
	__field( u32, free )
	__field( u8, func )
),
TP_fast_assign(
- __entry->dev = rs->rs_rbm.rgd ? rs->rs_rbm.rgd->rd_sbd->sd_vfs->s_dev : 0;
- __entry->rd_addr = rs->rs_rbm.rgd ? rs->rs_rbm.rgd->rd_addr : 0;
- __entry->rd_free_clone = rs->rs_rbm.rgd ? rs->rs_rbm.rgd->rd_free_clone : 0;
- __entry->rd_reserved = rs->rs_rbm.rgd ? rs->rs_rbm.rgd->rd_reserved : 0;
- __entry->inum = ip ? ip->i_no_addr : 0;
+ __entry->dev = rs->rs_rbm.rgd->rd_sbd->sd_vfs->s_dev;
+ __entry->rd_addr = rs->rs_rbm.rgd->rd_addr;
+ __entry->rd_free_clone = rs->rs_rbm.rgd->rd_free_clone;
+ __entry->rd_reserved = rs->rs_rbm.rgd->rd_reserved;
+ __entry->inum = rs->rs_inum;
__entry->start = gfs2_rbm_to_block(&rs->rs_rbm);
__entry->free = rs->rs_free;
__entry->func = func;
),
- TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s "
- "f:%lu",
+ TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s f:%lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->inum,
(unsigned long long)__entry->start,