x(reflink_p, 15) \
x(reflink_v, 16) \
x(inline_data, 17) \
- x(btree_ptr_v2, 18)
+ x(btree_ptr_v2, 18) \
+ x(indirect_inline_data, 19)
enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name = nr,
__u64 _data[0];
};
+struct bch_indirect_inline_data {
+ struct bch_val v;
+ __le64 refcount;
+ u8 data[0];
+};
+
/* Inline data */
struct bch_inline_data {
x(incompressible, 10) \
x(btree_ptr_v2, 11) \
x(extents_above_btree_updates, 12) \
- x(btree_updates_journalled, 13)
+ x(btree_updates_journalled, 13) \
+ x(reflink_inline_data, 14)
#define BCH_SB_FEATURES_ALL \
((1ULL << BCH_FEATURE_new_siphash)| \
BKEY_VAL_ACCESSORS(reflink_v);
BKEY_VAL_ACCESSORS(inline_data);
BKEY_VAL_ACCESSORS(btree_ptr_v2);
+BKEY_VAL_ACCESSORS(indirect_inline_data);
/* byte order helpers */
static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
- pr_buf(out, "(%zu bytes)", bkey_val_bytes(k.k));
+ struct bkey_s_c_inline_data d = bkey_s_c_to_inline_data(k);
+ unsigned datalen = bkey_inline_data_bytes(k.k);
+
+ pr_buf(out, "datalen %u: %*phN",
+ datalen, min(datalen, 32U), d.v->data);
}
#define bch2_bkey_ops_inline_data (struct bkey_ops) { \
return ret;
}
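+/*
+ * reflink_v and indirect_inline_data values both begin with a refcount;
+ * return a pointer to it, or NULL for any other key type:
+ */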
+static __le64 *bkey_refcount(struct bkey_i *k)
+{
+ switch (k->k.type) {
+ case KEY_TYPE_reflink_v:
+ return &bkey_i_to_reflink_v(k)->v.refcount;
+ case KEY_TYPE_indirect_inline_data:
+ return &bkey_i_to_indirect_inline_data(k)->v.refcount;
+ default:
+ return NULL;
+ }
+}
+
static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
struct bkey_s_c_reflink_p p,
u64 idx, unsigned sectors,
struct bch_fs *c = trans->c;
struct btree_iter *iter;
struct bkey_s_c k;
- struct bkey_i_reflink_v *r_v;
+ struct bkey_i *n;
+ __le64 *refcount;
s64 ret;
ret = trans_get_key(trans, BTREE_ID_REFLINK,
if (ret < 0)
return ret;
- if (k.k->type != KEY_TYPE_reflink_v) {
- bch2_fs_inconsistent(c,
- "%llu:%llu len %u points to nonexistent indirect extent %llu",
- p.k->p.inode, p.k->p.offset, p.k->size, idx);
- ret = -EIO;
- goto err;
- }
-
if ((flags & BTREE_TRIGGER_OVERWRITE) &&
(bkey_start_offset(k.k) < idx ||
k.k->p.offset > idx + sectors))
sectors = k.k->p.offset - idx;
- r_v = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
- ret = PTR_ERR_OR_ZERO(r_v);
+ n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ ret = PTR_ERR_OR_ZERO(n);
if (ret)
goto err;
- bkey_reassemble(&r_v->k_i, k);
+ bkey_reassemble(n, k);
+
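+ /*
+ * No refcount means the key we found isn't a refcounted indirect extent,
+ * i.e. the reflink pointer points at a missing indirect extent:
+ */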
+ refcount = bkey_refcount(n);
+ if (!refcount) {
+ bch2_fs_inconsistent(c,
+ "%llu:%llu len %u points to nonexistent indirect extent %llu",
+ p.k->p.inode, p.k->p.offset, p.k->size, idx);
+ ret = -EIO;
+ goto err;
+ }
- le64_add_cpu(&r_v->v.refcount,
- !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);
+ le64_add_cpu(refcount, !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1);
- if (!r_v->v.refcount) {
- r_v->k.type = KEY_TYPE_deleted;
- set_bkey_val_u64s(&r_v->k, 0);
+ if (!*refcount) {
+ n->k.type = KEY_TYPE_deleted;
+ set_bkey_val_u64s(&n->k, 0);
}
bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
- bch2_trans_update(trans, iter, &r_v->k_i, 0);
+ bch2_trans_update(trans, iter, n, 0);
out:
ret = sectors;
err:
le64_add_cpu(&p.v->idx, sub);
break;
}
- case KEY_TYPE_inline_data: {
- struct bkey_s_inline_data d = bkey_s_to_inline_data(k);
+ case KEY_TYPE_inline_data:
+ case KEY_TYPE_indirect_inline_data: {
+ void *p = bkey_inline_data_p(k);
+ unsigned bytes = bkey_inline_data_bytes(k.k);
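+ /* sub is in sectors; inline data sizes are in bytes: */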
- sub = min_t(u64, sub << 9, bkey_val_bytes(d.k));
+ sub = min_t(u64, sub << 9, bytes);
- memmove(d.v->data,
- d.v->data + sub,
- bkey_val_bytes(d.k) - sub);
+ memmove(p, p + sub, bytes - sub);
new_val_u64s -= sub >> 3;
break;
switch (k.k->type) {
case KEY_TYPE_inline_data:
- new_val_u64s = min(new_val_u64s, k.k->size << 6);
+ case KEY_TYPE_indirect_inline_data:
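+ /* keep the value header plus at most k->size sectors worth of data: */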
+ new_val_u64s = (bkey_inline_data_offset(k.k) +
+ min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
break;
}
}
}
+static inline bool bkey_extent_is_inline_data(const struct bkey *k)
+{
+ return k->type == KEY_TYPE_inline_data ||
+ k->type == KEY_TYPE_indirect_inline_data;
+}
+
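+/*
+ * Inline data lives in the bkey value after a small per-type header (which,
+ * for indirect inline data, includes the refcount); these helpers locate it:
+ */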
+static inline unsigned bkey_inline_data_offset(const struct bkey *k)
+{
+ switch (k->type) {
+ case KEY_TYPE_inline_data:
+ return sizeof(struct bch_inline_data);
+ case KEY_TYPE_indirect_inline_data:
+ return sizeof(struct bch_indirect_inline_data);
+ default:
+ BUG();
+ }
+}
+
+static inline unsigned bkey_inline_data_bytes(const struct bkey *k)
+{
+ return bkey_val_bytes(k) - bkey_inline_data_offset(k);
+}
+
+#define bkey_inline_data_p(_k) (((void *) (_k).v) + bkey_inline_data_offset((_k).k))
+
static inline bool bkey_extent_is_data(const struct bkey *k)
{
return bkey_extent_is_direct_data(k) ||
- k->type == KEY_TYPE_inline_data ||
+ bkey_extent_is_inline_data(k) ||
k->type == KEY_TYPE_reflink_p;
}
case KEY_TYPE_reflink_p:
case KEY_TYPE_reflink_v:
case KEY_TYPE_inline_data:
+ case KEY_TYPE_indirect_inline_data:
return true;
default:
return false;
if (ret)
goto err;
- if (k.k->type != KEY_TYPE_reflink_v) {
+ if (k.k->type != KEY_TYPE_reflink_v &&
+ k.k->type != KEY_TYPE_indirect_inline_data) {
__bcache_io_error(trans->c,
"pointer to nonexistent indirect extent");
ret = -EIO;
struct bpos pos = bkey_start_pos(k.k);
int pick_ret;
- if (k.k->type == KEY_TYPE_inline_data) {
- struct bkey_s_c_inline_data d = bkey_s_c_to_inline_data(k);
+ if (bkey_extent_is_inline_data(k.k)) {
unsigned bytes = min_t(unsigned, iter.bi_size,
- bkey_val_bytes(d.k));
+ bkey_inline_data_bytes(k.k));
swap(iter.bi_size, bytes);
- memcpy_to_bio(&orig->bio, iter, d.v->data);
+ memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
swap(iter.bi_size, bytes);
bio_advance_iter(&orig->bio, &iter, bytes);
zero_fill_bio_iter(&orig->bio, iter);
x(inline_data, u8, \
OPT_MOUNT|OPT_RUNTIME, \
OPT_BOOL(), \
- NO_SB_OPT, false, \
+ NO_SB_OPT, true, \
NULL, "Enable inline data extents") \
x(acl, u8, \
OPT_FORMAT|OPT_MOUNT, \
#include <linux/sched/signal.h>
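+/*
+ * Return the refcounted indirect key type corresponding to a direct extent
+ * type, or 0 if the key can't be made indirect:
+ */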
+static inline unsigned bkey_type_to_indirect(const struct bkey *k)
+{
+ switch (k->type) {
+ case KEY_TYPE_extent:
+ return KEY_TYPE_reflink_v;
+ case KEY_TYPE_inline_data:
+ return KEY_TYPE_indirect_inline_data;
+ default:
+ return 0;
+ }
+}
+
/* reflink pointers */
const char *bch2_reflink_p_invalid(const struct bch_fs *c, struct bkey_s_c k)
bch2_bkey_ptrs_to_text(out, c, k);
}
+/* indirect inline data */
+
+const char *bch2_indirect_inline_data_invalid(const struct bch_fs *c,
+ struct bkey_s_c k)
+{
+ if (bkey_val_bytes(k.k) < sizeof(struct bch_indirect_inline_data))
+ return "incorrect value size";
+ return NULL;
+}
+
+void bch2_indirect_inline_data_to_text(struct printbuf *out,
+ struct bch_fs *c, struct bkey_s_c k)
+{
+ struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k);
+ unsigned datalen = bkey_inline_data_bytes(k.k);
+
+ pr_buf(out, "refcount %llu datalen %u: %*phN",
+ le64_to_cpu(d.v->refcount), datalen,
+ min(datalen, 32U), d.v->data);
+}
+
static int bch2_make_extent_indirect(struct btree_trans *trans,
struct btree_iter *extent_iter,
- struct bkey_i_extent *e)
+ struct bkey_i *orig)
{
struct bch_fs *c = trans->c;
struct btree_iter *reflink_iter;
struct bkey_s_c k;
- struct bkey_i_reflink_v *r_v;
+ struct bkey_i *r_v;
struct bkey_i_reflink_p *r_p;
+ __le64 *refcount;
int ret;
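+ /* mark the filesystem as containing indirect inline data extents: */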
+ if (orig->k.type == KEY_TYPE_inline_data)
+ bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);
+
for_each_btree_key(trans, reflink_iter, BTREE_ID_REFLINK,
POS(0, c->reflink_hint),
BTREE_ITER_INTENT|BTREE_ITER_SLOTS, k, ret) {
continue;
}
- if (bkey_deleted(k.k) && e->k.size <= k.k->size)
+ if (bkey_deleted(k.k) && orig->k.size <= k.k->size)
break;
}
/* rewind iter to start of hole, if necessary: */
bch2_btree_iter_set_pos(reflink_iter, bkey_start_pos(k.k));
- r_v = bch2_trans_kmalloc(trans, sizeof(*r_v) + bkey_val_bytes(&e->k));
+ r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k));
ret = PTR_ERR_OR_ZERO(r_v);
if (ret)
goto err;
- bkey_reflink_v_init(&r_v->k_i);
+ bkey_init(&r_v->k);
+ r_v->k.type = bkey_type_to_indirect(&orig->k);
r_v->k.p = reflink_iter->pos;
- bch2_key_resize(&r_v->k, e->k.size);
- r_v->k.version = e->k.version;
+ bch2_key_resize(&r_v->k, orig->k.size);
+ r_v->k.version = orig->k.version;
+
+ set_bkey_val_bytes(&r_v->k, sizeof(__le64) + bkey_val_bytes(&orig->k));
- set_bkey_val_u64s(&r_v->k, bkey_val_u64s(&r_v->k) +
- bkey_val_u64s(&e->k));
- r_v->v.refcount = 0;
- memcpy(r_v->v.start, e->v.start, bkey_val_bytes(&e->k));
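+ /* the new value is the refcount followed by the original extent's value: */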
+ refcount = (void *) &r_v->v;
+ *refcount = 0;
+ memcpy(refcount + 1, &orig->v, bkey_val_bytes(&orig->k));
- bch2_trans_update(trans, reflink_iter, &r_v->k_i, 0);
+ bch2_trans_update(trans, reflink_iter, r_v, 0);
r_p = bch2_trans_kmalloc(trans, sizeof(*r_p));
if (IS_ERR(r_p))
return PTR_ERR(r_p);
- e->k.type = KEY_TYPE_reflink_p;
- r_p = bkey_i_to_reflink_p(&e->k_i);
+ orig->k.type = KEY_TYPE_reflink_p;
+ r_p = bkey_i_to_reflink_p(orig);
set_bkey_val_bytes(&r_p->k, sizeof(r_p->v));
r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));
if (bkey_cmp(iter->pos, end) >= 0)
return bkey_s_c_null;
- if (k.k->type == KEY_TYPE_extent ||
- k.k->type == KEY_TYPE_reflink_p)
+ if (bkey_extent_is_data(k.k))
break;
}
if (!bkey_cmp(dst_iter->pos, dst_end))
break;
- if (src_k.k->type == KEY_TYPE_extent) {
+ if (src_k.k->type != KEY_TYPE_reflink_p) {
bkey_on_stack_reassemble(&new_src, c, src_k);
src_k = bkey_i_to_s_c(new_src.k);
bch2_cut_back(src_end, new_src.k);
ret = bch2_make_extent_indirect(&trans, src_iter,
- bkey_i_to_extent(new_src.k));
+ new_src.k);
if (ret)
goto btree_err;
void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
-
#define bch2_bkey_ops_reflink_v (struct bkey_ops) { \
.key_invalid = bch2_reflink_v_invalid, \
.val_to_text = bch2_reflink_v_to_text, \
.swab = bch2_ptr_swab, \
}
+const char *bch2_indirect_inline_data_invalid(const struct bch_fs *,
+ struct bkey_s_c);
+void bch2_indirect_inline_data_to_text(struct printbuf *,
+ struct bch_fs *, struct bkey_s_c);
+
+#define bch2_bkey_ops_indirect_inline_data (struct bkey_ops) { \
+ .key_invalid = bch2_indirect_inline_data_invalid, \
+ .val_to_text = bch2_indirect_inline_data_to_text, \
+}
+
s64 bch2_remap_range(struct bch_fs *, struct bpos, struct bpos,
u64, u64 *, u64, s64 *);