write_extent_buffer(c, root->fs_info->fsid,
(unsigned long)btrfs_header_fsid(c),
BTRFS_FSID_SIZE);
+
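+ /* copy the chunk tree uuid into the new root block's header */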
+ write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
+ (unsigned long)btrfs_header_chunk_tree_uuid(c),
+ BTRFS_UUID_SIZE);
+
btrfs_set_node_key(c, &lower_key, 0);
btrfs_set_node_blockptr(c, 0, lower->start);
lower_gen = btrfs_header_generation(lower);
write_extent_buffer(split, root->fs_info->fsid,
(unsigned long)btrfs_header_fsid(split),
BTRFS_FSID_SIZE);
+ write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
+ (unsigned long)btrfs_header_chunk_tree_uuid(split),
+ BTRFS_UUID_SIZE);
mid = (c_nritems + 1) / 2;
write_extent_buffer(right, root->fs_info->fsid,
(unsigned long)btrfs_header_fsid(right),
BTRFS_FSID_SIZE);
+
+ write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
+ (unsigned long)btrfs_header_chunk_tree_uuid(right),
+ BTRFS_UUID_SIZE);
if (mid <= slot) {
if (nritems == 1 ||
leaf_space_used(l, mid, nritems - mid) + space_needed >
* All files have objectids higher than this.
*/
#define BTRFS_FIRST_FREE_OBJECTID 256ULL
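+/*
+ * chunk items in the chunk tree all use this objectid; the key offset
+ * holds the logical start of each chunk
+ */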
+#define BTRFS_FIRST_CHUNK_TREE_OBJECTID 256ULL
/*
struct extent_map_tree map_tree;
};
-#define BTRFS_DEV_UUID_SIZE 16
+#define BTRFS_UUID_SIZE 16
struct btrfs_dev_item {
/* the internal btrfs device id */
__le64 devid;
/* type and info about this device */
__le64 type;
+ /* grouping information for allocation decisions */
+ __le32 dev_group;
+
+ /* seek speed 0-100 where 100 is fastest */
+ u8 seek_speed;
+
+ /* bandwidth 0-100 where 100 is fastest */
+ u8 bandwidth;
+
/* btrfs generated uuid for this device */
- u8 uuid[BTRFS_DEV_UUID_SIZE];
+ u8 uuid[BTRFS_UUID_SIZE];
} __attribute__ ((__packed__));
struct btrfs_stripe {
__le64 devid;
__le64 offset;
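+ /* uuid of the device that holds this stripe */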
+ u8 dev_uuid[BTRFS_UUID_SIZE];
} __attribute__ ((__packed__));
struct btrfs_chunk {
+ /* size of this chunk in bytes */
+ __le64 length;
+
+ /* objectid of the root referencing this chunk */
__le64 owner;
+
__le64 stripe_len;
__le64 type;
* every tree block (leaf or node) starts with this header.
*/
struct btrfs_header {
+ /* these first four must match the super block */
u8 csum[BTRFS_CSUM_SIZE];
u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
__le64 bytenr; /* which block this node is supposed to live in */
__le64 flags;
+
+ /* allowed to be different from the super from here on down */
+ u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
__le64 generation;
__le64 owner;
__le32 nritems;
u8 fsid[16]; /* FS specific uuid */
__le64 bytenr; /* this block number */
__le64 flags;
+
+ /* allowed to be different from the btrfs_header from here on down */
__le64 magic;
__le64 generation;
__le64 root;
/* dev extents record free space on individual devices. The owner
* field points back to the chunk allocation mapping tree that allocated
- * the extent
+ * the extent. The chunk tree uuid field is a way to double-check the owner.
*/
struct btrfs_dev_extent {
- __le64 owner;
+ __le64 chunk_tree;
+ __le64 chunk_objectid;
+ __le64 chunk_offset;
__le64 length;
+ u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
} __attribute__ ((__packed__));
-
struct btrfs_inode_ref {
__le16 name_len;
/* name goes here */
struct btrfs_block_group_item {
__le64 used;
- __le64 chunk_tree;
__le64 chunk_objectid;
__le64 flags;
} __attribute__ ((__packed__));
struct btrfs_fs_devices;
struct btrfs_fs_info {
u8 fsid[BTRFS_FSID_SIZE];
+ u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
struct btrfs_root *extent_root;
struct btrfs_root *tree_root;
struct btrfs_root *chunk_root;
BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32);
BTRFS_SETGET_FUNCS(device_sector_size, struct btrfs_dev_item, sector_size, 32);
BTRFS_SETGET_FUNCS(device_id, struct btrfs_dev_item, devid, 64);
+BTRFS_SETGET_FUNCS(device_group, struct btrfs_dev_item, dev_group, 32);
+BTRFS_SETGET_FUNCS(device_seek_speed, struct btrfs_dev_item, seek_speed, 8);
+BTRFS_SETGET_FUNCS(device_bandwidth, struct btrfs_dev_item, bandwidth, 8);
BTRFS_SETGET_STACK_FUNCS(stack_device_type, struct btrfs_dev_item, type, 64);
BTRFS_SETGET_STACK_FUNCS(stack_device_total_bytes, struct btrfs_dev_item,
BTRFS_SETGET_STACK_FUNCS(stack_device_sector_size, struct btrfs_dev_item,
sector_size, 32);
BTRFS_SETGET_STACK_FUNCS(stack_device_id, struct btrfs_dev_item, devid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_device_group, struct btrfs_dev_item,
+ dev_group, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_device_seek_speed, struct btrfs_dev_item,
+ seek_speed, 8);
+BTRFS_SETGET_STACK_FUNCS(stack_device_bandwidth, struct btrfs_dev_item,
+ bandwidth, 8);
static inline char *btrfs_device_uuid(struct btrfs_dev_item *d)
{
return (char *)d + offsetof(struct btrfs_dev_item, uuid);
}
+BTRFS_SETGET_FUNCS(chunk_length, struct btrfs_chunk, length, 64);
BTRFS_SETGET_FUNCS(chunk_owner, struct btrfs_chunk, owner, 64);
BTRFS_SETGET_FUNCS(chunk_stripe_len, struct btrfs_chunk, stripe_len, 64);
BTRFS_SETGET_FUNCS(chunk_io_align, struct btrfs_chunk, io_align, 32);
BTRFS_SETGET_FUNCS(stripe_devid, struct btrfs_stripe, devid, 64);
BTRFS_SETGET_FUNCS(stripe_offset, struct btrfs_stripe, offset, 64);
+static inline char *btrfs_stripe_dev_uuid(struct btrfs_stripe *s)
+{
+ return (char *)s + offsetof(struct btrfs_stripe, dev_uuid);
+}
+
+BTRFS_SETGET_STACK_FUNCS(stack_chunk_length, struct btrfs_chunk, length, 64);
BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64);
BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk,
stripe_len, 64);
used, 64);
BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item,
used, 64);
-BTRFS_SETGET_STACK_FUNCS(block_group_chunk_tree, struct btrfs_block_group_item,
- chunk_tree, 64);
-BTRFS_SETGET_FUNCS(disk_block_group_chunk_tree, struct btrfs_block_group_item,
- chunk_tree, 64);
BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid,
struct btrfs_block_group_item, chunk_objectid, 64);
-BTRFS_SETGET_FUNCS(disk_block_group_chunk_objecitd,
+
+BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid,
struct btrfs_block_group_item, chunk_objectid, 64);
BTRFS_SETGET_FUNCS(disk_block_group_flags,
struct btrfs_block_group_item, flags, 64);
BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 32);
/* struct btrfs_dev_extent */
-BTRFS_SETGET_FUNCS(dev_extent_owner, struct btrfs_dev_extent, owner, 64);
+BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent,
+ chunk_tree, 64);
+BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent,
+ chunk_objectid, 64);
+BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent,
+ chunk_offset, 64);
BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64);
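+/*
+ * dev is really an offset into the extent buffer, so the pointer math
+ * here yields the offset of chunk_tree_uuid for read/write_extent_buffer
+ */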
+static inline u8 *btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev)
+{
+ unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid);
+ return (u8 *)((unsigned long)dev + ptr);
+}
+
/* struct btrfs_extent_ref */
BTRFS_SETGET_FUNCS(ref_root, struct btrfs_extent_ref, root, 64);
BTRFS_SETGET_FUNCS(ref_generation, struct btrfs_extent_ref, generation, 64);
return (u8 *)ptr;
}
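+/*
+ * as with btrfs_header_fsid above, this returns the offset of the
+ * chunk_tree_uuid field in the header for read/write_extent_buffer
+ */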
+static inline u8 *btrfs_header_chunk_tree_uuid(struct extent_buffer *eb)
+{
+ unsigned long ptr = offsetof(struct btrfs_header, chunk_tree_uuid);
+ return (u8 *)ptr;
+}
+
static inline u8 *btrfs_super_fsid(struct extent_buffer *eb)
{
unsigned long ptr = offsetof(struct btrfs_super_block, fsid);
int btrfs_read_block_groups(struct btrfs_root *root);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytes_used,
- u64 type, u64 chunk_tree, u64 chunk_objectid,
+ u64 type, u64 chunk_objectid, u64 chunk_offset,
u64 size);
/* ctree.c */
int btrfs_previous_item(struct btrfs_root *root,
blocksize);
BUG_ON(!chunk_root->node);
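+ /* stash the chunk tree uuid from the chunk root's header */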
+ read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
+ (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
+ BTRFS_UUID_SIZE);
+
ret = btrfs_read_chunk_tree(chunk_root);
BUG_ON(ret);
btrfs_set_device_sector_size(sb, dev_item, dev->sector_size);
write_extent_buffer(sb, dev->uuid,
(unsigned long)btrfs_device_uuid(dev_item),
- BTRFS_DEV_UUID_SIZE);
+ BTRFS_UUID_SIZE);
btrfs_set_header_flag(sb, BTRFS_HEADER_FLAG_WRITTEN);
csum_tree_block(root, sb, 0);
btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
btrfs_root *extent_root);
-int btrfs_make_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, u64 bytes_used,
- u64 type, u64 chunk_tree, u64 chunk_objectid,
- u64 size);
static int cache_block_group(struct btrfs_root *root,
ret = get_state_private(block_group_cache, start, &ptr);
if (ret)
break;
-
cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
err = write_one_cache_group(trans, root,
path, cache);
BUG_ON(ret);
ret = btrfs_make_block_group(trans, extent_root, 0, flags,
- extent_root->fs_info->chunk_root->root_key.objectid,
- start, num_bytes);
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
BUG_ON(ret);
return 0;
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytes_used,
- u64 type, u64 chunk_tree, u64 chunk_objectid,
+ u64 type, u64 chunk_objectid, u64 chunk_offset,
u64 size)
{
int ret;
cache = kmalloc(sizeof(*cache), GFP_NOFS);
BUG_ON(!cache);
- cache->key.objectid = chunk_objectid;
+ cache->key.objectid = chunk_offset;
cache->key.offset = size;
cache->cached = 0;
cache->pinned = 0;
+
btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
memset(&cache->item, 0, sizeof(cache->item));
btrfs_set_block_group_used(&cache->item, bytes_used);
- btrfs_set_block_group_chunk_tree(&cache->item, chunk_tree);
btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
cache->flags = type;
btrfs_set_block_group_flags(&cache->item, type);
BUG_ON(ret);
bit = block_group_state_bits(type);
- set_extent_bits(block_group_cache, chunk_objectid,
- chunk_objectid + size - 1,
+ set_extent_bits(block_group_cache, chunk_offset,
+ chunk_offset + size - 1,
bit | EXTENT_LOCKED, GFP_NOFS);
- set_state_private(block_group_cache, chunk_objectid,
- (unsigned long)cache);
+ set_state_private(block_group_cache, chunk_offset,
+ (unsigned long)cache);
ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
sizeof(cache->item));
BUG_ON(ret);
{
int num_stripes = btrfs_chunk_num_stripes(eb, chunk);
int i;
- printk("\t\tchunk owner %llu type %llu num_stripes %d\n",
+ printk("\t\tchunk length %llu owner %llu type %llu num_stripes %d\n",
+ (unsigned long long)btrfs_chunk_length(eb, chunk),
(unsigned long long)btrfs_chunk_owner(eb, chunk),
(unsigned long long)btrfs_chunk_type(eb, chunk),
num_stripes);
case BTRFS_DEV_EXTENT_KEY:
dev_extent = btrfs_item_ptr(l, i,
struct btrfs_dev_extent);
- printk("\t\tdev extent owner %llu length %llu\n",
- (unsigned long long)btrfs_dev_extent_owner(l, dev_extent),
- (unsigned long long)btrfs_dev_extent_length(l, dev_extent));
+ printk("\t\tdev extent chunk_tree %llu\n"
+ "\t\tchunk objectid %llu chunk offset %llu "
+ "length %llu\n",
+ (unsigned long long)
+ btrfs_dev_extent_chunk_tree(l, dev_extent),
+ (unsigned long long)
+ btrfs_dev_extent_chunk_objectid(l, dev_extent),
+ (unsigned long long)
+ btrfs_dev_extent_chunk_offset(l, dev_extent),
+ (unsigned long long)
+ btrfs_dev_extent_length(l, dev_extent));
};
}
}
void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
{
int i;
u32 nr;
struct btrfs_key key;
int level;
list_for_each(cur, head) {
device = list_entry(cur, struct btrfs_device, dev_list);
bdev = open_bdev_excl(device->name, flags, holder);
-printk("opening %s devid %Lu\n", device->name, device->devid);
+
if (IS_ERR(bdev)) {
printk("open %s failed\n", device->name);
ret = PTR_ERR(bdev);
fs_devices->latest_bdev = bdev;
if (device->devid == fs_devices->lowest_devid) {
fs_devices->lowest_bdev = bdev;
-printk("lowest bdev %s\n", device->name);
}
device->bdev = bdev;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
- u64 owner, u64 num_bytes, u64 *start)
+ u64 chunk_tree, u64 chunk_objectid,
+ u64 chunk_offset,
+ u64 num_bytes, u64 *start)
{
int ret;
struct btrfs_path *path;
leaf = path->nodes[0];
extent = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_dev_extent);
- btrfs_set_dev_extent_owner(leaf, extent, owner);
+ btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
+ btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
+ btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
+
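+ /* record the chunk tree's uuid so the back reference can be verified */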
+ write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
+ (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
+ BTRFS_UUID_SIZE);
+
btrfs_set_dev_extent_length(leaf, extent, num_bytes);
btrfs_mark_buffer_dirty(leaf);
err:
return ret;
}
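+/*
+ * find the logical offset just past the last chunk item with the given
+ * objectid, so the next chunk can be keyed at (objectid, CHUNK_ITEM, *offset)
+ */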
-static int find_next_chunk(struct btrfs_root *root, u64 *objectid)
+static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
struct btrfs_path *path;
int ret;
struct btrfs_key key;
+ struct btrfs_chunk *chunk;
struct btrfs_key found_key;
path = btrfs_alloc_path();
BUG_ON(!path);
- key.objectid = (u64)-1;
+ key.objectid = objectid;
key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY;
ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
if (ret) {
- *objectid = 0;
+ *offset = 0;
} else {
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
- *objectid = found_key.objectid + found_key.offset;
+ if (found_key.objectid != objectid)
+ *offset = 0;
+ else {
+ chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_chunk);
+ *offset = found_key.offset +
+ btrfs_chunk_length(path->nodes[0], chunk);
+ }
}
ret = 0;
error:
btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
+ btrfs_set_device_group(leaf, dev_item, 0);
+ btrfs_set_device_seek_speed(leaf, dev_item, 0);
+ btrfs_set_device_bandwidth(leaf, dev_item, 0);
ptr = (unsigned long)btrfs_device_uuid(dev_item);
- write_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);
+ write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
btrfs_mark_buffer_dirty(leaf);
ret = 0;
return -ENOSPC;
}
- ret = find_next_chunk(chunk_root, &key.objectid);
+ key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
+ key.type = BTRFS_CHUNK_ITEM_KEY;
+ ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
+ &key.offset);
if (ret)
return ret;
*num_bytes = calc_size * num_stripes;
index = 0;
-printk("new chunk type %Lu start %Lu size %Lu\n", type, key.objectid, *num_bytes);
+printk("new chunk type %Lu start %Lu size %Lu\n", type, key.offset, *num_bytes);
while(index < num_stripes) {
+ struct btrfs_stripe *stripe;
BUG_ON(list_empty(&private_devs));
cur = private_devs.next;
device = list_entry(cur, struct btrfs_device, dev_list);
list_move_tail(&device->dev_list, dev_list);
ret = btrfs_alloc_dev_extent(trans, device,
- key.objectid,
- calc_size, &dev_offset);
+ info->chunk_root->root_key.objectid,
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
+ calc_size, &dev_offset);
BUG_ON(ret);
-printk("alloc chunk start %Lu size %Lu from dev %Lu type %Lu\n", key.objectid, calc_size, device->devid, type);
+printk("alloc chunk start %Lu size %Lu from dev %Lu type %Lu\n", key.offset, calc_size, device->devid, type);
device->bytes_used += calc_size;
ret = btrfs_update_device(trans, device);
BUG_ON(ret);
map->stripes[index].dev = device;
map->stripes[index].physical = dev_offset;
- btrfs_set_stack_stripe_devid(stripes + index, device->devid);
- btrfs_set_stack_stripe_offset(stripes + index, dev_offset);
+ stripe = stripes + index;
+ btrfs_set_stack_stripe_devid(stripe, device->devid);
+ btrfs_set_stack_stripe_offset(stripe, dev_offset);
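+ /* record the device uuid in each stripe for device back references */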
+ memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
physical = dev_offset;
index++;
}
BUG_ON(!list_empty(&private_devs));
- /* key.objectid was set above */
- key.offset = *num_bytes;
- key.type = BTRFS_CHUNK_ITEM_KEY;
+ /* key was set above */
+ btrfs_set_stack_chunk_length(chunk, *num_bytes);
btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
btrfs_set_stack_chunk_type(chunk, type);
ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
btrfs_chunk_item_size(num_stripes));
BUG_ON(ret);
- *start = key.objectid;
+ *start = key.offset;
em = alloc_extent_map(GFP_NOFS);
if (!em)
return -ENOMEM;
em->bdev = (struct block_device *)map;
- em->start = key.objectid;
- em->len = key.offset;
+ em->start = key.offset;
+ em->len = *num_bytes;
em->block_start = 0;
kfree(chunk);
int ret;
int i;
- logical = key->objectid;
- length = key->offset;
+ logical = key->offset;
+ length = btrfs_chunk_length(leaf, chunk);
spin_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
spin_unlock(&map_tree->map_tree.lock);
device->sector_size = btrfs_device_sector_size(leaf, dev_item);
ptr = (unsigned long)btrfs_device_uuid(dev_item);
- read_extent_buffer(leaf, device->uuid, ptr, BTRFS_DEV_UUID_SIZE);
+ read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
return 0;
}
struct btrfs_device *device;
u64 devid;
int ret;
-
devid = btrfs_device_id(leaf, dev_item);
device = btrfs_find_device(root, devid);
if (!device) {
u64 type;
/* physical drive uuid (or lvm uuid) */
- u8 uuid[BTRFS_DEV_UUID_SIZE];
+ u8 uuid[BTRFS_UUID_SIZE];
};
struct btrfs_fs_devices {
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
- u64 owner, u64 num_bytes, u64 *start);
+ u64 chunk_tree, u64 chunk_objectid,
+ u64 chunk_offset,
+ u64 num_bytes, u64 *start);
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
u64 logical, u64 *length,
struct btrfs_multi_bio **multi_ret, int mirror_num);