return ret;
}
-int btrfs_cross_ref_exists(struct btrfs_root *root,
+int btrfs_cross_ref_exists(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
struct btrfs_key *key, u64 bytenr)
{
- struct btrfs_trans_handle *trans;
struct btrfs_root *old_root;
struct btrfs_path *path = NULL;
struct extent_buffer *eb;
int level;
int ret;
+ BUG_ON(trans == NULL);
BUG_ON(key->type != BTRFS_EXTENT_DATA_KEY);
ret = get_reference_status(root, bytenr, 0, key->objectid,
&min_generation, &ref_count);
if (ref_count != 1)
return 1;
- trans = btrfs_start_transaction(root, 0);
old_root = root->dirty_root->root;
ref_generation = old_root->root_key.offset;
out:
if (path)
btrfs_free_path(path);
- btrfs_end_transaction(trans, root);
return ret;
}
found->total_bytes += total_bytes;
found->bytes_used += bytes_used;
found->full = 0;
- WARN_ON(found->total_bytes < found->bytes_used);
*space_info = found;
return 0;
}
return 0;
}
+/*
+ * Give a reserved-but-unused extent back to the allocator: the byte range
+ * [start, start + len - 1] is marked dirty again in
+ * fs_info->free_space_cache, making it available for future allocations.
+ * No reference counts are touched.  Always returns 0.
+ *
+ * NOTE(review): maybe_lock_mutex()/maybe_unlock_mutex() presumably take
+ * alloc_mutex only when the caller does not already hold it — confirm
+ * against their definitions elsewhere in this file.
+ */
+int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
+{
+ maybe_lock_mutex(root);
+ set_extent_dirty(&root->fs_info->free_space_cache,
+ start, start + len - 1, GFP_NOFS);
+ maybe_unlock_mutex(root);
+ return 0;
+}
+
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 num_bytes, u64 min_alloc_size,
maybe_unlock_mutex(root);
return ret;
}
+
+/*
+ * Initialize the in-memory extent buffer for a freshly allocated tree
+ * block at @bytenr: find or create the buffer, stamp its header with the
+ * current transaction id, run clean_tree_block() on it, mark it uptodate,
+ * and record its range in the transaction's dirty_pages so it is written
+ * out at commit time.  trans->blocks_used is bumped for throttling
+ * accounting.
+ *
+ * The buffer is returned with btrfs_tree_lock() held — the caller is
+ * responsible for unlocking and for freeing the buffer reference.
+ * Returns ERR_PTR(-ENOMEM) if the buffer cannot be created; note the
+ * reserved extent itself is NOT released here on failure.
+ */
+struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ u64 bytenr, u32 blocksize)
+{
+ struct extent_buffer *buf;
+
+ buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+ btrfs_set_header_generation(buf, trans->transid);
+ btrfs_tree_lock(buf);
+ clean_tree_block(trans, root, buf);
+ btrfs_set_buffer_uptodate(buf);
+ set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
+ buf->start + buf->len - 1, GFP_NOFS);
+ trans->blocks_used++;
+ return buf;
+}
+
/*
* helper function to allocate a block for a given tree
* returns the tree buffer or NULL.
BUG_ON(ret > 0);
return ERR_PTR(ret);
}
- buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
- if (!buf) {
- btrfs_free_extent(trans, root, ins.objectid, blocksize,
- root->root_key.objectid, ref_generation,
- 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
- btrfs_set_header_generation(buf, trans->transid);
- btrfs_tree_lock(buf);
- clean_tree_block(trans, root, buf);
- btrfs_set_buffer_uptodate(buf);
- if (PageDirty(buf->first_page)) {
- printk("page %lu dirty\n", buf->first_page->index);
- WARN_ON(1);
- }
-
- set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
- buf->start + buf->len - 1, GFP_NOFS);
- trans->blocks_used++;
+ buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
return buf;
}
leaf_owner = btrfs_header_owner(leaf);
leaf_generation = btrfs_header_generation(leaf);
- mutex_unlock(&root->fs_info->alloc_mutex);
-
for (i = 0; i < nritems; i++) {
u64 disk_bytenr;
cond_resched();
leaf_owner, leaf_generation,
key.objectid, key.offset, 0);
mutex_unlock(&root->fs_info->alloc_mutex);
+
+ atomic_inc(&root->fs_info->throttle_gen);
+ wake_up(&root->fs_info->transaction_throttle);
+ cond_resched();
+
BUG_ON(ret);
}
-
- mutex_lock(&root->fs_info->alloc_mutex);
return 0;
}
int ret;
struct btrfs_extent_info *info = ref->extents;
- mutex_unlock(&root->fs_info->alloc_mutex);
for (i = 0; i < ref->nritems; i++) {
mutex_lock(&root->fs_info->alloc_mutex);
ret = __btrfs_free_extent(trans, root,
ref->owner, ref->generation,
info->objectid, info->offset, 0);
mutex_unlock(&root->fs_info->alloc_mutex);
+
+ atomic_inc(&root->fs_info->throttle_gen);
+ wake_up(&root->fs_info->transaction_throttle);
+ cond_resched();
+
BUG_ON(ret);
info++;
}
- mutex_lock(&root->fs_info->alloc_mutex);
return 0;
}
-static void noinline reada_walk_down(struct btrfs_root *root,
- struct extent_buffer *node,
- int slot)
+int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
+ u32 *refs)
{
- u64 bytenr;
- u64 last = 0;
- u32 nritems;
- u32 refs;
- u32 blocksize;
int ret;
- int i;
- int level;
- int skipped = 0;
-
- nritems = btrfs_header_nritems(node);
- level = btrfs_header_level(node);
- if (level)
- return;
-
- for (i = slot; i < nritems && skipped < 32; i++) {
- bytenr = btrfs_node_blockptr(node, i);
- if (last && ((bytenr > last && bytenr - last > 32 * 1024) ||
- (last > bytenr && last - bytenr > 32 * 1024))) {
- skipped++;
- continue;
+
+ ret = lookup_extent_ref(NULL, root, start, len, refs);
+ BUG_ON(ret);
+
+#if 0 // some debugging code in case we see problems here
+ /* if the refs count is one, it won't get increased again. But
+ * if the ref count is > 1, someone may be decreasing it at
+ * the same time we are.
+ */
+ if (*refs != 1) {
+ struct extent_buffer *eb = NULL;
+ eb = btrfs_find_create_tree_block(root, start, len);
+ if (eb)
+ btrfs_tree_lock(eb);
+
+ mutex_lock(&root->fs_info->alloc_mutex);
+ ret = lookup_extent_ref(NULL, root, start, len, refs);
+ BUG_ON(ret);
+ mutex_unlock(&root->fs_info->alloc_mutex);
+
+ if (eb) {
+ btrfs_tree_unlock(eb);
+ free_extent_buffer(eb);
}
- blocksize = btrfs_level_size(root, level - 1);
- if (i != slot) {
- ret = lookup_extent_ref(NULL, root, bytenr,
- blocksize, &refs);
- BUG_ON(ret);
- if (refs != 1) {
- skipped++;
- continue;
- }
+ if (*refs == 1) {
+ printk("block %llu went down to one during drop_snap\n",
+ (unsigned long long)start);
}
- ret = readahead_tree_block(root, bytenr, blocksize,
- btrfs_node_ptr_generation(node, i));
- last = bytenr + blocksize;
- cond_resched();
- if (ret)
- break;
+
}
-}
+#endif
-int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
- u32 *refs)
-{
- int ret;
- mutex_unlock(&root->fs_info->alloc_mutex);
- ret = lookup_extent_ref(NULL, root, start, len, refs);
cond_resched();
- mutex_lock(&root->fs_info->alloc_mutex);
return ret;
}
int ret;
u32 refs;
- mutex_lock(&root->fs_info->alloc_mutex);
-
WARN_ON(*level < 0);
WARN_ON(*level >= BTRFS_MAX_LEVEL);
ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
root_owner = btrfs_header_owner(parent);
root_gen = btrfs_header_generation(parent);
path->slots[*level]++;
+
+ mutex_lock(&root->fs_info->alloc_mutex);
ret = __btrfs_free_extent(trans, root, bytenr,
blocksize, root_owner,
root_gen, 0, 0, 1);
BUG_ON(ret);
+ mutex_unlock(&root->fs_info->alloc_mutex);
+
+ atomic_inc(&root->fs_info->throttle_gen);
+ wake_up(&root->fs_info->transaction_throttle);
+ cond_resched();
+
continue;
}
-
+ /*
+ * at this point, we have a single ref, and since the
+ * only place referencing this extent is a dead root
+ * the reference count should never go higher.
+ * So, we don't need to check it again
+ */
if (*level == 1) {
struct btrfs_key key;
btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
*level = 0;
break;
}
+ if (printk_ratelimit())
+ printk("leaf ref miss for bytenr %llu\n",
+ (unsigned long long)bytenr);
}
next = btrfs_find_tree_block(root, bytenr, blocksize);
if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
free_extent_buffer(next);
- mutex_unlock(&root->fs_info->alloc_mutex);
- if (path->slots[*level] == 0)
- reada_walk_down(root, cur, path->slots[*level]);
next = read_tree_block(root, bytenr, blocksize,
ptr_gen);
cond_resched();
- mutex_lock(&root->fs_info->alloc_mutex);
-
- /* we've dropped the lock, double check */
+#if 0
+ /*
+ * this is a debugging check and can go away
+ * the ref should never go all the way down to 1
+ * at this point
+ */
ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
&refs);
BUG_ON(ret);
- if (refs != 1) {
- parent = path->nodes[*level];
- root_owner = btrfs_header_owner(parent);
- root_gen = btrfs_header_generation(parent);
-
- path->slots[*level]++;
- free_extent_buffer(next);
- ret = __btrfs_free_extent(trans, root, bytenr,
- blocksize,
- root_owner,
- root_gen, 0, 0, 1);
- BUG_ON(ret);
- continue;
- }
+ WARN_ON(refs != 1);
+#endif
}
WARN_ON(*level <= 0);
if (path->nodes[*level-1])
path->nodes[*level-1] = next;
*level = btrfs_header_level(next);
path->slots[*level] = 0;
+ cond_resched();
}
out:
WARN_ON(*level < 0);
root_owner = btrfs_header_owner(parent);
root_gen = btrfs_header_generation(parent);
+ mutex_lock(&root->fs_info->alloc_mutex);
ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
root_owner, root_gen, 0, 0, 1);
free_extent_buffer(path->nodes[*level]);
*level += 1;
BUG_ON(ret);
mutex_unlock(&root->fs_info->alloc_mutex);
+
cond_resched();
return 0;
}
}
}
while(1) {
- atomic_inc(&root->fs_info->throttle_gen);
wret = walk_down_tree(trans, root, path, &level);
if (wret > 0)
break;
ret = -EAGAIN;
break;
}
+ atomic_inc(&root->fs_info->throttle_gen);
wake_up(&root->fs_info->transaction_throttle);
}
for (i = 0; i <= orig_level; i++) {
}
set_page_extent_mapped(page);
+ /*
+ * make sure page_mkwrite is called for this page if userland
+ * wants to change it from mmap
+ */
+ clear_page_dirty_for_io(page);
- set_extent_delalloc(io_tree, page_start,
- page_end, GFP_NOFS);
+ btrfs_set_extent_delalloc(inode, page_start, page_end);
set_page_dirty(page);
unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
key.type = 0;
cur_byte = key.objectid;
+ mutex_unlock(&root->fs_info->alloc_mutex);
+
+ btrfs_start_delalloc_inodes(root);
+ btrfs_wait_ordered_extents(tree_root, 0);
+
+ mutex_lock(&root->fs_info->alloc_mutex);
+
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto out;
btrfs_clean_old_snapshots(tree_root);
- btrfs_wait_ordered_extents(tree_root);
+ btrfs_start_delalloc_inodes(root);
+ btrfs_wait_ordered_extents(tree_root, 0);
trans = btrfs_start_transaction(tree_root, 1);
btrfs_commit_transaction(trans, tree_root);
key.objectid, key.objectid + key.offset - 1,
(unsigned int)-1, GFP_NOFS);
+ /*
memset(shrink_block_group, 0, sizeof(*shrink_block_group));
kfree(shrink_block_group);
+ */
btrfs_del_item(trans, root, path);
btrfs_release_path(root, path);