	struct list_head list;
	struct mutex mutex;
	wait_queue_head_t wait;
-	struct btrfs_work work;
+	struct btrfs_work_struct work;
	struct btrfs_block_group_cache *block_group;
	u64 progress;
	atomic_t count;
	struct btrfs_workqueue_struct *endio_write_workers;
	struct btrfs_workqueue_struct *endio_freespace_worker;
	struct btrfs_workqueue_struct *submit_workers;
-	struct btrfs_workers caching_workers;
+	struct btrfs_workqueue_struct *caching_workers;
	struct btrfs_workers readahead_workers;
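For reference: this patch converts caching_workers to the btrfs_workqueue API added earlier in the same series (the *_struct suffixes were dropped in a later rename). The entry points below are a sketch reconstructed from the calls visible in this diff, not a verbatim copy of the header:

struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name, int flags,
						     int max_active,
						     int thresh);
void btrfs_init_work(struct btrfs_work_struct *work,
		     void (*func)(struct btrfs_work_struct *),
		     void (*ordered_func)(struct btrfs_work_struct *),
		     void (*ordered_free)(struct btrfs_work_struct *));
void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
		      struct btrfs_work_struct *work);
void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max);
void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq);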
	/*
	btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
	btrfs_destroy_workqueue(fs_info->submit_workers);
	btrfs_stop_workers(&fs_info->delayed_workers);
-	btrfs_stop_workers(&fs_info->caching_workers);
+	btrfs_destroy_workqueue(fs_info->caching_workers);
	btrfs_stop_workers(&fs_info->readahead_workers);
	btrfs_destroy_workqueue(fs_info->flush_workers);
	btrfs_stop_workers(&fs_info->qgroup_rescan_workers);
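Teardown changes shape as well: old pools are embedded in fs_info and stopped in place, while converted pools are heap-allocated and torn down through their pointer. A before/after sketch (the drain-then-free behaviour is inferred from how the call is used here):

	/* old: pool embedded in fs_info, worker threads stopped in place */
	btrfs_stop_workers(&fs_info->caching_workers);

	/* new: pool is a pointer; destroy drains pending work and frees it */
	btrfs_destroy_workqueue(fs_info->caching_workers);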
	fs_info->flush_workers =
		btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);
-	btrfs_init_workers(&fs_info->caching_workers, "cache",
-			   fs_info->thread_pool_size, NULL);
+	fs_info->caching_workers =
+		btrfs_alloc_workqueue("cache", flags, max_active, 0);
	/*
	 * a higher idle thresh on the submit workers makes it much more
	ret = btrfs_start_workers(&fs_info->generic_worker);
	ret |= btrfs_start_workers(&fs_info->fixup_workers);
	ret |= btrfs_start_workers(&fs_info->delayed_workers);
-	ret |= btrfs_start_workers(&fs_info->caching_workers);
	ret |= btrfs_start_workers(&fs_info->readahead_workers);
	ret |= btrfs_start_workers(&fs_info->qgroup_rescan_workers);
	if (ret) {
	      fs_info->endio_workers && fs_info->endio_meta_workers &&
	      fs_info->endio_meta_write_workers &&
	      fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
-	      fs_info->endio_freespace_worker && fs_info->rmw_workers)) {
+	      fs_info->endio_freespace_worker && fs_info->rmw_workers &&
+	      fs_info->caching_workers)) {
		err = -ENOMEM;
		goto fail_sb_buffer;
	}
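The btrfs_start_workers() line above is dropped with no replacement because the new type needs no separate start step: the queue is ready as soon as btrfs_alloc_workqueue() returns non-NULL. Allocation failure therefore moves out of the ret chain and into the combined check above, equivalent in isolation to:

	if (!fs_info->caching_workers) {
		err = -ENOMEM;
		goto fail_sb_buffer;
	}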
	return total_added;
}
-static noinline void caching_thread(struct btrfs_work *work)
+static noinline void caching_thread(struct btrfs_work_struct *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
-	caching_ctl->work.func = caching_thread;
+	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
	spin_lock(&cache->lock);
	/*
	btrfs_get_block_group(cache);
-	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
+	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
	return ret;
}
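btrfs_init_work() takes the main work function plus two optional callbacks for ordered completion and ordered freeing; block-group caching has no ordering requirement, hence the two NULLs. A minimal sketch of the submit pattern (the example_* names are hypothetical; the container_of() retrieval mirrors what caching_thread() does with its work argument):

static void example_thread(struct btrfs_work_struct *work)
{
	struct btrfs_caching_control *ctl =
		container_of(work, struct btrfs_caching_control, work);

	/* ... scan the block group described by ctl ... */
}

static void example_submit(struct btrfs_fs_info *fs_info,
			   struct btrfs_caching_control *ctl)
{
	btrfs_init_work(&ctl->work, example_thread, NULL, NULL);
	btrfs_queue_work(fs_info->caching_workers, &ctl->work);
}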
	btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->submit_workers, new_pool_size);
-	btrfs_set_max_workers(&fs_info->caching_workers, new_pool_size);
+	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
	btrfs_set_max_workers(&fs_info->fixup_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
	btrfs_workqueue_set_max(fs_info->endio_meta_workers, new_pool_size);
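While the series is in flight, both APIs coexist in the thread_pool= resize path: converted pools pass the workqueue pointer, unconverted ones still pass the address of the embedded struct:

	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size); /* new */
	btrfs_set_max_workers(&fs_info->fixup_workers, new_pool_size);    /* old */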