 	struct list_head	rq_queue;	/* incoming rq queue */
 	spinlock_t		lock;		/* queue, flags, open_count */
-	struct workqueue_struct	*rq_wq;
 	struct work_struct	rq_work;
 	struct rbd_image_header	header;
 static int rbd_major;
 static DEFINE_IDA(rbd_dev_id_ida);
+static struct workqueue_struct *rbd_wq;
+
 /*
  * Default to false for now, as single-major requires >= 0.75 version of
  * userspace rbd utility.
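The comment above documents the single_major module parameter, which gates the one-major-for-all-devices mode that the later hunks also touch. Purely as a point of reference, a knob like this is typically declared along these lines; the sketch below assumes the names and permissions and is not part of the hunk:

    /* Sketch only: assumed declaration of the single_major parameter. */
    static bool single_major = false;
    module_param(single_major, bool, S_IRUGO);
    MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices");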
 	}
 	if (queued)
-		queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work);
+		queue_work(rbd_wq, &rbd_dev->rq_work);
 }
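Queueing onto the shared rbd_wq still gives each device its own work item: every rbd_device embeds a work_struct (rq_work), and the handler can recover the owning device with container_of(). A minimal sketch of that pattern, with the handler name and body assumed rather than copied from the driver:

    static void rbd_request_workfn(struct work_struct *work)
    {
            /* Map the queued work item back to the device that owns it. */
            struct rbd_device *rbd_dev =
                container_of(work, struct rbd_device, rq_work);

            /* ... drain rbd_dev->rq_queue under rbd_dev->lock ... */
    }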
 /*
 	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
 	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
-	rbd_dev->rq_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
-					 rbd_dev->disk->disk_name);
-	if (!rbd_dev->rq_wq) {
-		ret = -ENOMEM;
-		goto err_out_mapping;
-	}
-
 	ret = rbd_bus_add_dev(rbd_dev);
 	if (ret)
-		goto err_out_workqueue;
+		goto err_out_mapping;
 	/* Everything's ready. Announce the disk to the world. */
 	return ret;
-err_out_workqueue:
-	destroy_workqueue(rbd_dev->rq_wq);
-	rbd_dev->rq_wq = NULL;
 err_out_mapping:
 	rbd_dev_mapping_clear(rbd_dev);
 err_out_disk:
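With the per-device workqueue allocation removed above, a failure in rbd_bus_add_dev() no longer has a workqueue to unwind, so the error path jumps straight to err_out_mapping and the err_out_workqueue label goes away with it.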
 {
 	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
-	destroy_workqueue(rbd_dev->rq_wq);
 	rbd_free_disk(rbd_dev);
 	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
 	rbd_dev_mapping_clear(rbd_dev);
 	if (rc)
 		return rc;
+	/*
+	 * The number of active work items is limited by the number of
+	 * rbd devices, so leave @max_active at default.
+	 */
+	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+	if (!rbd_wq) {
+		rc = -ENOMEM;
+		goto err_out_slab;
+	}
+
 	if (single_major) {
 		rbd_major = register_blkdev(0, RBD_DRV_NAME);
 		if (rbd_major < 0) {
 			rc = rbd_major;
-			goto err_out_slab;
+			goto err_out_wq;
 		}
 	}
 err_out_blkdev:
 	if (single_major)
 		unregister_blkdev(rbd_major, RBD_DRV_NAME);
+err_out_wq:
+	destroy_workqueue(rbd_wq);
 err_out_slab:
 	rbd_slab_exit();
 	return rc;
 	rbd_sysfs_cleanup();
 	if (single_major)
 		unregister_blkdev(rbd_major, RBD_DRV_NAME);
+	destroy_workqueue(rbd_wq);
 	rbd_slab_exit();
 }
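Two closing observations on the design, stated in terms of general workqueue semantics rather than anything this patch itself asserts: WQ_MEM_RECLAIM gives rbd_wq a rescuer thread, so queued requests can keep making progress under memory pressure, and @max_active can stay at its default because queue_work() never double-queues a work item that is already pending; each device owns exactly one rq_work, so the queue depth is bounded by the number of mapped devices. A tiny illustration of that second point (hypothetical snippet, not driver code):

    /*
     * queue_work() returns false when &rbd_dev->rq_work is already queued,
     * so a single device contributes at most one item to rbd_wq at a time.
     */
    if (!queue_work(rbd_wq, &rbd_dev->rq_work))
            pr_debug("rbd: rq_work already pending\n");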