BlockBackendPublic public;
void *dev; /* attached device model, if any */
+ bool legacy_dev; /* true if dev is not a DeviceState */
/* TODO change to DeviceState when all users are qdevified */
const BlockDevOps *dev_ops;
void *dev_opaque;
typedef struct BlockBackendAIOCB {
BlockAIOCB common;
- QEMUBH *bh;
BlockBackend *blk;
int ret;
} BlockBackendAIOCB;
return bdrv_first_blk(bs) != NULL;
}
+/*
+ * Returns true if @bs has only BlockBackends as parents. (A node without
+ * any parents at all also counts as a root node.)
+ */
+bool bdrv_is_root_node(BlockDriverState *bs)
+{
+ BdrvChild *c;
+
+ QLIST_FOREACH(c, &bs->parents, next_parent) {
+ if (c->role != &child_root) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
/*
* Return @blk's DriveInfo if any, else null.
*/
}
}
-/*
- * Attach device model @dev to @blk.
- * Return 0 on success, -EBUSY when a device model is attached already.
- */
-int blk_attach_dev(BlockBackend *blk, void *dev)
-/* TODO change to DeviceState *dev when all users are qdevified */
+static int blk_do_attach_dev(BlockBackend *blk, void *dev)
{
if (blk->dev) {
return -EBUSY;
}
blk_ref(blk);
blk->dev = dev;
+ blk->legacy_dev = false;
blk_iostatus_reset(blk);
return 0;
}
+/*
+ * Attach device model @dev to @blk.
+ * Return 0 on success, -EBUSY when a device model is attached already.
+ */
+int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
+{
+ return blk_do_attach_dev(blk, dev);
+}
+
/*
* Attach device model @dev to @blk.
* @blk must not have a device model attached already.
* TODO qdevified devices don't use this, remove when devices are qdevified
*/
-void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
+void blk_attach_dev_legacy(BlockBackend *blk, void *dev)
{
- if (blk_attach_dev(blk, dev) < 0) {
+ if (blk_do_attach_dev(blk, dev) < 0) {
abort();
}
+ blk->legacy_dev = true;
}
/*
return blk->dev;
}
+/* Return the qdev ID of the block device attached to the BlockBackend or,
+ * if no ID is assigned, its QOM path. The caller is responsible for freeing
+ * the returned string with g_free(). */
+static char *blk_get_attached_dev_id(BlockBackend *blk)
+{
+ DeviceState *dev;
+
+ assert(!blk->legacy_dev);
+ dev = blk->dev;
+
+ if (!dev) {
+ return g_strdup("");
+ } else if (dev->id) {
+ return g_strdup(dev->id);
+ }
+ return object_get_canonical_path(OBJECT(dev));
+}
+
+/*
+ * Return the BlockBackend which has the device model @dev attached if it
+ * exists, else null.
+ *
+ * @dev must not be null.
+ */
+BlockBackend *blk_by_dev(void *dev)
+{
+ BlockBackend *blk = NULL;
+
+ assert(dev != NULL);
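+ /* blk_all_next() also visits anonymous BlockBackends, so devices attached
+ * to nameless backends are found as well. */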
+ while ((blk = blk_all_next(blk)) != NULL) {
+ if (blk->dev == dev) {
+ return blk;
+ }
+ }
+ return NULL;
+}
+
/*
* Set @blk's device model callbacks to @ops.
* @opaque is the opaque argument to pass to the callbacks.
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
void *opaque)
{
+ /* All drivers that use blk_set_dev_ops() are qdevified and we want to keep
+ * it that way, so we can assume blk->dev is a DeviceState if blk->dev_ops
+ * is set. */
+ assert(!blk->legacy_dev);
+
blk->dev_ops = ops;
blk->dev_opaque = opaque;
}
if (blk->dev_ops && blk->dev_ops->change_media_cb) {
bool tray_was_open, tray_is_open;
+ assert(!blk->legacy_dev);
+
tray_was_open = blk_dev_is_tray_open(blk);
blk->dev_ops->change_media_cb(blk->dev_opaque, load);
tray_is_open = blk_dev_is_tray_open(blk);
if (tray_was_open != tray_is_open) {
- qapi_event_send_device_tray_moved(blk_name(blk), tray_is_open,
+ char *id = blk_get_attached_dev_id(blk);
+ qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open,
&error_abort);
+ g_free(id);
}
}
}
return 0;
}
-static int blk_check_request(BlockBackend *blk, int64_t sector_num,
- int nb_sectors)
-{
- if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
- return -EIO;
- }
-
- if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
- return -EIO;
- }
-
- return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
- nb_sectors * BDRV_SECTOR_SIZE);
-}
-
int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
int ret;
+ BlockDriverState *bs = blk_bs(blk);
- trace_blk_co_preadv(blk, blk_bs(blk), offset, bytes, flags);
+ trace_blk_co_preadv(blk, bs, offset, bytes, flags);
ret = blk_check_byte_request(blk, offset, bytes);
if (ret < 0) {
return ret;
}
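+ /* Mark the request as in-flight before potentially yielding in the
+ * throttling code below, so that a concurrent bdrv_drain() also waits
+ * for requests still queued by I/O throttling. */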
+ bdrv_inc_in_flight(bs);
+
/* throttling disk I/O */
if (blk->public.throttle_state) {
throttle_group_co_io_limits_intercept(blk, bytes, false);
}
- return bdrv_co_preadv(blk_bs(blk), offset, bytes, qiov, flags);
+ ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
+ bdrv_dec_in_flight(bs);
+ return ret;
}
int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
BdrvRequestFlags flags)
{
int ret;
+ BlockDriverState *bs = blk_bs(blk);
- trace_blk_co_pwritev(blk, blk_bs(blk), offset, bytes, flags);
+ trace_blk_co_pwritev(blk, bs, offset, bytes, flags);
ret = blk_check_byte_request(blk, offset, bytes);
if (ret < 0) {
return ret;
}
+ bdrv_inc_in_flight(bs);
+
/* throttling disk I/O */
if (blk->public.throttle_state) {
throttle_group_co_io_limits_intercept(blk, bytes, true);
}
if (!blk->enable_write_cache) {
flags |= BDRV_REQ_FUA;
}
- return bdrv_co_pwritev(blk_bs(blk), offset, bytes, qiov, flags);
+ ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
+ bdrv_dec_in_flight(bs);
+ return ret;
}
typedef struct BlkRwCo {
int64_t bytes, CoroutineEntry co_entry,
BdrvRequestFlags flags)
{
- AioContext *aio_context;
QEMUIOVector qiov;
struct iovec iov;
- Coroutine *co;
BlkRwCo rwco;
iov = (struct iovec) {
.iov_base = buf,
.iov_len = bytes,
};
qemu_iovec_init_external(&qiov, &iov, 1);
rwco = (BlkRwCo) {
.blk = blk,
.offset = offset,
.qiov = &qiov,
.flags = flags,
.ret = NOT_DONE,
};
- co = qemu_coroutine_create(co_entry);
- qemu_coroutine_enter(co, &rwco);
-
- aio_context = blk_get_aio_context(blk);
- while (rwco.ret == NOT_DONE) {
- aio_poll(aio_context, true);
+ if (qemu_in_coroutine()) {
+ /* Fast-path if already in coroutine context */
+ co_entry(&rwco);
+ } else {
+ Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
+ qemu_coroutine_enter(co);
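+ /* BDRV_POLL_WHILE() runs the event loop of the BDS' AioContext until
+ * the coroutine has set rwco.ret; unlike the old open-coded aio_poll()
+ * loop, it is also usable when the BDS runs in a different AioContext
+ * than the caller. */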
+ BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
}
return rwco.ret;
flags | BDRV_REQ_ZERO_WRITE);
}
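+/* Zero out the entire visible device; a thin wrapper around
+ * bdrv_make_zero() on @blk's root child. @flags can request unmapping
+ * (BDRV_REQ_MAY_UNMAP). */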
+int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
+{
+ return bdrv_make_zero(blk->root, flags);
+}
+
static void error_callback_bh(void *opaque)
{
struct BlockBackendAIOCB *acb = opaque;
- qemu_bh_delete(acb->bh);
+
+ bdrv_dec_in_flight(acb->common.bs);
acb->common.cb(acb->common.opaque, acb->ret);
qemu_aio_unref(acb);
}
void *opaque, int ret)
{
struct BlockBackendAIOCB *acb;
- QEMUBH *bh;
+ bdrv_inc_in_flight(blk_bs(blk));
acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
acb->blk = blk;
acb->ret = ret;
- bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
- acb->bh = bh;
- qemu_bh_schedule(bh);
-
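+ /* The completion callback must not run before this function has returned
+ * to its caller, so defer it with a one-shot BH; the matching
+ * bdrv_dec_in_flight() happens in error_callback_bh(). */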
+ aio_bh_schedule_oneshot(blk_get_aio_context(blk), error_callback_bh, acb);
return &acb->common;
}
BlkRwCo rwco;
int bytes;
bool has_returned;
- QEMUBH* bh;
} BlkAioEmAIOCB;
static const AIOCBInfo blk_aio_em_aiocb_info = {
static void blk_aio_complete(BlkAioEmAIOCB *acb)
{
- if (acb->bh) {
- assert(acb->has_returned);
- qemu_bh_delete(acb->bh);
- }
if (acb->has_returned) {
+ bdrv_dec_in_flight(acb->common.bs);
acb->common.cb(acb->common.opaque, acb->rwco.ret);
qemu_aio_unref(acb);
}
static void blk_aio_complete_bh(void *opaque)
{
- blk_aio_complete(opaque);
+ BlkAioEmAIOCB *acb = opaque;
+ assert(acb->has_returned);
+ blk_aio_complete(acb);
}
static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
BlkAioEmAIOCB *acb;
Coroutine *co;
+ bdrv_inc_in_flight(blk_bs(blk));
acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
acb->rwco = (BlkRwCo) {
.blk = blk,
.offset = offset,
.qiov = qiov,
.flags = flags,
.ret = NOT_DONE,
};
acb->bytes = bytes;
- acb->bh = NULL;
acb->has_returned = false;
- co = qemu_coroutine_create(co_entry);
- qemu_coroutine_enter(co, acb);
+ co = qemu_coroutine_create(co_entry, acb);
+ qemu_coroutine_enter(co);
acb->has_returned = true;
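+ /* If the coroutine finished before we got here, blk_aio_complete()
+ * skipped the completion because has_returned was still false; schedule
+ * it from a BH so the callback runs only after blk_aio_prwv() returns. */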
if (acb->rwco.ret != NOT_DONE) {
- acb->bh = aio_bh_new(blk_get_aio_context(blk), blk_aio_complete_bh, acb);
- qemu_bh_schedule(acb->bh);
+ aio_bh_schedule_oneshot(blk_get_aio_context(blk),
+ blk_aio_complete_bh, acb);
}
return &acb->common;
blk_aio_write_entry, flags, cb, opaque);
}
+static void blk_aio_flush_entry(void *opaque)
+{
+ BlkAioEmAIOCB *acb = opaque;
+ BlkRwCo *rwco = &acb->rwco;
+
+ rwco->ret = blk_co_flush(rwco->blk);
+ blk_aio_complete(acb);
+}
+
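+/* Flush now goes through the common coroutine emulation in blk_aio_prwv(),
+ * so it participates in the in-flight accounting; errors (including
+ * -ENOMEDIUM, checked in blk_co_flush()) are reported through the
+ * completion callback rather than synchronously. */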
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
BlockCompletionFunc *cb, void *opaque)
{
- if (!blk_is_available(blk)) {
- return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
- }
-
- return bdrv_aio_flush(blk_bs(blk), cb, opaque);
+ return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
}
-BlockAIOCB *blk_aio_discard(BlockBackend *blk,
- int64_t sector_num, int nb_sectors,
- BlockCompletionFunc *cb, void *opaque)
+static void blk_aio_pdiscard_entry(void *opaque)
{
- int ret = blk_check_request(blk, sector_num, nb_sectors);
- if (ret < 0) {
- return blk_abort_aio_request(blk, cb, opaque, ret);
- }
+ BlkAioEmAIOCB *acb = opaque;
+ BlkRwCo *rwco = &acb->rwco;
- return bdrv_aio_discard(blk_bs(blk), sector_num, nb_sectors, cb, opaque);
+ rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
+ blk_aio_complete(acb);
+}
+
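+/* Byte-based replacement for blk_aio_discard(): @offset and @count are
+ * given in bytes rather than sectors. */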
+BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
+ int64_t offset, int count,
+ BlockCompletionFunc *cb, void *opaque)
+{
+ return blk_aio_prwv(blk, offset, count, NULL, blk_aio_pdiscard_entry, 0,
+ cb, opaque);
}
void blk_aio_cancel(BlockAIOCB *acb)
bdrv_aio_cancel_async(acb);
}
-int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
+int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
if (!blk_is_available(blk)) {
return -ENOMEDIUM;
}
- return bdrv_ioctl(blk_bs(blk), req, buf);
+ return bdrv_co_ioctl(blk_bs(blk), req, buf);
+}
+
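+/* The BlkRwCo fields are reused for ioctls: @offset carries the ioctl
+ * request number, and the user buffer pointer travels as the base of the
+ * qiov's single iovec. */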
+static void blk_ioctl_entry(void *opaque)
+{
+ BlkRwCo *rwco = opaque;
+ rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
+ rwco->qiov->iov[0].iov_base);
+}
+
+int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
+{
+ return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
+}
+
+static void blk_aio_ioctl_entry(void *opaque)
+{
+ BlkAioEmAIOCB *acb = opaque;
+ BlkRwCo *rwco = &acb->rwco;
+
+ rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
+ rwco->qiov->iov[0].iov_base);
+ blk_aio_complete(acb);
}
BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
BlockCompletionFunc *cb, void *opaque)
{
- if (!blk_is_available(blk)) {
- return blk_abort_aio_request(blk, cb, opaque, -ENOMEDIUM);
- }
+ QEMUIOVector qiov;
+ struct iovec iov;
+
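+ /* The iovec only transports the buffer pointer; the size of the ioctl
+ * argument is implied by @req, so iov_len is left at 0. */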
+ iov = (struct iovec) {
+ .iov_base = buf,
+ .iov_len = 0,
+ };
+ qemu_iovec_init_external(&qiov, &iov, 1);
- return bdrv_aio_ioctl(blk_bs(blk), req, buf, cb, opaque);
+ return blk_aio_prwv(blk, req, 0, &qiov, blk_aio_ioctl_entry, 0, cb, opaque);
}
-int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
+int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count)
{
- int ret = blk_check_request(blk, sector_num, nb_sectors);
+ int ret = blk_check_byte_request(blk, offset, count);
if (ret < 0) {
return ret;
}
- return bdrv_co_discard(blk_bs(blk), sector_num, nb_sectors);
+ return bdrv_co_pdiscard(blk_bs(blk), offset, count);
}
int blk_co_flush(BlockBackend *blk)
return bdrv_co_flush(blk_bs(blk));
}
-int blk_flush(BlockBackend *blk)
+static void blk_flush_entry(void *opaque)
{
- if (!blk_is_available(blk)) {
- return -ENOMEDIUM;
- }
+ BlkRwCo *rwco = opaque;
+ rwco->ret = blk_co_flush(rwco->blk);
+}
- return bdrv_flush(blk_bs(blk));
+int blk_flush(BlockBackend *blk)
+{
+ return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
}
void blk_drain(BlockBackend *blk)
return BLOCK_ERROR_ACTION_REPORT;
case BLOCKDEV_ON_ERROR_IGNORE:
return BLOCK_ERROR_ACTION_IGNORE;
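+ /* 'auto' is expected to be resolved to a concrete policy by the device
+ * model before generic error handling runs, so reaching it here is a
+ * bug. */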
+ case BLOCKDEV_ON_ERROR_AUTO:
default:
abort();
}
IoOperationType optype;
optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
- qapi_event_send_block_io_error(blk_name(blk), optype, action,
- blk_iostatus_is_enabled(blk),
+ qapi_event_send_block_io_error(blk_name(blk),
+ bdrv_get_node_name(blk_bs(blk)), optype,
+ action, blk_iostatus_is_enabled(blk),
error == ENOSPC, strerror(error),
&error_abort);
}
void blk_eject(BlockBackend *blk, bool eject_flag)
{
BlockDriverState *bs = blk_bs(blk);
+ char *id;
+
+ /* blk_eject is only called by qdevified devices */
+ assert(!blk->legacy_dev);
if (bs) {
bdrv_eject(bs, eject_flag);
}
+
+ /* Whether or not we ejected on the backend,
+ * the frontend experienced a tray event. */
+ id = blk_get_attached_dev_id(blk);
+ qapi_event_send_device_tray_moved(blk_name(blk), id,
+ eject_flag, &error_abort);
+ g_free(id);
}
int blk_get_flags(BlockBackend *blk)
}
}
-int blk_get_max_transfer_length(BlockBackend *blk)
+/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
+uint32_t blk_get_max_transfer(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
+ uint32_t max = 0;
if (bs) {
- return bs->bl.max_transfer_length;
- } else {
- return 0;
+ max = bs->bl.max_transfer;
}
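+ /* In bs->bl a limit of 0 means "no limit"; clamp to INT_MAX so that
+ * callers always see a nonzero value. */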
+ return MIN_NON_ZERO(max, INT_MAX);
}
int blk_get_max_iov(BlockBackend *blk)
flags | BDRV_REQ_ZERO_WRITE);
}
-int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
- const uint8_t *buf, int nb_sectors)
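+/* Byte-based replacement for blk_write_compressed(): the compressed write
+ * goes through the regular blk_prw() path and is distinguished only by the
+ * BDRV_REQ_WRITE_COMPRESSED flag. */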
+int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
+ int count)
{
- int ret = blk_check_request(blk, sector_num, nb_sectors);
- if (ret < 0) {
- return ret;
- }
-
- return bdrv_write_compressed(blk_bs(blk), sector_num, buf, nb_sectors);
+ return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
+ BDRV_REQ_WRITE_COMPRESSED);
}
int blk_truncate(BlockBackend *blk, int64_t offset)
return -ENOMEDIUM;
}
- return bdrv_truncate(blk_bs(blk), offset);
+ return bdrv_truncate(blk->root, offset);
}
-int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
+static void blk_pdiscard_entry(void *opaque)
{
- int ret = blk_check_request(blk, sector_num, nb_sectors);
- if (ret < 0) {
- return ret;
- }
+ BlkRwCo *rwco = opaque;
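+ /* blk_prw() packed the byte count as the length of a dummy iovec with a
+ * NULL base, so it is recovered here from qiov->size. */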
+ rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, rwco->qiov->size);
+}
- return bdrv_discard(blk_bs(blk), sector_num, nb_sectors);
+int blk_pdiscard(BlockBackend *blk, int64_t offset, int count)
+{
+ return blk_prw(blk, offset, NULL, count, blk_pdiscard_entry, 0);
}
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
}
/*
- * Applies the information in the root state to the given BlockDriverState. This
- * does not include the flags which have to be specified for bdrv_open(), use
- * blk_get_open_flags_from_root_state() to inquire them.
+ * Returns the detect-zeroes setting to be used for bdrv_open() of a
+ * BlockDriverState which is supposed to inherit the root state.
*/
-void blk_apply_root_state(BlockBackend *blk, BlockDriverState *bs)
+bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
{
- bs->detect_zeroes = blk->root_state.detect_zeroes;
+ return blk->root_state.detect_zeroes;
}
/*
return 0;
}
-int blk_flush_all(void)
-{
- BlockBackend *blk = NULL;
- int result = 0;
-
- while ((blk = blk_all_next(blk)) != NULL) {
- AioContext *aio_context = blk_get_aio_context(blk);
- int ret;
-
- aio_context_acquire(aio_context);
- if (blk_is_inserted(blk)) {
- ret = blk_flush(blk);
- if (ret < 0 && !result) {
- result = ret;
- }
- }
- aio_context_release(aio_context);
- }
-
- return result;
-}
-
/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)