#include "qapi/error.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
+#include "block/block-io.h"
#include "block/block_int.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include <sys/param.h>
#include <sys/syscall.h>
#include <sys/vfs.h>
+#if defined(CONFIG_BLKZONED)
+#include <linux/blkzoned.h>
+#endif
#include <linux/cdrom.h>
#include <linux/fd.h>
#include <linux/fs.h>
PreallocMode prealloc;
Error **errp;
} truncate;
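+ /* for QEMU_AIO_ZONE_REPORT: *nr_zones is an in/out parameter */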
+ struct {
+ unsigned int *nr_zones;
+ BlockZoneDescriptor *zones;
+ } zone_report;
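+ /* for QEMU_AIO_ZONE_MGMT: op is a BLK{OPEN,CLOSE,FINISH,RESET}ZONE ioctl */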
+ struct {
+ unsigned long op;
+ } zone_mgmt;
};
} RawPosixAIOData;
#ifdef CONFIG_LINUX_AIO
/* Currently Linux does AIO only for files opened with O_DIRECT */
- if (s->use_linux_aio) {
- if (!(s->open_flags & O_DIRECT)) {
- error_setg(errp, "aio=native was specified, but it requires "
- "cache.direct=on, which was not specified.");
- ret = -EINVAL;
- goto fail;
- }
- if (!aio_setup_linux_aio(bdrv_get_aio_context(bs), errp)) {
- error_prepend(errp, "Unable to use native AIO: ");
- goto fail;
- }
+ if (s->use_linux_aio && !(s->open_flags & O_DIRECT)) {
+ error_setg(errp, "aio=native was specified, but it requires "
+ "cache.direct=on, which was not specified.");
+ ret = -EINVAL;
+ goto fail;
}
#else
if (s->use_linux_aio) {
}
#endif /* !defined(CONFIG_LINUX_AIO) */
-#ifdef CONFIG_LINUX_IO_URING
- if (s->use_linux_io_uring) {
- if (!aio_setup_linux_io_uring(bdrv_get_aio_context(bs), errp)) {
- error_prepend(errp, "Unable to use io_uring: ");
- goto fail;
- }
- }
-#else
+#ifndef CONFIG_LINUX_IO_URING
if (s->use_linux_io_uring) {
error_setg(errp, "aio=io_uring was specified, but is not supported "
"in this build.");
goto fail;
}
}
+#ifdef CONFIG_BLKZONED
+ /*
+ * The kernel page cache does not work reliably for writes to SWR zones
+ * of zoned block devices because it cannot guarantee the order of writes.
+ */
+ if ((bs->bl.zoned != BLK_Z_NONE) &&
+ (!(s->open_flags & O_DIRECT))) {
+ error_setg(errp, "The driver supports zoned devices, and it requires "
+ "cache.direct=on, which was not specified.");
+ return -EINVAL; /* No host kernel page cache */
+ }
+#endif
if (S_ISBLK(st.st_mode)) {
#ifdef __linux__
* As part of reopen prepare we also want to create new fd by
* raw_reconfigure_getfd(). But it wants updated "perm", when in
* bdrv_reopen_multiple() .bdrv_reopen_prepare() callback called prior to
- * permission update. Happily, permission update is always a part (a seprate
- * stage) of bdrv_reopen_multiple() so we can rely on this fact and
- * reconfigure fd in raw_check_perm().
+ * permission update. Happily, permission update is always a part
+ * (a separate stage) of bdrv_reopen_multiple() so we can rely on this
+ * fact and reconfigure fd in raw_check_perm().
*/
s->reopen_state = state;
#endif
}
-static int hdev_get_max_segments(int fd, struct stat *st)
+/*
+ * Get a sysfs attribute value as character string.
+ */
+#ifdef CONFIG_LINUX
+static int get_sysfs_str_val(struct stat *st, const char *attribute,
+ char **val)
+{
+ g_autofree char *sysfspath = NULL;
+ size_t len;
+
+ if (!S_ISBLK(st->st_mode)) {
+ return -ENOTSUP;
+ }
+
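+ /* e.g. /sys/dev/block/259:0/queue/zoned (device numbers are illustrative) */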
+ sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/%s",
+ major(st->st_rdev), minor(st->st_rdev),
+ attribute);
+ if (!g_file_get_contents(sysfspath, val, &len, NULL)) {
+ return -ENOENT;
+ }
+
+ /* The file content ends with '\n'; strip it */
+ char *p = *val;
+ if (*(p + len - 1) == '\n') {
+ *(p + len - 1) = '\0';
+ }
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_BLKZONED)
+static int get_sysfs_zoned_model(struct stat *st, BlockZoneModel *zoned)
{
+ g_autofree char *val = NULL;
+ int ret;
+
+ ret = get_sysfs_str_val(st, "zoned", &val);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (strcmp(val, "host-managed") == 0) {
+ *zoned = BLK_Z_HM;
+ } else if (strcmp(val, "host-aware") == 0) {
+ *zoned = BLK_Z_HA;
+ } else if (strcmp(val, "none") == 0) {
+ *zoned = BLK_Z_NONE;
+ } else {
+ return -ENOTSUP;
+ }
+ return 0;
+}
+#endif /* defined(CONFIG_BLKZONED) */
+
+/*
+ * Get a sysfs attribute value as a long integer.
+ */
#ifdef CONFIG_LINUX
- char buf[32];
+static long get_sysfs_long_val(struct stat *st, const char *attribute)
+{
+ g_autofree char *str = NULL;
const char *end;
- char *sysfspath = NULL;
+ long val;
+ int ret;
+
+ ret = get_sysfs_str_val(st, attribute, &str);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* The trailing '\n' was stripped by get_sysfs_str_val(); the whole string must parse. */
+ ret = qemu_strtol(str, &end, 10, &val);
+ if (ret == 0 && end && *end == '\0') {
+ ret = val;
+ }
+ return ret;
+}
+#endif
+
+static int hdev_get_max_segments(int fd, struct stat *st)
+{
+#ifdef CONFIG_LINUX
int ret;
- int sysfd = -1;
- long max_segments;
if (S_ISCHR(st->st_mode)) {
if (ioctl(fd, SG_GET_SG_TABLESIZE, &ret) == 0) {
}
return -ENOTSUP;
}
+ return get_sysfs_long_val(st, "max_segments");
+#else
+ return -ENOTSUP;
+#endif
+}
- if (!S_ISBLK(st->st_mode)) {
- return -ENOTSUP;
+#if defined(CONFIG_BLKZONED)
+/*
+ * If the reset_all flag is true, the write pointers of all zones whose
+ * state is not read-only or offline are reset to the zone start sector.
+ * Otherwise, take the actual write pointer reported by the device.
+ */
+static int get_zones_wp(BlockDriverState *bs, int fd, int64_t offset,
+ unsigned int nrz, bool reset_all)
+{
+ struct blk_zone *blkz;
+ size_t rep_size;
+ uint64_t sector = offset >> BDRV_SECTOR_BITS;
+ BlockZoneWps *wps = bs->wps;
+ unsigned int j = offset / bs->bl.zone_size;
+ unsigned int n = 0, i = 0;
+ int ret;
+ rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone);
+ g_autofree struct blk_zone_report *rep = NULL;
+
+ rep = g_malloc(rep_size);
+ blkz = (struct blk_zone *)(rep + 1);
+ while (n < nrz) {
+ memset(rep, 0, rep_size);
+ rep->sector = sector;
+ rep->nr_zones = nrz - n;
+
+ do {
+ ret = ioctl(fd, BLKREPORTZONE, rep);
+ } while (ret != 0 && errno == EINTR);
+ if (ret != 0) {
+ error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d",
+ fd, offset, errno);
+ return -errno;
+ }
+
+ if (!rep->nr_zones) {
+ break;
+ }
+
+ for (i = 0; i < rep->nr_zones; ++i, ++n, ++j) {
+ /*
+ * The wp tracking cares only about sequential writes required and
+ * sequential write preferred zones so that the wp can advance to
+ * the right location.
+ * Use the most significant bit of the wp location to indicate the
+ * zone type: 0 for SWR/SWP zones and 1 for conventional zones.
+ */
+ if (blkz[i].type == BLK_ZONE_TYPE_CONVENTIONAL) {
+ wps->wp[j] |= 1ULL << 63;
+ } else {
+ switch (blkz[i].cond) {
+ case BLK_ZONE_COND_FULL:
+ case BLK_ZONE_COND_READONLY:
+ /* Zone not writable */
+ wps->wp[j] = (blkz[i].start + blkz[i].len) << BDRV_SECTOR_BITS;
+ break;
+ case BLK_ZONE_COND_OFFLINE:
+ /* Zone not writable nor readable */
+ wps->wp[j] = (blkz[i].start) << BDRV_SECTOR_BITS;
+ break;
+ default:
+ if (reset_all) {
+ wps->wp[j] = blkz[i].start << BDRV_SECTOR_BITS;
+ } else {
+ wps->wp[j] = blkz[i].wp << BDRV_SECTOR_BITS;
+ }
+ break;
+ }
+ }
+ }
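+ /* The next BLKREPORTZONE round starts after the last zone returned */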
+ sector = blkz[i - 1].start + blkz[i - 1].len;
}
- sysfspath = g_strdup_printf("/sys/dev/block/%u:%u/queue/max_segments",
- major(st->st_rdev), minor(st->st_rdev));
- sysfd = open(sysfspath, O_RDONLY);
- if (sysfd == -1) {
- ret = -errno;
- goto out;
+ return 0;
+}
+
+static void update_zones_wp(BlockDriverState *bs, int fd, int64_t offset,
+ unsigned int nrz)
+{
+ if (get_zones_wp(bs, fd, offset, nrz, false) < 0) {
+ error_report("update zone wp failed");
+ }
+}
+
+static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
+ Error **errp)
+{
+ BDRVRawState *s = bs->opaque;
+ BlockZoneModel zoned;
+ int ret;
+
+ ret = get_sysfs_zoned_model(st, &zoned);
+ if (ret < 0 || zoned == BLK_Z_NONE) {
+ goto no_zoned;
+ }
+ bs->bl.zoned = zoned;
+
+ ret = get_sysfs_long_val(st, "max_open_zones");
+ if (ret >= 0) {
+ bs->bl.max_open_zones = ret;
+ }
+
+ ret = get_sysfs_long_val(st, "max_active_zones");
+ if (ret >= 0) {
+ bs->bl.max_active_zones = ret;
}
- ret = RETRY_ON_EINTR(read(sysfd, buf, sizeof(buf) - 1));
+
+ /*
+ * A zoned device must report at least the zone size and nr_zones fields.
+ */
+ ret = get_sysfs_long_val(st, "chunk_sectors");
if (ret < 0) {
- ret = -errno;
- goto out;
- } else if (ret == 0) {
- ret = -EIO;
- goto out;
+ error_setg_errno(errp, -ret, "Unable to read chunk_sectors "
+ "sysfs attribute");
+ goto no_zoned;
+ } else if (!ret) {
+ error_setg(errp, "Read 0 from chunk_sectors sysfs attribute");
+ goto no_zoned;
}
- buf[ret] = 0;
- /* The file is ended with '\n', pass 'end' to accept that. */
- ret = qemu_strtol(buf, &end, 10, &max_segments);
- if (ret == 0 && end && *end == '\n') {
- ret = max_segments;
+ bs->bl.zone_size = ret << BDRV_SECTOR_BITS;
+
+ ret = get_sysfs_long_val(st, "nr_zones");
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "Unable to read nr_zones "
+ "sysfs attribute");
+ goto no_zoned;
+ } else if (!ret) {
+ error_setg(errp, "Read 0 from nr_zones sysfs attribute");
+ goto no_zoned;
}
+ bs->bl.nr_zones = ret;
-out:
- if (sysfd != -1) {
- close(sysfd);
+ ret = get_sysfs_long_val(st, "zone_append_max_bytes");
+ if (ret > 0) {
+ bs->bl.max_append_sectors = ret >> BDRV_SECTOR_BITS;
}
- g_free(sysfspath);
- return ret;
-#else
- return -ENOTSUP;
-#endif
+
+ ret = get_sysfs_long_val(st, "physical_block_size");
+ if (ret >= 0) {
+ bs->bl.write_granularity = ret;
+ }
+
+ /* The refresh_limits() function can be called multiple times. */
+ g_free(bs->wps);
+ bs->wps = g_malloc(sizeof(BlockZoneWps) +
+ sizeof(int64_t) * bs->bl.nr_zones);
+ ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, false);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "report wps failed");
+ goto no_zoned;
+ }
+ qemu_co_mutex_init(&bs->wps->colock);
+ return;
+
+no_zoned:
+ bs->bl.zoned = BLK_Z_NONE;
+ g_free(bs->wps);
+ bs->wps = NULL;
+}
+#else /* !defined(CONFIG_BLKZONED) */
+static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
+ Error **errp)
+{
+ bs->bl.zoned = BLK_Z_NONE;
}
+#endif /* !defined(CONFIG_BLKZONED) */
static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
{
bs->bl.max_hw_iov = ret;
}
}
+
+ raw_refresh_zoned_limits(bs, &st, errp);
}
static int check_for_dasd(int fd)
BDRVRawState *s = bs->opaque;
int ret;
- /* If DASD, get blocksizes */
+ /* If DASD or zoned devices, get blocksizes */
if (check_for_dasd(s->fd) < 0) {
- return -ENOTSUP;
+ /* zoned devices are not DASD */
+ if (bs->bl.zoned == BLK_Z_NONE) {
+ return -ENOTSUP;
+ }
}
ret = probe_logical_blocksize(s->fd, &bsz->log);
if (ret < 0) {
ssize_t len;
len = RETRY_ON_EINTR(
- (aiocb->aio_type & QEMU_AIO_WRITE) ?
+ (aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) ?
qemu_pwritev(aiocb->aio_fildes,
aiocb->io.iov,
aiocb->io.niov,
ssize_t len;
while (offset < aiocb->aio_nbytes) {
- if (aiocb->aio_type & QEMU_AIO_WRITE) {
+ if (aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) {
len = pwrite(aiocb->aio_fildes,
(const char *)buf + offset,
aiocb->aio_nbytes - offset,
}
nbytes = handle_aiocb_rw_linear(aiocb, buf);
- if (!(aiocb->aio_type & QEMU_AIO_WRITE)) {
+ if (!(aiocb->aio_type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND))) {
char *p = buf;
size_t count = aiocb->aio_nbytes, copy;
int i;
#ifdef CONFIG_FALLOCATE
/* Last resort: we are trying to extend the file with zeroed data. This
* can be done via fallocate(fd, 0) */
- len = bdrv_getlength(aiocb->bs);
+ len = raw_getlength(aiocb->bs);
if (s->has_fallocate && len >= 0 && aiocb->aio_offset >= len) {
int ret = do_fallocate(s->fd, 0, aiocb->aio_offset, aiocb->aio_nbytes);
if (ret == 0 || ret != -ENOTSUP) {
}
#endif
+/*
+ * parse_zone - Fill a zone descriptor
+ */
+#if defined(CONFIG_BLKZONED)
+static inline int parse_zone(struct BlockZoneDescriptor *zone,
+ const struct blk_zone *blkz)
+{
+ zone->start = blkz->start << BDRV_SECTOR_BITS;
+ zone->length = blkz->len << BDRV_SECTOR_BITS;
+ zone->wp = blkz->wp << BDRV_SECTOR_BITS;
+
+#ifdef HAVE_BLK_ZONE_REP_CAPACITY
+ zone->cap = blkz->capacity << BDRV_SECTOR_BITS;
+#else
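+ /* Kernel headers without zone capacity support; assume cap == len */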
+ zone->cap = blkz->len << BDRV_SECTOR_BITS;
+#endif
+
+ switch (blkz->type) {
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ zone->type = BLK_ZT_SWR;
+ break;
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
+ zone->type = BLK_ZT_SWP;
+ break;
+ case BLK_ZONE_TYPE_CONVENTIONAL:
+ zone->type = BLK_ZT_CONV;
+ break;
+ default:
+ error_report("Unsupported zone type: 0x%x", blkz->type);
+ return -ENOTSUP;
+ }
+
+ switch (blkz->cond) {
+ case BLK_ZONE_COND_NOT_WP:
+ zone->state = BLK_ZS_NOT_WP;
+ break;
+ case BLK_ZONE_COND_EMPTY:
+ zone->state = BLK_ZS_EMPTY;
+ break;
+ case BLK_ZONE_COND_IMP_OPEN:
+ zone->state = BLK_ZS_IOPEN;
+ break;
+ case BLK_ZONE_COND_EXP_OPEN:
+ zone->state = BLK_ZS_EOPEN;
+ break;
+ case BLK_ZONE_COND_CLOSED:
+ zone->state = BLK_ZS_CLOSED;
+ break;
+ case BLK_ZONE_COND_READONLY:
+ zone->state = BLK_ZS_RDONLY;
+ break;
+ case BLK_ZONE_COND_FULL:
+ zone->state = BLK_ZS_FULL;
+ break;
+ case BLK_ZONE_COND_OFFLINE:
+ zone->state = BLK_ZS_OFFLINE;
+ break;
+ default:
+ error_report("Unsupported zone state: 0x%x", blkz->cond);
+ return -ENOTSUP;
+ }
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_BLKZONED)
+static int handle_aiocb_zone_report(void *opaque)
+{
+ RawPosixAIOData *aiocb = opaque;
+ int fd = aiocb->aio_fildes;
+ unsigned int *nr_zones = aiocb->zone_report.nr_zones;
+ BlockZoneDescriptor *zones = aiocb->zone_report.zones;
+ /* zoned block devices use 512-byte sectors */
+ uint64_t sector = aiocb->aio_offset / 512;
+
+ struct blk_zone *blkz;
+ size_t rep_size;
+ unsigned int nrz;
+ int ret;
+ unsigned int n = 0, i = 0;
+
+ nrz = *nr_zones;
+ rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone);
+ g_autofree struct blk_zone_report *rep = NULL;
+ rep = g_malloc(rep_size);
+
+ blkz = (struct blk_zone *)(rep + 1);
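+ /* BLKREPORTZONE may return fewer zones than requested; loop until done */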
+ while (n < nrz) {
+ memset(rep, 0, rep_size);
+ rep->sector = sector;
+ rep->nr_zones = nrz - n;
+
+ do {
+ ret = ioctl(fd, BLKREPORTZONE, rep);
+ } while (ret != 0 && errno == EINTR);
+ if (ret != 0) {
+ error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d",
+ fd, sector, errno);
+ return -errno;
+ }
+
+ if (!rep->nr_zones) {
+ break;
+ }
+
+ for (i = 0; i < rep->nr_zones; i++, n++) {
+ ret = parse_zone(&zones[n], &blkz[i]);
+ if (ret != 0) {
+ return ret;
+ }
+
+ /* The next report should start after the last zone reported */
+ sector = blkz[i].start + blkz[i].len;
+ }
+ }
+
+ *nr_zones = n;
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_BLKZONED)
+static int handle_aiocb_zone_mgmt(void *opaque)
+{
+ RawPosixAIOData *aiocb = opaque;
+ int fd = aiocb->aio_fildes;
+ uint64_t sector = aiocb->aio_offset / 512;
+ int64_t nr_sectors = aiocb->aio_nbytes / 512;
+ struct blk_zone_range range;
+ int ret;
+
+ /* Execute the operation */
+ range.sector = sector;
+ range.nr_sectors = nr_sectors;
+ do {
+ ret = ioctl(fd, aiocb->zone_mgmt.op, &range);
+ } while (ret != 0 && errno == EINTR);
+
+ return ret < 0 ? -errno : ret;
+}
+#endif
+
static int handle_aiocb_copy_range(void *opaque)
{
RawPosixAIOData *aiocb = opaque;
return result;
}
-static int coroutine_fn raw_thread_pool_submit(BlockDriverState *bs,
- ThreadPoolFunc func, void *arg)
+static int coroutine_fn raw_thread_pool_submit(ThreadPoolFunc func, void *arg)
{
- /* @bs can be NULL, bdrv_get_aio_context() returns the main context then */
- ThreadPool *pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
- return thread_pool_submit_co(pool, func, arg);
+ return thread_pool_submit_co(func, arg);
}
/*
return true;
}
-static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
+#ifdef CONFIG_LINUX_IO_URING
+static inline bool raw_check_linux_io_uring(BDRVRawState *s)
+{
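+ /*
+ * Lazily set up io_uring in the current AioContext; on failure fall
+ * back to the thread pool.
+ */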
+ Error *local_err = NULL;
+ AioContext *ctx;
+
+ if (!s->use_linux_io_uring) {
+ return false;
+ }
+
+ ctx = qemu_get_current_aio_context();
+ if (unlikely(!aio_setup_linux_io_uring(ctx, &local_err))) {
+ error_reportf_err(local_err, "Unable to use linux io_uring, "
+ "falling back to thread pool: ");
+ s->use_linux_io_uring = false;
+ return false;
+ }
+ return true;
+}
+#endif
+
+#ifdef CONFIG_LINUX_AIO
+static inline bool raw_check_linux_aio(BDRVRawState *s)
+{
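+ /*
+ * Lazily set up Linux AIO in the current AioContext; on failure fall
+ * back to the thread pool.
+ */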
+ Error *local_err = NULL;
+ AioContext *ctx;
+
+ if (!s->use_linux_aio) {
+ return false;
+ }
+
+ ctx = qemu_get_current_aio_context();
+ if (unlikely(!aio_setup_linux_aio(ctx, &local_err))) {
+ error_reportf_err(local_err, "Unable to use Linux AIO, "
+ "falling back to thread pool: ");
+ s->use_linux_aio = false;
+ return false;
+ }
+ return true;
+}
+#endif
+
+static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr,
uint64_t bytes, QEMUIOVector *qiov, int type)
{
BDRVRawState *s = bs->opaque;
RawPosixAIOData acb;
+ int ret;
+ uint64_t offset = *offset_ptr;
if (fd_open(bs) < 0)
return -EIO;
+#if defined(CONFIG_BLKZONED)
+ if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
+ bs->bl.zoned != BLK_Z_NONE) {
+ qemu_co_mutex_lock(&bs->wps->colock);
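+ /*
+ * A zone append ignores the caller's offset and writes at the zone's
+ * current write pointer; the final offset is reported back through
+ * *offset_ptr on completion.
+ */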
+ if (type & QEMU_AIO_ZONE_APPEND) {
+ int index = offset / bs->bl.zone_size;
+ offset = bs->wps->wp[index];
+ }
+ }
+#endif
/*
* When using O_DIRECT, the request must be aligned to be able to use
if (s->needs_alignment && !bdrv_qiov_is_aligned(bs, qiov)) {
type |= QEMU_AIO_MISALIGNED;
#ifdef CONFIG_LINUX_IO_URING
- } else if (s->use_linux_io_uring) {
- LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
+ } else if (raw_check_linux_io_uring(s)) {
assert(qiov->size == bytes);
- return luring_co_submit(bs, aio, s->fd, offset, qiov, type);
+ ret = luring_co_submit(bs, s->fd, offset, qiov, type);
+ goto out;
#endif
#ifdef CONFIG_LINUX_AIO
- } else if (s->use_linux_aio) {
- LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
+ } else if (raw_check_linux_aio(s)) {
assert(qiov->size == bytes);
- return laio_co_submit(bs, aio, s->fd, offset, qiov, type,
+ ret = laio_co_submit(s->fd, offset, qiov, type,
s->aio_max_batch);
+ goto out;
#endif
}
};
assert(qiov->size == bytes);
- return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
+ ret = raw_thread_pool_submit(handle_aiocb_rw, &acb);
+ goto out; /* Avoid the compiler's "unused label" error */
+
+out:
+#if defined(CONFIG_BLKZONED)
+ if ((type & (QEMU_AIO_WRITE | QEMU_AIO_ZONE_APPEND)) &&
+ bs->bl.zoned != BLK_Z_NONE) {
+ BlockZoneWps *wps = bs->wps;
+ if (ret == 0) {
+ uint64_t *wp = &wps->wp[offset / bs->bl.zone_size];
+ if (!BDRV_ZT_IS_CONV(*wp)) {
+ if (type & QEMU_AIO_ZONE_APPEND) {
+ *offset_ptr = *wp;
+ trace_zbd_zone_append_complete(bs, *offset_ptr
+ >> BDRV_SECTOR_BITS);
+ }
+ /* Advance the wp if needed */
+ if (offset + bytes > *wp) {
+ *wp = offset + bytes;
+ }
+ }
+ } else {
+ /*
+ * Writes and zone appends are not allowed to cross zone boundaries,
+ * so refreshing the single affected zone is enough.
+ */
+ update_zones_wp(bs, s->fd, offset, 1);
+ }
+
+ qemu_co_mutex_unlock(&wps->colock);
+ }
+#endif
+ return ret;
}
static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
- return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_READ);
+ return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_READ);
}
static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
int64_t bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
- return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_WRITE);
-}
-
-static void raw_aio_plug(BlockDriverState *bs)
-{
- BDRVRawState __attribute__((unused)) *s = bs->opaque;
-#ifdef CONFIG_LINUX_AIO
- if (s->use_linux_aio) {
- LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
- laio_io_plug(bs, aio);
- }
-#endif
-#ifdef CONFIG_LINUX_IO_URING
- if (s->use_linux_io_uring) {
- LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
- luring_io_plug(bs, aio);
- }
-#endif
-}
-
-static void raw_aio_unplug(BlockDriverState *bs)
-{
- BDRVRawState __attribute__((unused)) *s = bs->opaque;
-#ifdef CONFIG_LINUX_AIO
- if (s->use_linux_aio) {
- LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
- laio_io_unplug(bs, aio, s->aio_max_batch);
- }
-#endif
-#ifdef CONFIG_LINUX_IO_URING
- if (s->use_linux_io_uring) {
- LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
- luring_io_unplug(bs, aio);
- }
-#endif
+ return raw_co_prw(bs, &offset, bytes, qiov, QEMU_AIO_WRITE);
}
static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs)
};
#ifdef CONFIG_LINUX_IO_URING
- if (s->use_linux_io_uring) {
- LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
- return luring_co_submit(bs, aio, s->fd, 0, NULL, QEMU_AIO_FLUSH);
- }
-#endif
- return raw_thread_pool_submit(bs, handle_aiocb_flush, &acb);
-}
-
-static void raw_aio_attach_aio_context(BlockDriverState *bs,
- AioContext *new_context)
-{
- BDRVRawState __attribute__((unused)) *s = bs->opaque;
-#ifdef CONFIG_LINUX_AIO
- if (s->use_linux_aio) {
- Error *local_err = NULL;
- if (!aio_setup_linux_aio(new_context, &local_err)) {
- error_reportf_err(local_err, "Unable to use native AIO, "
- "falling back to thread pool: ");
- s->use_linux_aio = false;
- }
- }
-#endif
-#ifdef CONFIG_LINUX_IO_URING
- if (s->use_linux_io_uring) {
- Error *local_err = NULL;
- if (!aio_setup_linux_io_uring(new_context, &local_err)) {
- error_reportf_err(local_err, "Unable to use linux io_uring, "
- "falling back to thread pool: ");
- s->use_linux_io_uring = false;
- }
+ if (raw_check_linux_io_uring(s)) {
+ return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH);
}
#endif
+ return raw_thread_pool_submit(handle_aiocb_flush, &acb);
}
static void raw_close(BlockDriverState *bs)
BDRVRawState *s = bs->opaque;
if (s->fd >= 0) {
+#if defined(CONFIG_BLKZONED)
+ g_free(bs->wps);
+#endif
qemu_close(s->fd);
s->fd = -1;
}
},
};
- return raw_thread_pool_submit(bs, handle_aiocb_truncate, &acb);
+ return raw_thread_pool_submit(handle_aiocb_truncate, &acb);
}
static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset,
}
#endif
-static int64_t raw_get_allocated_file_size(BlockDriverState *bs)
+static int64_t coroutine_fn raw_co_getlength(BlockDriverState *bs)
+{
+ return raw_getlength(bs);
+}
+
+static int64_t coroutine_fn raw_co_get_allocated_file_size(BlockDriverState *bs)
{
struct stat st;
BDRVRawState *s = bs->opaque;
return result;
}
-static int coroutine_fn raw_co_create_opts(BlockDriver *drv,
- const char *filename,
- QemuOpts *opts,
- Error **errp)
+static int coroutine_fn GRAPH_RDLOCK
+raw_co_create_opts(BlockDriver *drv, const char *filename,
+ QemuOpts *opts, Error **errp)
{
BlockdevCreateOptions options;
int64_t total_size = 0;
}
#endif /* __linux__ */
-static void coroutine_fn raw_co_invalidate_cache(BlockDriverState *bs,
- Error **errp)
+static void coroutine_fn GRAPH_RDLOCK
+raw_co_invalidate_cache(BlockDriverState *bs, Error **errp)
{
BDRVRawState *s = bs->opaque;
int ret;
}
}
+/*
+ * zone report - Get a zoned block device's information in the form
+ * of an array of zone descriptors.
+ * zones is an array of zone descriptors to hold zone information on reply;
+ * offset can be any byte within the entire size of the device;
+ * nr_zones is the maximum number of zones the command should operate on.
+ */
+#if defined(CONFIG_BLKZONED)
+static int coroutine_fn raw_co_zone_report(BlockDriverState *bs, int64_t offset,
+ unsigned int *nr_zones,
+ BlockZoneDescriptor *zones)
+{
+ BDRVRawState *s = bs->opaque;
+ RawPosixAIOData acb = (RawPosixAIOData) {
+ .bs = bs,
+ .aio_fildes = s->fd,
+ .aio_type = QEMU_AIO_ZONE_REPORT,
+ .aio_offset = offset,
+ .zone_report = {
+ .nr_zones = nr_zones,
+ .zones = zones,
+ },
+ };
+
+ trace_zbd_zone_report(bs, *nr_zones, offset >> BDRV_SECTOR_BITS);
+ return raw_thread_pool_submit(handle_aiocb_zone_report, &acb);
+}
+#endif
+
+/*
+ * zone management operations - Execute an operation on a zone
+ */
+#if defined(CONFIG_BLKZONED)
+static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
+ int64_t offset, int64_t len)
+{
+ BDRVRawState *s = bs->opaque;
+ RawPosixAIOData acb;
+ int64_t zone_size, zone_size_mask;
+ const char *op_name;
+ unsigned long zo;
+ int ret;
+ BlockZoneWps *wps = bs->wps;
+ int64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
+
+ zone_size = bs->bl.zone_size;
+ zone_size_mask = zone_size - 1;
+ if (offset & zone_size_mask) {
+ error_report("sector offset %" PRId64 " is not aligned to zone size "
+ "%" PRId64 "", offset / 512, zone_size / 512);
+ return -EINVAL;
+ }
+
+ if (((offset + len) < capacity && len & zone_size_mask) ||
+ offset + len > capacity) {
+ error_report("number of sectors %" PRId64 " is not aligned to zone size"
+ " %" PRId64 "", len / 512, zone_size / 512);
+ return -EINVAL;
+ }
+
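+ /* Index of the first zone and the number of zones in this request */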
+ uint32_t i = offset / bs->bl.zone_size;
+ uint32_t nrz = len / bs->bl.zone_size;
+ uint64_t *wp = &wps->wp[i];
+ if (BDRV_ZT_IS_CONV(*wp) && len != capacity) {
+ error_report("zone mgmt operations are not allowed for conventional zones");
+ return -EIO;
+ }
+
+ switch (op) {
+ case BLK_ZO_OPEN:
+ op_name = "BLKOPENZONE";
+ zo = BLKOPENZONE;
+ break;
+ case BLK_ZO_CLOSE:
+ op_name = "BLKCLOSEZONE";
+ zo = BLKCLOSEZONE;
+ break;
+ case BLK_ZO_FINISH:
+ op_name = "BLKFINISHZONE";
+ zo = BLKFINISHZONE;
+ break;
+ case BLK_ZO_RESET:
+ op_name = "BLKRESETZONE";
+ zo = BLKRESETZONE;
+ break;
+ default:
+ error_report("Unsupported zone op: 0x%x", op);
+ return -ENOTSUP;
+ }
+
+ acb = (RawPosixAIOData) {
+ .bs = bs,
+ .aio_fildes = s->fd,
+ .aio_type = QEMU_AIO_ZONE_MGMT,
+ .aio_offset = offset,
+ .aio_nbytes = len,
+ .zone_mgmt = {
+ .op = zo,
+ },
+ };
+
+ trace_zbd_zone_mgmt(bs, op_name, offset >> BDRV_SECTOR_BITS,
+ len >> BDRV_SECTOR_BITS);
+ ret = raw_thread_pool_submit(handle_aiocb_zone_mgmt, &acb);
+ if (ret != 0) {
+ update_zones_wp(bs, s->fd, offset, nrz);
+ error_report("ioctl %s failed %d", op_name, ret);
+ return ret;
+ }
+
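+ /* Bring the cached write pointers in line with the new zone states */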
+ if (zo == BLKRESETZONE && len == capacity) {
+ ret = get_zones_wp(bs, s->fd, 0, bs->bl.nr_zones, true);
+ if (ret < 0) {
+ error_report("reporting single wp failed");
+ return ret;
+ }
+ } else if (zo == BLKRESETZONE) {
+ for (unsigned int j = 0; j < nrz; ++j) {
+ wp[j] = offset + j * zone_size;
+ }
+ } else if (zo == BLKFINISHZONE) {
+ for (unsigned int j = 0; j < nrz; ++j) {
+ /*
+ * The zoned device allows the last zone to be smaller than the
+ * zone size.
+ */
+ wp[j] = MIN(offset + (j + 1) * zone_size, offset + len);
+ }
+ }
+
+ return ret;
+}
+#endif
+
+#if defined(CONFIG_BLKZONED)
+static int coroutine_fn raw_co_zone_append(BlockDriverState *bs,
+ int64_t *offset,
+ QEMUIOVector *qiov,
+ BdrvRequestFlags flags)
+{
+ assert(flags == 0);
+ int64_t zone_size_mask = bs->bl.zone_size - 1;
+ int64_t iov_len = 0;
+ int64_t len = 0;
+
+ if (*offset & zone_size_mask) {
+ error_report("sector offset %" PRId64 " is not aligned to zone size "
+ "%" PRId32 "", *offset / 512, bs->bl.zone_size / 512);
+ return -EINVAL;
+ }
+
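+ /* Each I/O vector element must be aligned to the device write granularity */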
+ int64_t wg = bs->bl.write_granularity;
+ int64_t wg_mask = wg - 1;
+ for (int i = 0; i < qiov->niov; i++) {
+ iov_len = qiov->iov[i].iov_len;
+ if (iov_len & wg_mask) {
+ error_report("len of IOVector[%d] %" PRId64 " is not aligned to "
+ "block size %" PRId64 "", i, iov_len, wg);
+ return -EINVAL;
+ }
+ len += iov_len;
+ }
+
+ trace_zbd_zone_append(bs, *offset >> BDRV_SECTOR_BITS);
+ return raw_co_prw(bs, offset, len, qiov, QEMU_AIO_ZONE_APPEND);
+}
+#endif
+
static coroutine_fn int
raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes,
bool blkdev)
acb.aio_type |= QEMU_AIO_BLKDEV;
}
- ret = raw_thread_pool_submit(bs, handle_aiocb_discard, &acb);
+ ret = raw_thread_pool_submit(handle_aiocb_discard, &acb);
raw_account_discard(s, bytes, ret);
return ret;
}
handler = handle_aiocb_write_zeroes;
}
- return raw_thread_pool_submit(bs, handler, &acb);
+ return raw_thread_pool_submit(handler, &acb);
}
static int coroutine_fn raw_co_pwrite_zeroes(
return raw_do_pwrite_zeroes(bs, offset, bytes, flags, false);
}
-static int raw_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
+static int coroutine_fn
+raw_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
return 0;
}
+static ImageInfoSpecific *raw_get_specific_info(BlockDriverState *bs,
+ Error **errp)
+{
+ ImageInfoSpecificFile *file_info = g_new0(ImageInfoSpecificFile, 1);
+ ImageInfoSpecific *spec_info = g_new(ImageInfoSpecific, 1);
+
+ *spec_info = (ImageInfoSpecific){
+ .type = IMAGE_INFO_SPECIFIC_KIND_FILE,
+ .u.file.data = file_info,
+ };
+
+#ifdef FS_IOC_FSGETXATTR
+ {
+ BDRVRawState *s = bs->opaque;
+ struct fsxattr attr;
+ int ret;
+
+ ret = ioctl(s->fd, FS_IOC_FSGETXATTR, &attr);
+ if (!ret && attr.fsx_extsize != 0) {
+ file_info->has_extent_size_hint = true;
+ file_info->extent_size_hint = attr.fsx_extsize;
+ }
+ }
+#endif
+
+ return spec_info;
+}
+
static BlockStatsSpecificFile get_blockstats_specific_file(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
raw_handle_perm_lock(bs, RAW_PL_ABORT, 0, 0, NULL);
}
-static int coroutine_fn raw_co_copy_range_from(
+static int coroutine_fn GRAPH_RDLOCK raw_co_copy_range_from(
BlockDriverState *bs, BdrvChild *src, int64_t src_offset,
BdrvChild *dst, int64_t dst_offset, int64_t bytes,
BdrvRequestFlags read_flags, BdrvRequestFlags write_flags)
read_flags, write_flags);
}
-static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs,
- BdrvChild *src,
- int64_t src_offset,
- BdrvChild *dst,
- int64_t dst_offset,
- int64_t bytes,
- BdrvRequestFlags read_flags,
- BdrvRequestFlags write_flags)
+static int coroutine_fn GRAPH_RDLOCK
+raw_co_copy_range_to(BlockDriverState *bs,
+ BdrvChild *src, int64_t src_offset,
+ BdrvChild *dst, int64_t dst_offset,
+ int64_t bytes, BdrvRequestFlags read_flags,
+ BdrvRequestFlags write_flags)
{
RawPosixAIOData acb;
BDRVRawState *s = bs->opaque;
},
};
- return raw_thread_pool_submit(bs, handle_aiocb_copy_range, &acb);
+ return raw_thread_pool_submit(handle_aiocb_copy_range, &acb);
}
BlockDriver bdrv_file = {
.bdrv_co_copy_range_from = raw_co_copy_range_from,
.bdrv_co_copy_range_to = raw_co_copy_range_to,
.bdrv_refresh_limits = raw_refresh_limits,
- .bdrv_io_plug = raw_aio_plug,
- .bdrv_io_unplug = raw_aio_unplug,
- .bdrv_attach_aio_context = raw_aio_attach_aio_context,
-
- .bdrv_co_truncate = raw_co_truncate,
- .bdrv_getlength = raw_getlength,
- .bdrv_get_info = raw_get_info,
- .bdrv_get_allocated_file_size
- = raw_get_allocated_file_size,
+
+ .bdrv_co_truncate = raw_co_truncate,
+ .bdrv_co_getlength = raw_co_getlength,
+ .bdrv_co_get_info = raw_co_get_info,
+ .bdrv_get_specific_info = raw_get_specific_info,
+ .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size,
.bdrv_get_specific_stats = raw_get_specific_stats,
.bdrv_check_perm = raw_check_perm,
.bdrv_set_perm = raw_set_perm,
struct sg_io_hdr *io_hdr = buf;
if (io_hdr->cmdp[0] == PERSISTENT_RESERVE_OUT ||
io_hdr->cmdp[0] == PERSISTENT_RESERVE_IN) {
- return pr_manager_execute(s->pr_mgr, bdrv_get_aio_context(bs),
+ return pr_manager_execute(s->pr_mgr, qemu_get_current_aio_context(),
s->fd, io_hdr);
}
}
},
};
- return raw_thread_pool_submit(bs, handle_aiocb_ioctl, &acb);
+ return raw_thread_pool_submit(handle_aiocb_ioctl, &acb);
}
#endif /* linux */
.bdrv_co_copy_range_from = raw_co_copy_range_from,
.bdrv_co_copy_range_to = raw_co_copy_range_to,
.bdrv_refresh_limits = raw_refresh_limits,
- .bdrv_io_plug = raw_aio_plug,
- .bdrv_io_unplug = raw_aio_unplug,
- .bdrv_attach_aio_context = raw_aio_attach_aio_context,
-
- .bdrv_co_truncate = raw_co_truncate,
- .bdrv_getlength = raw_getlength,
- .bdrv_get_info = raw_get_info,
- .bdrv_get_allocated_file_size
- = raw_get_allocated_file_size,
+
+ .bdrv_co_truncate = raw_co_truncate,
+ .bdrv_co_getlength = raw_co_getlength,
+ .bdrv_co_get_info = raw_co_get_info,
+ .bdrv_get_specific_info = raw_get_specific_info,
+ .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size,
.bdrv_get_specific_stats = hdev_get_specific_stats,
.bdrv_check_perm = raw_check_perm,
.bdrv_set_perm = raw_set_perm,
#ifdef __linux__
.bdrv_co_ioctl = hdev_co_ioctl,
#endif
+
+ /* zoned device */
+#if defined(CONFIG_BLKZONED)
+ /* zone management operations */
+ .bdrv_co_zone_report = raw_co_zone_report,
+ .bdrv_co_zone_mgmt = raw_co_zone_mgmt,
+ .bdrv_co_zone_append = raw_co_zone_append,
+#endif
};
#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
{
bdrv_parse_filename_strip_prefix(filename, "host_cdrom:", options);
}
+
+static void cdrom_refresh_limits(BlockDriverState *bs, Error **errp)
+{
+ bs->bl.has_variable_length = true;
+ raw_refresh_limits(bs, errp);
+}
#endif
#ifdef __linux__
return prio;
}
-static bool cdrom_is_inserted(BlockDriverState *bs)
+static bool coroutine_fn cdrom_co_is_inserted(BlockDriverState *bs)
{
BDRVRawState *s = bs->opaque;
int ret;
return ret == CDS_DISC_OK;
}
-static void cdrom_eject(BlockDriverState *bs, bool eject_flag)
+static void coroutine_fn cdrom_co_eject(BlockDriverState *bs, bool eject_flag)
{
BDRVRawState *s = bs->opaque;
}
}
-static void cdrom_lock_medium(BlockDriverState *bs, bool locked)
+static void coroutine_fn cdrom_co_lock_medium(BlockDriverState *bs, bool locked)
{
BDRVRawState *s = bs->opaque;
.bdrv_co_preadv = raw_co_preadv,
.bdrv_co_pwritev = raw_co_pwritev,
.bdrv_co_flush_to_disk = raw_co_flush_to_disk,
- .bdrv_refresh_limits = raw_refresh_limits,
- .bdrv_io_plug = raw_aio_plug,
- .bdrv_io_unplug = raw_aio_unplug,
- .bdrv_attach_aio_context = raw_aio_attach_aio_context,
+ .bdrv_refresh_limits = cdrom_refresh_limits,
- .bdrv_co_truncate = raw_co_truncate,
- .bdrv_getlength = raw_getlength,
- .has_variable_length = true,
- .bdrv_get_allocated_file_size
- = raw_get_allocated_file_size,
+ .bdrv_co_truncate = raw_co_truncate,
+ .bdrv_co_getlength = raw_co_getlength,
+ .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size,
/* removable device support */
- .bdrv_is_inserted = cdrom_is_inserted,
- .bdrv_eject = cdrom_eject,
- .bdrv_lock_medium = cdrom_lock_medium,
+ .bdrv_co_is_inserted = cdrom_co_is_inserted,
+ .bdrv_co_eject = cdrom_co_eject,
+ .bdrv_co_lock_medium = cdrom_co_lock_medium,
/* generic scsi device */
.bdrv_co_ioctl = hdev_co_ioctl,
return 0;
}
-static bool cdrom_is_inserted(BlockDriverState *bs)
+static bool coroutine_fn cdrom_co_is_inserted(BlockDriverState *bs)
{
return raw_getlength(bs) > 0;
}
-static void cdrom_eject(BlockDriverState *bs, bool eject_flag)
+static void coroutine_fn cdrom_co_eject(BlockDriverState *bs, bool eject_flag)
{
BDRVRawState *s = bs->opaque;
cdrom_reopen(bs);
}
-static void cdrom_lock_medium(BlockDriverState *bs, bool locked)
+static void coroutine_fn cdrom_co_lock_medium(BlockDriverState *bs, bool locked)
{
BDRVRawState *s = bs->opaque;
.bdrv_co_preadv = raw_co_preadv,
.bdrv_co_pwritev = raw_co_pwritev,
.bdrv_co_flush_to_disk = raw_co_flush_to_disk,
- .bdrv_refresh_limits = raw_refresh_limits,
- .bdrv_io_plug = raw_aio_plug,
- .bdrv_io_unplug = raw_aio_unplug,
- .bdrv_attach_aio_context = raw_aio_attach_aio_context,
+ .bdrv_refresh_limits = cdrom_refresh_limits,
- .bdrv_co_truncate = raw_co_truncate,
- .bdrv_getlength = raw_getlength,
- .has_variable_length = true,
- .bdrv_get_allocated_file_size
- = raw_get_allocated_file_size,
+ .bdrv_co_truncate = raw_co_truncate,
+ .bdrv_co_getlength = raw_co_getlength,
+ .bdrv_co_get_allocated_file_size = raw_co_get_allocated_file_size,
/* removable device support */
- .bdrv_is_inserted = cdrom_is_inserted,
- .bdrv_eject = cdrom_eject,
- .bdrv_lock_medium = cdrom_lock_medium,
+ .bdrv_co_is_inserted = cdrom_co_is_inserted,
+ .bdrv_co_eject = cdrom_co_eject,
+ .bdrv_co_lock_medium = cdrom_co_lock_medium,
};
#endif /* __FreeBSD__ */