Prepare to move the blk_io_plug_call() API out of the block layer so
that other subsystems can use this deferred call mechanism. Rename it
to defer_call() but leave the code in block/plug.c.
The next commit will move the code out of the block layer.
Suggested-by: Ilya Maximets <i.maximets@ovn.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Paul Durrant <paul@xen.org>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-ID: <20230913200045.1024233-2-stefanha@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
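
For background, a minimal sketch of how a caller batches work with the
renamed API; MyQueue, submit_batch(), enqueue_request() and
handle_requests() are hypothetical names, and the semantics follow the
block/plug.c documentation updated below:

    #include "qemu/osdep.h"
    #include "sysemu/block-backend.h"   /* assumed include for defer_call() */

    /* Hypothetical per-device queue, for illustration only. */
    typedef struct {
        unsigned pending;
    } MyQueue;

    /* Runs once at the outermost defer_call_end(), or immediately if no
     * deferred section is active. */
    static void submit_batch(void *opaque)
    {
        MyQueue *q = opaque;

        /* submit all q->pending requests in a single syscall/doorbell */
        q->pending = 0;
    }

    static void enqueue_request(MyQueue *q)
    {
        q->pending++;

        /* Duplicate fn/opaque pairs are coalesced, so calling this for
         * every request still yields one submit_batch() per section. */
        defer_call(submit_batch, q);
    }

    static void handle_requests(MyQueue *q, unsigned n)
    {
        defer_call_begin();     /* start of deferred section */
        for (unsigned i = 0; i < n; i++) {
            enqueue_request(q);
        }
        defer_call_end();       /* submit_batch(q) is called once here */
    }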
- * Called by blk_io_unplug() or immediately if not plugged. Called without
- * blkio_lock.
+ * Called by defer_call_end() or immediately if not in a deferred section.
+ * Called without blkio_lock.
-static void blkio_unplug_fn(void *opaque)
+static void blkio_deferred_fn(void *opaque)
{
BDRVBlkioState *s = opaque;
{
BDRVBlkioState *s = bs->opaque;
- blk_io_plug_call(blkio_unplug_fn, s);
+ defer_call(blkio_deferred_fn, s);
}
static int coroutine_fn
-static void luring_unplug_fn(void *opaque)
+static void luring_deferred_fn(void *opaque)
{
LuringState *s = opaque;
trace_luring_unplug_fn(s, s->io_q.blocked, s->io_q.in_queue,
- blk_io_plug_call(luring_unplug_fn, s);
+ defer_call(luring_deferred_fn, s);
-static void laio_unplug_fn(void *opaque)
+static void laio_deferred_fn(void *opaque)
{
LinuxAioState *s = opaque;
if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch)) {
ioq_submit(s);
} else {
- blk_io_plug_call(laio_unplug_fn, s);
+ defer_call(laio_deferred_fn, s);
-static void nvme_unplug_fn(void *opaque)
+static void nvme_deferred_fn(void *opaque)
{
NVMeQueuePair *q = opaque;
q->need_kick++;
qemu_mutex_unlock(&q->lock);
- blk_io_plug_call(nvme_unplug_fn, q);
+ defer_call(nvme_deferred_fn, q);
}
static void nvme_admin_cmd_sync_cb(void *opaque, int ret)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
- * This API defers a function call within a blk_io_plug()/blk_io_unplug()
+ * This API defers a function call within a defer_call_begin()/defer_call_end()
* section, allowing multiple calls to batch up. This is a performance
* optimization that is used in the block layer to submit several I/O requests
* at once instead of individually:
*
- * blk_io_plug(); <-- start of plugged region
+ * defer_call_begin(); <-- start of section
- * blk_io_plug_call(my_func, my_obj); <-- deferred my_func(my_obj) call
- * blk_io_plug_call(my_func, my_obj); <-- another
- * blk_io_plug_call(my_func, my_obj); <-- another
+ * defer_call(my_func, my_obj); <-- deferred my_func(my_obj) call
+ * defer_call(my_func, my_obj); <-- another
+ * defer_call(my_func, my_obj); <-- another
- * blk_io_unplug(); <-- end of plugged region, my_func(my_obj) is called once
- *
- * This code is actually generic and not tied to the block layer. If another
- * subsystem needs this functionality, it could be renamed.
+ * defer_call_end(); <-- end of section, my_func(my_obj) is called once
*/
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "sysemu/block-backend.h"
#include "qemu/thread.h"
#include "sysemu/block-backend.h"
-/* A function call that has been deferred until unplug() */
+/* A function call that has been deferred until defer_call_end() */
typedef struct {
void (*fn)(void *);
void *opaque;
/* Per-thread state */
typedef struct {
- unsigned count; /* how many times has plug() been called? */
- GArray *unplug_fns; /* functions to call at unplug time */
-} Plug;
+ unsigned nesting_level;
+ GArray *deferred_call_array;
+} DeferCallThreadState;
-/* Use get_ptr_plug() to fetch this thread-local value */
-QEMU_DEFINE_STATIC_CO_TLS(Plug, plug);
+/* Use get_ptr_defer_call_thread_state() to fetch this thread-local value */
+QEMU_DEFINE_STATIC_CO_TLS(DeferCallThreadState, defer_call_thread_state);
/* Called at thread cleanup time */
-static void blk_io_plug_atexit(Notifier *n, void *value)
+static void defer_call_atexit(Notifier *n, void *value)
- Plug *plug = get_ptr_plug();
- g_array_free(plug->unplug_fns, TRUE);
+ DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
+ g_array_free(thread_state->deferred_call_array, TRUE);
}
/* This won't involve coroutines, so use __thread */
-static __thread Notifier blk_io_plug_atexit_notifier;
+static __thread Notifier defer_call_atexit_notifier;
* @fn: a function pointer to be invoked
* @opaque: a user-defined argument to @fn()
*
- * Call @fn(@opaque) immediately if not within a blk_io_plug()/blk_io_unplug()
- * section.
+ * Call @fn(@opaque) immediately if not within a
+ * defer_call_begin()/defer_call_end() section.
*
* Otherwise defer the call until the end of the outermost
- * blk_io_plug()/blk_io_unplug() section in this thread. If the same
+ * defer_call_begin()/defer_call_end() section in this thread. If the same
* @fn/@opaque pair has already been deferred, it will only be called once upon
- * blk_io_unplug() so that accumulated calls are batched into a single call.
+ * defer_call_end() so that accumulated calls are batched into a single call.
*
* The caller must ensure that @opaque is not freed before @fn() is invoked.
*/
-void blk_io_plug_call(void (*fn)(void *), void *opaque)
+void defer_call(void (*fn)(void *), void *opaque)
- Plug *plug = get_ptr_plug();
+ DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
- /* Call immediately if we're not plugged */
- if (plug->count == 0) {
+ /* Call immediately if we're not deferring calls */
+ if (thread_state->nesting_level == 0) {
- GArray *array = plug->unplug_fns;
+ GArray *array = thread_state->deferred_call_array;
- array = g_array_new(FALSE, FALSE, sizeof(UnplugFn));
- plug->unplug_fns = array;
- blk_io_plug_atexit_notifier.notify = blk_io_plug_atexit;
- qemu_thread_atexit_add(&blk_io_plug_atexit_notifier);
+ array = g_array_new(FALSE, FALSE, sizeof(DeferredCall));
+ thread_state->deferred_call_array = array;
+ defer_call_atexit_notifier.notify = defer_call_atexit;
+ qemu_thread_atexit_add(&defer_call_atexit_notifier);
- UnplugFn *fns = (UnplugFn *)array->data;
- UnplugFn new_fn = {
+ DeferredCall *fns = (DeferredCall *)array->data;
+ DeferredCall new_fn = {
.fn = fn,
.opaque = opaque,
};
- * blk_io_plug: Defer blk_io_plug_call() functions until blk_io_unplug()
+ * defer_call_begin: Defer defer_call() functions until defer_call_end()
- * blk_io_plug/unplug are thread-local operations. This means that multiple
- * threads can simultaneously call plug/unplug, but the caller must ensure that
- * each unplug() is called in the same thread of the matching plug().
+ * defer_call_begin() and defer_call_end() are thread-local operations. The
+ * caller must ensure that each defer_call_begin() has a matching
+ * defer_call_end() in the same thread.
- * Nesting is supported. blk_io_plug_call() functions are only called at the
- * outermost blk_io_unplug().
+ * Nesting is supported. defer_call() functions are only called at the
+ * outermost defer_call_end().
-void blk_io_plug(void)
+void defer_call_begin(void)
- Plug *plug = get_ptr_plug();
+ DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
- assert(plug->count < UINT32_MAX);
+ assert(thread_state->nesting_level < UINT32_MAX);
- plug->count++;
+ thread_state->nesting_level++;
- * blk_io_unplug: Run any pending blk_io_plug_call() functions
+ * defer_call_end: Run any pending defer_call() functions
- * There must have been a matching blk_io_plug() call in the same thread prior
- * to this blk_io_unplug() call.
+ * There must have been a matching defer_call_begin() call in the same thread
+ * prior to this defer_call_end() call.
-void blk_io_unplug(void)
+void defer_call_end(void)
- Plug *plug = get_ptr_plug();
+ DeferCallThreadState *thread_state = get_ptr_defer_call_thread_state();
- assert(plug->count > 0);
+ assert(thread_state->nesting_level > 0);
- if (--plug->count > 0) {
+ if (--thread_state->nesting_level > 0) {
- GArray *array = plug->unplug_fns;
+ GArray *array = thread_state->deferred_call_array;
- UnplugFn *fns = (UnplugFn *)array->data;
+ DeferredCall *fns = (DeferredCall *)array->data;
for (guint i = 0; i < array->len; i++) {
fns[i].fn(fns[i].opaque);
/*
* Threshold of in-flight requests above which we will start using
- * blk_io_plug()/blk_io_unplug() to batch requests.
+ * defer_call_begin()/defer_call_end() to batch requests.
*/
#define IO_PLUG_THRESHOLD 1
* is below us.
*/
if (inflight_atstart > IO_PLUG_THRESHOLD) {
- blk_io_plug();
+ defer_call_begin();
}
while (rc != rp) {
/* pull request from ring */
if (inflight_atstart > IO_PLUG_THRESHOLD &&
batched >= inflight_atstart) {
- blk_io_unplug();
+ defer_call_end();
}
xen_block_do_aio(request);
if (inflight_atstart > IO_PLUG_THRESHOLD) {
if (batched >= inflight_atstart) {
- blk_io_plug();
+ defer_call_begin();
batched = 0;
} else {
batched++;
}
}
if (inflight_atstart > IO_PLUG_THRESHOLD) {
- blk_io_unplug();
+ defer_call_end();
bool suppress_notifications = virtio_queue_get_notification(vq);
aio_context_acquire(blk_get_aio_context(s->blk));
- blk_io_plug();
+ defer_call_begin();
do {
if (suppress_notifications) {
virtio_blk_submit_multireq(s, &mrb);
}
- blk_io_unplug();
+ defer_call_end();
aio_context_release(blk_get_aio_context(s->blk));
}
return -ENOBUFS;
}
scsi_req_ref(req->sreq);
- blk_io_plug();
+ defer_call_begin();
object_unref(OBJECT(d));
return 0;
}
if (scsi_req_enqueue(sreq)) {
scsi_req_continue(sreq);
}
- blk_io_unplug();
+ defer_call_end();
while (!QTAILQ_EMPTY(&reqs)) {
req = QTAILQ_FIRST(&reqs);
QTAILQ_REMOVE(&reqs, req, next);
- blk_io_unplug();
+ defer_call_end();
scsi_req_unref(req->sreq);
virtqueue_detach_element(req->vq, &req->elem, 0);
virtio_scsi_free_req(req);
int blk_get_max_iov(BlockBackend *blk);
int blk_get_max_hw_iov(BlockBackend *blk);
-void blk_io_plug(void);
-void blk_io_unplug(void);
-void blk_io_plug_call(void (*fn)(void *), void *opaque);
+void defer_call_begin(void);
+void defer_call_end(void);
+void defer_call(void (*fn)(void *), void *opaque);
AioContext *blk_get_aio_context(BlockBackend *blk);
BlockAcctStats *blk_get_stats(BlockBackend *blk);
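
Usage note: nesting is supported, and deferred functions run only at the
outermost defer_call_end(). A hedged sketch, reusing the hypothetical
MyQueue and submit_batch() from the earlier example:

    /* Nested sections: deferred functions run only at the outermost
     * defer_call_end(). */
    static void inner(MyQueue *q)
    {
        defer_call_begin();              /* nesting_level: 1 -> 2 */
        defer_call(submit_batch, q);     /* coalesced with the outer call */
        defer_call_end();                /* 2 -> 1: nothing runs yet */
    }

    static void outer(MyQueue *q)
    {
        defer_call_begin();              /* nesting_level: 0 -> 1 */
        defer_call(submit_batch, q);
        inner(q);
        defer_call_end();                /* 1 -> 0: submit_batch(q) runs once */
    }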