X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=iothread.c;h=beeb8705344071e9198510a3a99a1291e9a15523;hb=8cfef89271e27e4a7cd047835dc8161fad50bc5a;hp=878a594ef497e74db89bc90308678382f02048f8;hpb=93f6d1c16036aaf34055d16f54ea770fb8d6d280;p=mirror_qemu.git

diff --git a/iothread.c b/iothread.c
index 878a594ef4..beeb870534 100644
--- a/iothread.c
+++ b/iothread.c
@@ -11,15 +11,17 @@
  *
  */
 
+#include "qemu/osdep.h"
 #include "qom/object.h"
 #include "qom/object_interfaces.h"
 #include "qemu/module.h"
 #include "block/aio.h"
+#include "block/block.h"
 #include "sysemu/iothread.h"
 #include "qmp-commands.h"
 #include "qemu/error-report.h"
-
-#define IOTHREADS_PATH "/objects"
+#include "qemu/rcu.h"
+#include "qemu/main-loop.h"
 
 typedef ObjectClass IOThreadClass;
 
@@ -28,40 +30,70 @@ typedef ObjectClass IOThreadClass;
 #define IOTHREAD_CLASS(klass) \
    OBJECT_CLASS_CHECK(IOThreadClass, klass, TYPE_IOTHREAD)
 
+/* Benchmark results from 2016 on NVMe SSD drives show max polling times around
+ * 16-32 microseconds yield IOPS improvements for both iodepth=1 and iodepth=32
+ * workloads.
+ */
+#define IOTHREAD_POLL_MAX_NS_DEFAULT 32768ULL
+
+static __thread IOThread *my_iothread;
+
+AioContext *qemu_get_current_aio_context(void)
+{
+    return my_iothread ? my_iothread->ctx : qemu_get_aio_context();
+}
+
 static void *iothread_run(void *opaque)
 {
     IOThread *iothread = opaque;
-    bool blocking;
 
+    rcu_register_thread();
+
+    my_iothread = iothread;
     qemu_mutex_lock(&iothread->init_done_lock);
     iothread->thread_id = qemu_get_thread_id();
     qemu_cond_signal(&iothread->init_done_cond);
     qemu_mutex_unlock(&iothread->init_done_lock);
 
-    while (!iothread->stopping) {
-        aio_context_acquire(iothread->ctx);
-        blocking = true;
-        while (!iothread->stopping && aio_poll(iothread->ctx, blocking)) {
-            /* Progress was made, keep going */
-            blocking = false;
-        }
-        aio_context_release(iothread->ctx);
+    while (!atomic_read(&iothread->stopping)) {
+        aio_poll(iothread->ctx, true);
     }
+
+    rcu_unregister_thread();
     return NULL;
 }
 
-static void iothread_instance_finalize(Object *obj)
+static int iothread_stop(Object *object, void *opaque)
 {
-    IOThread *iothread = IOTHREAD(obj);
+    IOThread *iothread;
 
-    if (!iothread->ctx) {
-        return;
+    iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
+    if (!iothread || !iothread->ctx) {
+        return 0;
     }
     iothread->stopping = true;
     aio_notify(iothread->ctx);
     qemu_thread_join(&iothread->thread);
+    return 0;
+}
+
+static void iothread_instance_init(Object *obj)
+{
+    IOThread *iothread = IOTHREAD(obj);
+
+    iothread->poll_max_ns = IOTHREAD_POLL_MAX_NS_DEFAULT;
+}
+
+static void iothread_instance_finalize(Object *obj)
+{
+    IOThread *iothread = IOTHREAD(obj);
+
+    iothread_stop(obj, NULL);
     qemu_cond_destroy(&iothread->init_done_cond);
     qemu_mutex_destroy(&iothread->init_done_lock);
+    if (!iothread->ctx) {
+        return;
+    }
     aio_context_unref(iothread->ctx);
 }
 
@@ -69,6 +101,7 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
 {
     Error *local_error = NULL;
     IOThread *iothread = IOTHREAD(obj);
+    char *name, *thread_name;
 
     iothread->stopping = false;
     iothread->thread_id = -1;
@@ -78,14 +111,30 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
         return;
     }
 
+    aio_context_set_poll_params(iothread->ctx,
+                                iothread->poll_max_ns,
+                                iothread->poll_grow,
+                                iothread->poll_shrink,
+                                &local_error);
+    if (local_error) {
+        error_propagate(errp, local_error);
+        aio_context_unref(iothread->ctx);
+        iothread->ctx = NULL;
+        return;
+    }
+
     qemu_mutex_init(&iothread->init_done_lock);
     qemu_cond_init(&iothread->init_done_cond);
 
     /* This assumes we are called from a thread with useful CPU affinity for us
      * to inherit.
      */
-    qemu_thread_create(&iothread->thread, "iothread", iothread_run,
+    name = object_get_canonical_path_component(OBJECT(obj));
+    thread_name = g_strdup_printf("IO %s", name);
+    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                        iothread, QEMU_THREAD_JOINABLE);
+    g_free(thread_name);
+    g_free(name);
 
     /* Wait for initialization to complete */
     qemu_mutex_lock(&iothread->init_done_lock);
@@ -96,10 +145,82 @@ static void iothread_complete(UserCreatable *obj, Error **errp)
     qemu_mutex_unlock(&iothread->init_done_lock);
 }
 
+typedef struct {
+    const char *name;
+    ptrdiff_t offset; /* field's byte offset in IOThread struct */
+} PollParamInfo;
+
+static PollParamInfo poll_max_ns_info = {
+    "poll-max-ns", offsetof(IOThread, poll_max_ns),
+};
+static PollParamInfo poll_grow_info = {
+    "poll-grow", offsetof(IOThread, poll_grow),
+};
+static PollParamInfo poll_shrink_info = {
+    "poll-shrink", offsetof(IOThread, poll_shrink),
+};
+
+static void iothread_get_poll_param(Object *obj, Visitor *v,
+        const char *name, void *opaque, Error **errp)
+{
+    IOThread *iothread = IOTHREAD(obj);
+    PollParamInfo *info = opaque;
+    int64_t *field = (void *)iothread + info->offset;
+
+    visit_type_int64(v, name, field, errp);
+}
+
+static void iothread_set_poll_param(Object *obj, Visitor *v,
+        const char *name, void *opaque, Error **errp)
+{
+    IOThread *iothread = IOTHREAD(obj);
+    PollParamInfo *info = opaque;
+    int64_t *field = (void *)iothread + info->offset;
+    Error *local_err = NULL;
+    int64_t value;
+
+    visit_type_int64(v, name, &value, &local_err);
+    if (local_err) {
+        goto out;
+    }
+
+    if (value < 0) {
+        error_setg(&local_err, "%s value must be in range [0, %"PRId64"]",
+                   info->name, INT64_MAX);
+        goto out;
+    }
+
+    *field = value;
+
+    if (iothread->ctx) {
+        aio_context_set_poll_params(iothread->ctx,
+                                    iothread->poll_max_ns,
+                                    iothread->poll_grow,
+                                    iothread->poll_shrink,
+                                    &local_err);
+    }
+
+out:
+    error_propagate(errp, local_err);
+}
+
 static void iothread_class_init(ObjectClass *klass, void *class_data)
 {
     UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
     ucc->complete = iothread_complete;
+
+    object_class_property_add(klass, "poll-max-ns", "int",
+                              iothread_get_poll_param,
+                              iothread_set_poll_param,
+                              NULL, &poll_max_ns_info, &error_abort);
+    object_class_property_add(klass, "poll-grow", "int",
+                              iothread_get_poll_param,
+                              iothread_set_poll_param,
+                              NULL, &poll_grow_info, &error_abort);
+    object_class_property_add(klass, "poll-shrink", "int",
+                              iothread_get_poll_param,
+                              iothread_set_poll_param,
+                              NULL, &poll_shrink_info, &error_abort);
 }
 
 static const TypeInfo iothread_info = {
@@ -107,6 +228,7 @@ static const TypeInfo iothread_info = {
     .parent = TYPE_OBJECT,
     .class_init = iothread_class_init,
     .instance_size = sizeof(IOThread),
+    .instance_init = iothread_instance_init,
     .instance_finalize = iothread_instance_finalize,
     .interfaces = (InterfaceInfo[]) {
         {TYPE_USER_CREATABLE},
@@ -146,6 +268,9 @@ static int query_one_iothread(Object *object, void *opaque)
     info = g_new0(IOThreadInfo, 1);
     info->id = iothread_get_id(iothread);
     info->thread_id = iothread->thread_id;
+    info->poll_max_ns = iothread->poll_max_ns;
+    info->poll_grow = iothread->poll_grow;
+    info->poll_shrink = iothread->poll_shrink;
 
     elem = g_new0(IOThreadInfoList, 1);
     elem->value = info;
@@ -160,8 +285,27 @@ IOThreadInfoList *qmp_query_iothreads(Error **errp)
 {
     IOThreadInfoList *head = NULL;
     IOThreadInfoList **prev = &head;
-    Object *container = container_get(object_get_root(), IOTHREADS_PATH);
+    Object *container = object_get_objects_root();
 
     object_child_foreach(container, query_one_iothread, &prev);
     return head;
 }
+
+void iothread_stop_all(void)
+{
+    Object *container = object_get_objects_root();
+    BlockDriverState *bs;
+    BdrvNextIterator it;
+
+    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
+        AioContext *ctx = bdrv_get_aio_context(bs);
+        if (ctx == qemu_get_aio_context()) {
+            continue;
+        }
+        aio_context_acquire(ctx);
+        bdrv_set_aio_context(bs, qemu_get_aio_context());
+        aio_context_release(ctx);
+    }
+
+    object_child_foreach(container, iothread_stop, NULL);
+}
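
Illustration (not part of the diff above): the getter/setter pair added in this patch shares one implementation across poll-max-ns, poll-grow and poll-shrink by storing each field's byte offset in a PollParamInfo and resolving the field with pointer arithmetic at run time. The standalone C sketch below shows that offsetof-based dispatch in isolation; FakeIOThread and set_poll_param are hypothetical names, not QEMU APIs, and the range check mirrors the one in iothread_set_poll_param().

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the polling fields of the real IOThread struct. */
typedef struct {
    int64_t poll_max_ns;
    int64_t poll_grow;
    int64_t poll_shrink;
} FakeIOThread;

typedef struct {
    const char *name;
    ptrdiff_t offset;   /* field's byte offset in FakeIOThread */
} PollParamInfo;

static const PollParamInfo params[] = {
    { "poll-max-ns", offsetof(FakeIOThread, poll_max_ns) },
    { "poll-grow",   offsetof(FakeIOThread, poll_grow)   },
    { "poll-shrink", offsetof(FakeIOThread, poll_shrink) },
};

/* One setter for all three parameters: locate the field via its byte offset,
 * reject negative values, then store the new value. */
static int set_poll_param(FakeIOThread *t, const PollParamInfo *info,
                          int64_t value)
{
    int64_t *field = (int64_t *)((char *)t + info->offset);

    if (value < 0) {
        fprintf(stderr, "%s value must be in range [0, %" PRId64 "]\n",
                info->name, INT64_MAX);
        return -1;
    }
    *field = value;
    return 0;
}

int main(void)
{
    FakeIOThread t = { 0 };

    set_poll_param(&t, &params[0], 32768); /* the patch's default poll-max-ns */
    set_poll_param(&t, &params[1], 0);     /* poll-grow */
    printf("poll_max_ns=%" PRId64 " poll_grow=%" PRId64 "\n",
           t.poll_max_ns, t.poll_grow);
    return 0;
}

In the patch itself, the stored value is additionally pushed into the event loop through aio_context_set_poll_params() whenever the iothread's AioContext already exists.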