/*
 * Event loop thread
 *
 * Copyright Red Hat Inc., 2013, 2020
 *
 * Authors:
 *  Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qom/object.h"
#include "qom/object_interfaces.h"
#include "qemu/module.h"
#include "block/aio.h"
#include "block/block.h"
#include "sysemu/event-loop-base.h"
#include "sysemu/iothread.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qemu/error-report.h"
#include "qemu/rcu.h"
#include "qemu/main-loop.h"

#ifdef CONFIG_POSIX
/* Benchmark results from 2016 on NVMe SSD drives show that max polling times
 * around 16-32 microseconds yield IOPS improvements for both iodepth=1 and
 * iodepth=32 workloads.
 */
#define IOTHREAD_POLL_MAX_NS_DEFAULT 32768ULL
#else
#define IOTHREAD_POLL_MAX_NS_DEFAULT 0ULL
#endif

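/*
 * Thread function of each IOThread: it registers with RCU, publishes its
 * thread id, then services the AioContext (and optionally the GMainContext)
 * until iothread_stop() clears ->running.
 */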
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();
    /*
     * g_main_context_push_thread_default() must be called before anything
     * in this new thread uses glib.
     */
    g_main_context_push_thread_default(iothread->worker_context);
    qemu_set_current_aio_context(iothread->ctx);
    iothread->thread_id = qemu_get_thread_id();
    qemu_sem_post(&iothread->init_done_sem);

    while (iothread->running) {
        /*
         * Note: functionally, the g_main_loop_run() below could already
         * cover the aio_poll() events, but we can't run the main loop
         * unconditionally because an explicit aio_poll() here is faster
         * than g_main_loop_run() when the gcontext is not needed at all
         * (e.g. pure block layer iothreads).  In other words, running the
         * gcontext with the iothread costs some performance in exchange
         * for the extra functionality.
         */
        aio_poll(iothread->ctx, true);

        /*
         * We must check the running state again in case it was changed
         * during the previous aio_poll().
         */
        if (iothread->running && qatomic_read(&iothread->run_gcontext)) {
            g_main_loop_run(iothread->main_loop);
        }
    }

    g_main_context_pop_thread_default(iothread->worker_context);
    rcu_unregister_thread();
    return NULL;
}

/* Runs in iothread_run() thread */
static void iothread_stop_bh(void *opaque)
{
    IOThread *iothread = opaque;

    iothread->running = false; /* stop iothread_run() */

    if (iothread->main_loop) {
        g_main_loop_quit(iothread->main_loop);
    }
}

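/*
 * Ask the IOThread to stop and wait for its thread to exit.  Scheduling the
 * request as a bottom half ensures it runs in the iothread itself; repeated
 * calls are no-ops once stopping has been set.
 */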
void iothread_stop(IOThread *iothread)
{
    if (!iothread->ctx || iothread->stopping) {
        return;
    }
    iothread->stopping = true;
    aio_bh_schedule_oneshot(iothread->ctx, iothread_stop_bh, iothread);
    qemu_thread_join(&iothread->thread);
}

static void iothread_instance_init(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread->poll_max_ns = IOTHREAD_POLL_MAX_NS_DEFAULT;
    iothread->thread_id = -1;
    qemu_sem_init(&iothread->init_done_sem, 0);
    /* By default, we don't run gcontext */
    qatomic_set(&iothread->run_gcontext, 0);
}

static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread_stop(iothread);

    /*
     * Before glib2 2.33.10, a glib2 bug could leave the GSource context
     * pointer set even though the context had already been destroyed
     * (when it should have been cleared).  Free the AIO context earlier
     * here to work around that glib bug.
     *
     * We can remove this comment once the minimum supported glib2 version
     * reaches 2.33.10.  Until then, free the GSources before destroying
     * any GMainContext.
     */
    if (iothread->ctx) {
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
    }
    if (iothread->worker_context) {
        g_main_context_unref(iothread->worker_context);
        iothread->worker_context = NULL;
        g_main_loop_unref(iothread->main_loop);
        iothread->main_loop = NULL;
    }
    qemu_sem_destroy(&iothread->init_done_sem);
}

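/*
 * Create the GMainContext and GMainLoop for this IOThread and attach the
 * AioContext's GSource to them, so glib sources can later be dispatched via
 * iothread_get_g_main_context().
 */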
static void iothread_init_gcontext(IOThread *iothread)
{
    GSource *source;

    iothread->worker_context = g_main_context_new();
    source = aio_get_g_source(iothread_get_aio_context(iothread));
    g_source_attach(source, iothread->worker_context);
    g_source_unref(source);
    iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
}

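/*
 * Apply the polling, AIO batching and thread pool parameters to the
 * AioContext.  This runs at init time and again whenever one of the
 * properties is updated (via EventLoopBaseClass->update_params).
 */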
static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp)
{
    ERRP_GUARD();
    IOThread *iothread = IOTHREAD(base);

    if (!iothread->ctx) {
        return;
    }

    aio_context_set_poll_params(iothread->ctx,
                                iothread->poll_max_ns,
                                iothread->poll_grow,
                                iothread->poll_shrink,
                                errp);
    if (*errp) {
        return;
    }

    aio_context_set_aio_params(iothread->ctx,
                               iothread->parent_obj.aio_max_batch,
                               errp);

    aio_context_set_thread_pool_params(iothread->ctx, base->thread_pool_min,
                                       base->thread_pool_max, errp);
}

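/*
 * EventLoopBaseClass->init implementation: create the AioContext and the
 * GMainContext, apply the configured parameters, then spawn the IOThread's
 * host thread and wait for it to publish its thread id.
 */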
static void iothread_init(EventLoopBase *base, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(base);
    char *thread_name;

    iothread->stopping = false;
    iothread->running = true;
    iothread->ctx = aio_context_new(errp);
    if (!iothread->ctx) {
        return;
    }

    /*
     * Init one GMainContext for the iothread unconditionally, even if
     * it's not used
     */
    iothread_init_gcontext(iothread);

    iothread_set_aio_context_params(base, &local_error);
    if (local_error) {
        error_propagate(errp, local_error);
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
        return;
    }

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    thread_name = g_strdup_printf("IO %s",
                        object_get_canonical_path_component(OBJECT(base)));
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);

    /* Wait for initialization to complete */
    while (iothread->thread_id == -1) {
        qemu_sem_wait(&iothread->init_done_sem);
    }
}

typedef struct {
    const char *name;
    ptrdiff_t offset; /* field's byte offset in IOThread struct */
} IOThreadParamInfo;

static IOThreadParamInfo poll_max_ns_info = {
    "poll-max-ns", offsetof(IOThread, poll_max_ns),
};
static IOThreadParamInfo poll_grow_info = {
    "poll-grow", offsetof(IOThread, poll_grow),
};
static IOThreadParamInfo poll_shrink_info = {
    "poll-shrink", offsetof(IOThread, poll_shrink),
};

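/*
 * Generic accessors for the int64_t poll-* properties: IOThreadParamInfo
 * carries the QOM property name and the field's offset within IOThread, so a
 * single getter/setter pair can serve all three properties.
 */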
static void iothread_get_param(Object *obj, Visitor *v,
        const char *name, IOThreadParamInfo *info, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    int64_t *field = (void *)iothread + info->offset;

    visit_type_int64(v, name, field, errp);
}

static bool iothread_set_param(Object *obj, Visitor *v,
        const char *name, IOThreadParamInfo *info, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    int64_t *field = (void *)iothread + info->offset;
    int64_t value;

    if (!visit_type_int64(v, name, &value, errp)) {
        return false;
    }

    if (value < 0) {
        error_setg(errp, "%s value must be in range [0, %" PRId64 "]",
                   info->name, INT64_MAX);
        return false;
    }

    *field = value;

    return true;
}

static void iothread_get_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThreadParamInfo *info = opaque;

    iothread_get_param(obj, v, name, info, errp);
}

static void iothread_set_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    IOThreadParamInfo *info = opaque;

    if (!iothread_set_param(obj, v, name, info, errp)) {
        return;
    }

    if (iothread->ctx) {
        aio_context_set_poll_params(iothread->ctx,
                                    iothread->poll_max_ns,
                                    iothread->poll_grow,
                                    iothread->poll_shrink,
                                    errp);
    }
}

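/*
 * Illustrative usage (not part of this file): an IOThread is normally
 * created as a user-creatable object and then referenced by a device, e.g.
 *
 *   -object iothread,id=iothread0,poll-max-ns=32768
 *   -device virtio-blk-pci,drive=drive0,iothread=iothread0
 *
 * The poll-* properties registered below can also be changed at run time
 * with the qom-set monitor command.
 */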
static void iothread_class_init(ObjectClass *klass, void *class_data)
{
    EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(klass);

    bc->init = iothread_init;
    bc->update_params = iothread_set_aio_context_params;

    object_class_property_add(klass, "poll-max-ns", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_max_ns_info);
    object_class_property_add(klass, "poll-grow", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_grow_info);
    object_class_property_add(klass, "poll-shrink", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_shrink_info);
}

static const TypeInfo iothread_info = {
    .name = TYPE_IOTHREAD,
    .parent = TYPE_EVENT_LOOP_BASE,
    .class_init = iothread_class_init,
    .instance_size = sizeof(IOThread),
    .instance_init = iothread_instance_init,
    .instance_finalize = iothread_instance_finalize,
};

static void iothread_register_types(void)
{
    type_register_static(&iothread_info);
}

type_init(iothread_register_types)

char *iothread_get_id(IOThread *iothread)
{
    return g_strdup(object_get_canonical_path_component(OBJECT(iothread)));
}

AioContext *iothread_get_aio_context(IOThread *iothread)
{
    return iothread->ctx;
}

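/*
 * Helper for qmp_query_iothreads(): append an IOThreadInfo entry for each
 * IOThread object found under the QOM objects root.
 */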
static int query_one_iothread(Object *object, void *opaque)
{
    IOThreadInfoList ***tail = opaque;
    IOThreadInfo *info;
    IOThread *iothread;

    iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
    if (!iothread) {
        return 0;
    }

    info = g_new0(IOThreadInfo, 1);
    info->id = iothread_get_id(iothread);
    info->thread_id = iothread->thread_id;
    info->poll_max_ns = iothread->poll_max_ns;
    info->poll_grow = iothread->poll_grow;
    info->poll_shrink = iothread->poll_shrink;
    info->aio_max_batch = iothread->parent_obj.aio_max_batch;

    QAPI_LIST_APPEND(*tail, info);
    return 0;
}

IOThreadInfoList *qmp_query_iothreads(Error **errp)
{
    IOThreadInfoList *head = NULL;
    IOThreadInfoList **prev = &head;
    Object *container = object_get_objects_root();

    object_child_foreach(container, query_one_iothread, &prev);
    return head;
}

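/*
 * Return the IOThread's GMainContext and enable g_main_loop_run() in
 * iothread_run().  The aio_notify() kicks the thread out of its blocking
 * aio_poll() so that it notices the updated run_gcontext flag.
 */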
GMainContext *iothread_get_g_main_context(IOThread *iothread)
{
    qatomic_set(&iothread->run_gcontext, 1);
    aio_notify(iothread->ctx);
    return iothread->worker_context;
}

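/*
 * Create/destroy an internal IOThread.  These objects are parented to the
 * QOM internal root rather than /objects, so they are not visible to
 * iothread_by_id() or query-iothreads.
 */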
IOThread *iothread_create(const char *id, Error **errp)
{
    Object *obj;

    obj = object_new_with_props(TYPE_IOTHREAD,
                                object_get_internal_root(),
                                id, errp, NULL);

    return IOTHREAD(obj);
}

void iothread_destroy(IOThread *iothread)
{
    object_unparent(OBJECT(iothread));
}

/* Lookup IOThread by its id.  Only finds user-created objects, not internal
 * iothread_create() objects. */
IOThread *iothread_by_id(const char *id)
{
    return IOTHREAD(object_resolve_path_type(id, TYPE_IOTHREAD, NULL));
}

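/*
 * Return true when the calling thread's current AioContext is an IOThread's
 * context rather than the main loop's global AioContext.
 */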
bool qemu_in_iothread(void)
{
    return qemu_get_current_aio_context() != qemu_get_aio_context();
}