]> git.proxmox.com Git - mirror_qemu.git/blame - io/channel.c
target/loongarch: Fix qtest test-hmp error when KVM-only build
[mirror_qemu.git] / io / channel.c
CommitLineData
666a3af9
DB
1/*
2 * QEMU I/O channels
3 *
4 * Copyright (c) 2015 Red Hat, Inc.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
c8198bd5 9 * version 2.1 of the License, or (at your option) any later version.
666a3af9
DB
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 *
19 */
20
cae9fc56 21#include "qemu/osdep.h"
7c1f51bf 22#include "block/aio-wait.h"
666a3af9 23#include "io/channel.h"
da34e65c 24#include "qapi/error.h"
c4c497d2 25#include "qemu/main-loop.h"
0b8fa32f 26#include "qemu/module.h"
d4622e55 27#include "qemu/iov.h"
666a3af9
DB
28
29bool qio_channel_has_feature(QIOChannel *ioc,
30 QIOChannelFeature feature)
31{
32 return ioc->features & (1 << feature);
33}
34
35
d8d3c7cc
FF
36void qio_channel_set_feature(QIOChannel *ioc,
37 QIOChannelFeature feature)
38{
39 ioc->features |= (1 << feature);
40}
41
42
20f4aa26
DB
43void qio_channel_set_name(QIOChannel *ioc,
44 const char *name)
45{
46 g_free(ioc->name);
47 ioc->name = g_strdup(name);
48}
49
50
666a3af9
DB
/*
 * Read into the vector @iov, optionally receiving passed file
 * descriptors into @fds/@nfds when the channel supports FD passing.
 *
 * Validates requested capabilities against the channel's features
 * before dispatching to the concrete class' io_readv implementation.
 * Returns bytes read, QIO_CHANNEL_ERR_BLOCK if it would block, or -1
 * with @errp set on error.
 */
ssize_t qio_channel_readv_full(QIOChannel *ioc,
                               const struct iovec *iov,
                               size_t niov,
                               int **fds,
                               size_t *nfds,
                               int flags,
                               Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    /* FD reception requested on a channel that cannot deliver FDs */
    if ((fds || nfds) &&
        !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
        error_setg_errno(errp, EINVAL,
                         "Channel does not support file descriptor passing");
        return -1;
    }

    /* MSG_PEEK-style reads require explicit backend support */
    if ((flags & QIO_CHANNEL_READ_FLAG_MSG_PEEK) &&
        !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
        error_setg_errno(errp, EINVAL,
                         "Channel does not support peek read");
        return -1;
    }

    return klass->io_readv(ioc, iov, niov, fds, nfds, flags, errp);
}
77
78
/*
 * Write the vector @iov, optionally passing the file descriptors
 * @fds/@nfds alongside the data when the channel supports it.
 *
 * FD passing and zero-copy are mutually exclusive, and each is
 * validated against the channel's advertised features before
 * dispatching to the concrete class' io_writev implementation.
 * Returns bytes written, QIO_CHANNEL_ERR_BLOCK if it would block,
 * or -1 with @errp set on error.
 */
ssize_t qio_channel_writev_full(QIOChannel *ioc,
                                const struct iovec *iov,
                                size_t niov,
                                int *fds,
                                size_t nfds,
                                int flags,
                                Error **errp)
{
    QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);

    if (fds || nfds) {
        if (!qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_FD_PASS)) {
            error_setg_errno(errp, EINVAL,
                             "Channel does not support file descriptor passing");
            return -1;
        }
        /* Zero-copy writes cannot carry ancillary FD data */
        if (flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) {
            error_setg_errno(errp, EINVAL,
                             "Zero Copy does not support file descriptor passing");
            return -1;
        }
    }

    if ((flags & QIO_CHANNEL_WRITE_FLAG_ZERO_COPY) &&
        !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) {
        error_setg_errno(errp, EINVAL,
                         "Requested Zero Copy feature is not available");
        return -1;
    }

    return klass->io_writev(ioc, iov, niov, fds, nfds, flags, errp);
}
111
112
1dd91b22
PB
113int coroutine_mixed_fn qio_channel_readv_all_eof(QIOChannel *ioc,
114 const struct iovec *iov,
115 size_t niov,
116 Error **errp)
bebab91e
EU
117{
118 return qio_channel_readv_full_all_eof(ioc, iov, niov, NULL, NULL, errp);
119}
120
1dd91b22
PB
121int coroutine_mixed_fn qio_channel_readv_all(QIOChannel *ioc,
122 const struct iovec *iov,
123 size_t niov,
124 Error **errp)
bebab91e
EU
125{
126 return qio_channel_readv_full_all(ioc, iov, niov, NULL, NULL, errp);
127}
128
1dd91b22
PB
/*
 * Repeatedly read until @iov is completely filled, optionally
 * collecting passed file descriptors into @fds/@nfds on the first
 * iteration only.
 *
 * Blocks (by yielding when in a coroutine, else via a nested main
 * loop) whenever the channel would block.
 *
 * Returns: 1 when all requested data was read,
 *          0 on a clean EOF before *any* data was read,
 *         -1 on error (including EOF after a partial read), with
 *            any already-received FDs closed and freed.
 */
int coroutine_mixed_fn qio_channel_readv_full_all_eof(QIOChannel *ioc,
                                                      const struct iovec *iov,
                                                      size_t niov,
                                                      int **fds, size_t *nfds,
                                                      Error **errp)
{
    int ret = -1;
    /* Local copy of the iovec so iov_discard_front() can mutate it */
    struct iovec *local_iov = g_new(struct iovec, niov);
    struct iovec *local_iov_head = local_iov;
    unsigned int nlocal_iov = niov;
    /* FD reception is one-shot: cleared after the first successful read */
    int **local_fds = fds;
    size_t *local_nfds = nfds;
    bool partial = false;

    if (nfds) {
        *nfds = 0;
    }

    if (fds) {
        *fds = NULL;
    }

    nlocal_iov = iov_copy(local_iov, nlocal_iov,
                          iov, niov,
                          0, iov_size(iov, niov));

    /* Keep looping while data remains, or an FD read is still pending */
    while ((nlocal_iov > 0) || local_fds) {
        ssize_t len;
        len = qio_channel_readv_full(ioc, local_iov, nlocal_iov, local_fds,
                                     local_nfds, 0, errp);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                qio_channel_yield(ioc, G_IO_IN);
            } else {
                qio_channel_wait(ioc, G_IO_IN);
            }
            continue;
        }

        if (len == 0) {
            if (local_nfds && *local_nfds) {
                /*
                 * Got some FDs, but no data yet. This isn't an EOF
                 * scenario (yet), so carry on to try to read data
                 * on next loop iteration
                 */
                goto next_iter;
            } else if (!partial) {
                /* No fds and no data - EOF before any data read */
                ret = 0;
                goto cleanup;
            } else {
                len = -1;
                error_setg(errp,
                           "Unexpected end-of-file before all data were read");
                /* Fallthrough into len < 0 handling */
            }
        }

        if (len < 0) {
            /* Close any FDs we previously received */
            if (nfds && fds) {
                size_t i;
                for (i = 0; i < (*nfds); i++) {
                    close((*fds)[i]);
                }
                g_free(*fds);
                *fds = NULL;
                *nfds = 0;
            }
            goto cleanup;
        }

        if (nlocal_iov) {
            iov_discard_front(&local_iov, &nlocal_iov, len);
        }

next_iter:
        partial = true;
        /* Only attempt FD reception on the first successful iteration */
        local_fds = NULL;
        local_nfds = NULL;
    }

    ret = 1;

 cleanup:
    g_free(local_iov_head);
    return ret;
}
218
1dd91b22
PB
219int coroutine_mixed_fn qio_channel_readv_full_all(QIOChannel *ioc,
220 const struct iovec *iov,
221 size_t niov,
222 int **fds, size_t *nfds,
223 Error **errp)
e8ffaa31 224{
bebab91e 225 int ret = qio_channel_readv_full_all_eof(ioc, iov, niov, fds, nfds, errp);
e8ffaa31
EB
226
227 if (ret == 0) {
c90e3512 228 error_setg(errp, "Unexpected end-of-file before all data were read");
bebab91e 229 return -1;
e8ffaa31 230 }
bebab91e
EU
231 if (ret == 1) {
232 return 0;
233 }
234
e8ffaa31
EB
235 return ret;
236}
237
1dd91b22
PB
238int coroutine_mixed_fn qio_channel_writev_all(QIOChannel *ioc,
239 const struct iovec *iov,
240 size_t niov,
241 Error **errp)
bfa42387 242{
b88651cb 243 return qio_channel_writev_full_all(ioc, iov, niov, NULL, 0, 0, errp);
bfa42387
EU
244}
245
1dd91b22
PB
/*
 * Write the entire contents of @iov, retrying partial writes and
 * blocking (coroutine yield, or nested main loop otherwise) when the
 * channel would block. File descriptors @fds/@nfds, if any, are sent
 * with the first successful write only.
 *
 * Returns: 0 when everything was written, -1 on error with @errp set.
 */
int coroutine_mixed_fn qio_channel_writev_full_all(QIOChannel *ioc,
                                                   const struct iovec *iov,
                                                   size_t niov,
                                                   int *fds, size_t nfds,
                                                   int flags, Error **errp)
{
    int ret = -1;
    /* Local copy of the iovec so iov_discard_front() can mutate it */
    struct iovec *local_iov = g_new(struct iovec, niov);
    struct iovec *local_iov_head = local_iov;
    unsigned int nlocal_iov = niov;

    nlocal_iov = iov_copy(local_iov, nlocal_iov,
                          iov, niov,
                          0, iov_size(iov, niov));

    while (nlocal_iov > 0) {
        ssize_t len;

        len = qio_channel_writev_full(ioc, local_iov, nlocal_iov, fds,
                                      nfds, flags, errp);

        if (len == QIO_CHANNEL_ERR_BLOCK) {
            if (qemu_in_coroutine()) {
                qio_channel_yield(ioc, G_IO_OUT);
            } else {
                qio_channel_wait(ioc, G_IO_OUT);
            }
            continue;
        }
        if (len < 0) {
            goto cleanup;
        }

        iov_discard_front(&local_iov, &nlocal_iov, len);

        /* FDs are passed only once, alongside the first data written */
        fds = NULL;
        nfds = 0;
    }

    ret = 0;
 cleanup:
    g_free(local_iov_head);
    return ret;
}
290
666a3af9
DB
291ssize_t qio_channel_readv(QIOChannel *ioc,
292 const struct iovec *iov,
293 size_t niov,
294 Error **errp)
295{
84615a19 296 return qio_channel_readv_full(ioc, iov, niov, NULL, NULL, 0, errp);
666a3af9
DB
297}
298
299
300ssize_t qio_channel_writev(QIOChannel *ioc,
301 const struct iovec *iov,
302 size_t niov,
303 Error **errp)
304{
b88651cb 305 return qio_channel_writev_full(ioc, iov, niov, NULL, 0, 0, errp);
666a3af9
DB
306}
307
308
309ssize_t qio_channel_read(QIOChannel *ioc,
310 char *buf,
311 size_t buflen,
312 Error **errp)
313{
314 struct iovec iov = { .iov_base = buf, .iov_len = buflen };
84615a19 315 return qio_channel_readv_full(ioc, &iov, 1, NULL, NULL, 0, errp);
666a3af9
DB
316}
317
318
319ssize_t qio_channel_write(QIOChannel *ioc,
320 const char *buf,
321 size_t buflen,
322 Error **errp)
323{
324 struct iovec iov = { .iov_base = (char *)buf, .iov_len = buflen };
b88651cb 325 return qio_channel_writev_full(ioc, &iov, 1, NULL, 0, 0, errp);
666a3af9
DB
326}
327
328
1dd91b22
PB
329int coroutine_mixed_fn qio_channel_read_all_eof(QIOChannel *ioc,
330 char *buf,
331 size_t buflen,
332 Error **errp)
e8ffaa31
EB
333{
334 struct iovec iov = { .iov_base = buf, .iov_len = buflen };
335 return qio_channel_readv_all_eof(ioc, &iov, 1, errp);
336}
337
338
1dd91b22
PB
339int coroutine_mixed_fn qio_channel_read_all(QIOChannel *ioc,
340 char *buf,
341 size_t buflen,
342 Error **errp)
d4622e55
DB
343{
344 struct iovec iov = { .iov_base = buf, .iov_len = buflen };
345 return qio_channel_readv_all(ioc, &iov, 1, errp);
346}
347
348
1dd91b22
PB
349int coroutine_mixed_fn qio_channel_write_all(QIOChannel *ioc,
350 const char *buf,
351 size_t buflen,
352 Error **errp)
d4622e55
DB
353{
354 struct iovec iov = { .iov_base = (char *)buf, .iov_len = buflen };
355 return qio_channel_writev_all(ioc, &iov, 1, errp);
356}
357
358
666a3af9
DB
359int qio_channel_set_blocking(QIOChannel *ioc,
360 bool enabled,
361 Error **errp)
362{
363 QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
364 return klass->io_set_blocking(ioc, enabled, errp);
365}
366
367
06e0f098
SH
368void qio_channel_set_follow_coroutine_ctx(QIOChannel *ioc, bool enabled)
369{
370 ioc->follow_coroutine_ctx = enabled;
371}
372
373
666a3af9
DB
374int qio_channel_close(QIOChannel *ioc,
375 Error **errp)
376{
377 QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
378 return klass->io_close(ioc, errp);
379}
380
381
382GSource *qio_channel_create_watch(QIOChannel *ioc,
383 GIOCondition condition)
384{
385 QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
20f4aa26
DB
386 GSource *ret = klass->io_create_watch(ioc, condition);
387
388 if (ioc->name) {
389 g_source_set_name(ret, ioc->name);
390 }
391
392 return ret;
666a3af9
DB
393}
394
395
bf88c124 396void qio_channel_set_aio_fd_handler(QIOChannel *ioc,
06e0f098 397 AioContext *read_ctx,
bf88c124 398 IOHandler *io_read,
06e0f098 399 AioContext *write_ctx,
bf88c124
PB
400 IOHandler *io_write,
401 void *opaque)
402{
403 QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
404
06e0f098
SH
405 klass->io_set_aio_fd_handler(ioc, read_ctx, io_read, write_ctx, io_write,
406 opaque);
bf88c124
PB
407}
408
315409c7
PX
409guint qio_channel_add_watch_full(QIOChannel *ioc,
410 GIOCondition condition,
411 QIOChannelFunc func,
412 gpointer user_data,
413 GDestroyNotify notify,
414 GMainContext *context)
666a3af9
DB
415{
416 GSource *source;
417 guint id;
418
419 source = qio_channel_create_watch(ioc, condition);
420
421 g_source_set_callback(source, (GSourceFunc)func, user_data, notify);
422
315409c7 423 id = g_source_attach(source, context);
666a3af9
DB
424 g_source_unref(source);
425
426 return id;
427}
428
315409c7
PX
429guint qio_channel_add_watch(QIOChannel *ioc,
430 GIOCondition condition,
431 QIOChannelFunc func,
432 gpointer user_data,
433 GDestroyNotify notify)
434{
435 return qio_channel_add_watch_full(ioc, condition, func,
436 user_data, notify, NULL);
437}
438
439GSource *qio_channel_add_watch_source(QIOChannel *ioc,
440 GIOCondition condition,
441 QIOChannelFunc func,
442 gpointer user_data,
443 GDestroyNotify notify,
444 GMainContext *context)
445{
446 GSource *source;
447 guint id;
448
449 id = qio_channel_add_watch_full(ioc, condition, func,
450 user_data, notify, context);
451 source = g_main_context_find_source_by_id(context, id);
452 g_source_ref(source);
453 return source;
454}
455
666a3af9
DB
456
457int qio_channel_shutdown(QIOChannel *ioc,
458 QIOChannelShutdown how,
459 Error **errp)
460{
461 QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
462
463 if (!klass->io_shutdown) {
464 error_setg(errp, "Data path shutdown not supported");
465 return -1;
466 }
467
468 return klass->io_shutdown(ioc, how, errp);
469}
470
471
472void qio_channel_set_delay(QIOChannel *ioc,
473 bool enabled)
474{
475 QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
476
477 if (klass->io_set_delay) {
478 klass->io_set_delay(ioc, enabled);
479 }
480}
481
482
483void qio_channel_set_cork(QIOChannel *ioc,
484 bool enabled)
485{
486 QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
487
488 if (klass->io_set_cork) {
489 klass->io_set_cork(ioc, enabled);
490 }
491}
492
493
494off_t qio_channel_io_seek(QIOChannel *ioc,
495 off_t offset,
496 int whence,
497 Error **errp)
498{
499 QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
500
501 if (!klass->io_seek) {
502 error_setg(errp, "Channel does not support random access");
503 return -1;
504 }
505
506 return klass->io_seek(ioc, offset, whence, errp);
507}
508
b88651cb
LB
509int qio_channel_flush(QIOChannel *ioc,
510 Error **errp)
511{
512 QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc);
513
514 if (!klass->io_flush ||
515 !qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY)) {
516 return 0;
517 }
518
519 return klass->io_flush(ioc, errp);
520}
521
666a3af9 522
c4c497d2
PB
/*
 * fd handler invoked when the channel becomes readable: wake the
 * coroutine parked in qio_channel_yield(G_IO_IN), if any.
 */
static void qio_channel_restart_read(void *opaque)
{
    QIOChannel *ioc = opaque;
    /*
     * Atomically claim the coroutine so a concurrent
     * qio_channel_wake_read() cannot also resume it.
     */
    Coroutine *co = qatomic_xchg(&ioc->read_coroutine, NULL);

    if (!co) {
        return;
    }

    /* Assert that aio_co_wake() reenters the coroutine directly */
    assert(qemu_get_current_aio_context() ==
           qemu_coroutine_get_aio_context(co));
    aio_co_wake(co);
}
666a3af9 537
c4c497d2 538static void qio_channel_restart_write(void *opaque)
666a3af9 539{
c4c497d2 540 QIOChannel *ioc = opaque;
7c1f51bf
KW
541 Coroutine *co = qatomic_xchg(&ioc->write_coroutine, NULL);
542
543 if (!co) {
544 return;
545 }
c4c497d2 546
2a239e6e
KW
547 /* Assert that aio_co_wake() reenters the coroutine directly */
548 assert(qemu_get_current_aio_context() ==
549 qemu_coroutine_get_aio_context(co));
c4c497d2 550 aio_co_wake(co);
666a3af9
DB
551}
552
06e0f098
SH
/*
 * Register this coroutine as the waiter for @condition (G_IO_IN or
 * G_IO_OUT) and install the matching restart handler on the channel's
 * fd. If the opposite direction already has a waiter in the same
 * AioContext, its handler is (re)installed too, since
 * qio_channel_set_aio_fd_handler() replaces both handlers at once.
 */
static void coroutine_fn
qio_channel_set_fd_handlers(QIOChannel *ioc, GIOCondition condition)
{
    /* Either follow the coroutine's context, or use the iohandler one */
    AioContext *ctx = ioc->follow_coroutine_ctx ?
        qemu_coroutine_get_aio_context(qemu_coroutine_self()) :
        iohandler_get_aio_context();
    AioContext *read_ctx = NULL;
    IOHandler *io_read = NULL;
    AioContext *write_ctx = NULL;
    IOHandler *io_write = NULL;

    if (condition == G_IO_IN) {
        ioc->read_coroutine = qemu_coroutine_self();
        ioc->read_ctx = ctx;
        read_ctx = ctx;
        io_read = qio_channel_restart_read;

        /*
         * Thread safety: if the other coroutine is set and its AioContext
         * matches ours, then there is mutual exclusion between read and write
         * because they share a single thread and it's safe to set both read
         * and write fd handlers here. If the AioContext does not match ours,
         * then both threads may run in parallel but there is no shared state
         * to worry about.
         */
        if (ioc->write_coroutine && ioc->write_ctx == ctx) {
            write_ctx = ctx;
            io_write = qio_channel_restart_write;
        }
    } else if (condition == G_IO_OUT) {
        ioc->write_coroutine = qemu_coroutine_self();
        ioc->write_ctx = ctx;
        write_ctx = ctx;
        io_write = qio_channel_restart_write;
        if (ioc->read_coroutine && ioc->read_ctx == ctx) {
            read_ctx = ctx;
            io_read = qio_channel_restart_read;
        }
    } else {
        /* Only a single direction may be waited on at a time */
        abort();
    }

    qio_channel_set_aio_fd_handler(ioc, read_ctx, io_read,
                                   write_ctx, io_write, ioc);
}
598
06e0f098
SH
/*
 * Remove the fd handler for @condition after the coroutine has been
 * woken, preserving the opposite direction's handler when it shares
 * the same AioContext (the setter replaces both handlers at once).
 */
static void coroutine_fn
qio_channel_clear_fd_handlers(QIOChannel *ioc, GIOCondition condition)
{
    AioContext *read_ctx = NULL;
    IOHandler *io_read = NULL;
    AioContext *write_ctx = NULL;
    IOHandler *io_write = NULL;
    AioContext *ctx;

    if (condition == G_IO_IN) {
        ctx = ioc->read_ctx;
        read_ctx = ctx;
        io_read = NULL; /* clear the read handler */
        if (ioc->write_coroutine && ioc->write_ctx == ctx) {
            write_ctx = ctx;
            io_write = qio_channel_restart_write;
        }
    } else if (condition == G_IO_OUT) {
        ctx = ioc->write_ctx;
        write_ctx = ctx;
        io_write = NULL; /* clear the write handler */
        if (ioc->read_coroutine && ioc->read_ctx == ctx) {
            read_ctx = ctx;
            io_read = qio_channel_restart_read;
        }
    } else {
        /* Only a single direction may be cleared at a time */
        abort();
    }

    qio_channel_set_aio_fd_handler(ioc, read_ctx, io_read,
                                   write_ctx, io_write, ioc);
}
666a3af9
DB
631
/*
 * Park the current coroutine until the channel becomes readable
 * (G_IO_IN) or writable (G_IO_OUT). Must be called from coroutine
 * context; only one coroutine may wait per direction. The coroutine
 * may also be resumed early by an external reenter (e.g.
 * qio_channel_wake_read()), in which case the fd handler has already
 * cleared the coroutine pointer.
 */
void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                    GIOCondition condition)
{
    AioContext *ioc_ctx;

    assert(qemu_in_coroutine());
    ioc_ctx = qemu_coroutine_get_aio_context(qemu_coroutine_self());

    if (condition == G_IO_IN) {
        assert(!ioc->read_coroutine);
    } else if (condition == G_IO_OUT) {
        assert(!ioc->write_coroutine);
    } else {
        abort();
    }
    qio_channel_set_fd_handlers(ioc, condition);
    qemu_coroutine_yield();
    /* We must be resumed in our home AioContext thread */
    assert(in_aio_context_home_thread(ioc_ctx));

    /* Allow interrupting the operation by reentering the coroutine other than
     * through the aio_fd_handlers. */
    if (condition == G_IO_IN) {
        assert(ioc->read_coroutine == NULL);
    } else if (condition == G_IO_OUT) {
        assert(ioc->write_coroutine == NULL);
    }
    qio_channel_clear_fd_handlers(ioc, condition);
}
660
7c1f51bf
KW
661void qio_channel_wake_read(QIOChannel *ioc)
662{
663 Coroutine *co = qatomic_xchg(&ioc->read_coroutine, NULL);
664 if (co) {
665 aio_co_wake(co);
666 }
667}
666a3af9
DB
668
669static gboolean qio_channel_wait_complete(QIOChannel *ioc,
670 GIOCondition condition,
671 gpointer opaque)
672{
673 GMainLoop *loop = opaque;
674
675 g_main_loop_quit(loop);
676 return FALSE;
677}
678
679
/*
 * Block the calling (non-coroutine) thread until @condition becomes
 * true on the channel, by spinning a private GMainContext/GMainLoop
 * with a one-shot watch attached.
 */
void qio_channel_wait(QIOChannel *ioc,
                      GIOCondition condition)
{
    GMainContext *ctxt = g_main_context_new();
    GMainLoop *loop = g_main_loop_new(ctxt, TRUE);
    GSource *source;

    source = qio_channel_create_watch(ioc, condition);

    /* qio_channel_wait_complete() quits the loop when the watch fires */
    g_source_set_callback(source,
                          (GSourceFunc)qio_channel_wait_complete,
                          loop,
                          NULL);

    g_source_attach(source, ctxt);

    g_main_loop_run(loop);

    g_source_unref(source);
    g_main_loop_unref(loop);
    g_main_context_unref(ctxt);
}
702
703
a5897205
PB
/*
 * QOM instance finalizer: release per-channel resources. Any
 * coroutine still parked in qio_channel_yield() at this point is a
 * bug, hence the assertions.
 */
static void qio_channel_finalize(Object *obj)
{
    QIOChannel *ioc = QIO_CHANNEL(obj);

    /* Must not have coroutines in qio_channel_yield() */
    assert(!ioc->read_coroutine);
    assert(!ioc->write_coroutine);

    g_free(ioc->name);

#ifdef _WIN32
    /* Windows-only event handle used for I/O notification */
    if (ioc->event) {
        CloseHandle(ioc->event);
    }
#endif
}
a5897205 720
666a3af9
DB
/*
 * QOM type registration: QIOChannel is an abstract base class;
 * concrete transports (socket, file, TLS, ...) subclass it.
 */
static const TypeInfo qio_channel_info = {
    .parent = TYPE_OBJECT,
    .name = TYPE_QIO_CHANNEL,
    .instance_size = sizeof(QIOChannel),
    .instance_finalize = qio_channel_finalize,
    .abstract = true, /* never instantiated directly */
    .class_size = sizeof(QIOChannelClass),
};
729
730
/* Register the QIOChannel base type with QOM at module-init time. */
static void qio_channel_register_types(void)
{
    type_register_static(&qio_channel_info);
}


type_init(qio_channel_register_types);