]> git.proxmox.com Git - qemu-server.git/blob - qmeventd/qmeventd.c
qmeventd: VMID from PID: don't fail immediately when encountering unexpected entry
[qemu-server.git] / qmeventd / qmeventd.c
1 // SPDX-License-Identifier: AGPL-3.0-or-later
2 /*
3 Copyright (C) 2018 - 2021 Proxmox Server Solutions GmbH
4
5 Author: Dominik Csapak <d.csapak@proxmox.com>
6 Author: Stefan Reiter <s.reiter@proxmox.com>
7
8 Description:
9
10 qmeventd listens on a given socket, and waits for qemu processes to
11 connect. After accepting a connection qmeventd waits for shutdown events
12 followed by the closing of the socket. Once that happens `qm cleanup` will
13 be executed with following three arguments:
14 VMID <graceful> <guest>
15 Where `graceful` can be `1` or `0` depending if shutdown event was observed
16 before the socket got closed. The second parameter `guest` is also boolean
17 `1` or `0` depending if the shutdown was requested from the guest OS
18 (i.e., the "inside").
19 */
20
21 #ifndef _GNU_SOURCE
22 #define _GNU_SOURCE
23 #endif
24
25 #include <errno.h>
26 #include <fcntl.h>
27 #include <gmodule.h>
28 #include <json.h>
29 #include <signal.h>
30 #include <stdbool.h>
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <sys/epoll.h>
35 #include <sys/socket.h>
36 #include <sys/types.h>
37 #include <sys/un.h>
38 #include <sys/wait.h>
39 #include <unistd.h>
40 #include <time.h>
41
42 #include "qmeventd.h"
43
#define DEFAULT_KILL_TIMEOUT 60

// -v flag; presumably consulted by VERBOSE_PRINT (defined in qmeventd.h) - confirm
static int verbose = 0;
// -t flag: grace period in seconds before a terminated QEMU gets SIGKILLed
static int kill_timeout = DEFAULT_KILL_TIMEOUT;
// main epoll instance, created in main() and shared by all client handling
static int epoll_fd = 0;
// argv[0], used by usage()
static const char *progname;
GHashTable *vm_clients; // key=vmid (freed on remove), value=*Client (free manually)
GSList *forced_cleanups; // Clients awaiting SIGKILL once their timeout expires
// non-zero while forced_cleanups may be non-empty; shortens the epoll_wait timeout
static int needs_cleanup = 0;
53
54 /*
55 * Helper functions
56 */
57
58 static void
59 usage()
60 {
61 fprintf(stderr, "Usage: %s [-f] [-v] PATH\n", progname);
62 fprintf(stderr, " -f run in foreground (default: false)\n");
63 fprintf(stderr, " -v verbose (default: false)\n");
64 fprintf(stderr, " -t <s> kill timeout (default: %ds)\n", DEFAULT_KILL_TIMEOUT);
65 fprintf(stderr, " PATH use PATH for socket\n");
66 }
67
68 static pid_t
69 get_pid_from_fd(int fd)
70 {
71 struct ucred credentials = { .pid = 0, .uid = 0, .gid = 0 };
72 socklen_t len = sizeof(struct ucred);
73 log_neg(getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &credentials, &len), "getsockopt");
74 return credentials.pid;
75 }
76
/*
 * parses the vmid from the qemu.slice entry of /proc/<pid>/cgroup
 *
 * Each line of the cgroup file has the form "id:controllers:path"; we look
 * for a path directly below /qemu.slice/ whose final component is
 * "<vmid>.scope". Unexpected entries are skipped instead of aborting the
 * scan, so unrelated cgroup lines cannot break detection.
 *
 * Returns the parsed VMID, or 0 on error (file unreadable, no matching
 * entry, or malformed scope name).
 */
static unsigned long
get_vmid_from_pid(pid_t pid)
{
    // "/proc/%d/cgroup" with any 32-bit pid fits comfortably in 32 bytes
    char filename[32] = { 0 };
    int len = snprintf(filename, sizeof(filename), "/proc/%d/cgroup", pid);
    if (len < 0) {
        fprintf(stderr, "error during snprintf for %d: %s\n", pid,
                strerror(errno));
        return 0;
    }
    if ((size_t)len >= sizeof(filename)) {
        fprintf(stderr, "error: pid %d too long\n", pid);
        return 0;
    }
    // "e" = O_CLOEXEC so the fd does not leak into forked cleanup children
    FILE *fp = fopen(filename, "re");
    if (fp == NULL) {
        fprintf(stderr, "error opening %s: %s\n", filename, strerror(errno));
        return 0;
    }

    unsigned long vmid = 0;
    char *buf = NULL;
    size_t buflen = 0;

    while (getline(&buf, &buflen, fp) >= 0) {
        // the cgroup path is everything after the last ':' on the line
        char *cgroup_path = strrchr(buf, ':');
        if (!cgroup_path) {
            fprintf(stderr, "unexpected cgroup entry %s\n", buf);
            continue;
        }
        cgroup_path++;

        // only entries below qemu.slice are of interest
        if (strncmp(cgroup_path, "/qemu.slice/", 12)) {
            continue;
        }

        // the final path component should be "<vmid>.scope"
        char *vmid_start = strrchr(buf, '/');
        if (!vmid_start) {
            fprintf(stderr, "unexpected cgroup entry %s\n", buf);
            continue;
        }
        vmid_start++;

        // reject empty and negative values before handing off to strtoul
        if (vmid_start[0] == '-' || vmid_start[0] == '\0') {
            fprintf(stderr, "invalid vmid in cgroup entry %s\n", buf);
            continue;
        }

        errno = 0;
        char *endptr = NULL;
        vmid = strtoul(vmid_start, &endptr, 10);
        // the digits must be followed directly by ".scope"
        if (!endptr || strncmp(endptr, ".scope", 6)) {
            fprintf(stderr, "unexpected cgroup entry %s\n", buf);
            vmid = 0;
            continue;
        }
        if (errno != 0) {
            fprintf(stderr, "error parsing vmid for %d: %s\n", pid, strerror(errno));
            vmid = 0;
        }

        // a qemu.slice scope entry was found (parsed or not) - stop scanning
        goto ret;
    }

    // NOTE(review): errno here may be stale from an earlier fprintf/strtoul in
    // a skipped iteration rather than set by getline() failing - confirm
    if (errno) {
        fprintf(stderr, "error parsing vmid for %d: %s\n", pid, strerror(errno));
    } else {
        fprintf(stderr, "error parsing vmid for %d: no matching qemu.slice cgroup entry\n", pid);
    }

ret:
    free(buf);
    fclose(fp);
    return vmid;
}
155
/*
 * Write 'len' bytes from 'buf' to 'fd', retrying when interrupted by a
 * signal (EINTR). Returns true only if the entire buffer was written by
 * a single successful write().
 */
static bool
must_write(int fd, const char *buf, size_t len)
{
    ssize_t written;
    for (;;) {
        written = write(fd, buf, len);
        if (written >= 0 || errno != EINTR) {
            break;
        }
    }
    return written == (ssize_t)len;
}
166
167 /*
168 * qmp handling functions
169 */
170
171 static void
172 send_qmp_cmd(struct Client *client, const char *buf, size_t len)
173 {
174 if (!must_write(client->fd, buf, len - 1)) {
175 fprintf(stderr, "%s: cannot send QMP message\n", client->qemu.vmid);
176 cleanup_client(client);
177 }
178 }
179
/*
 * Handle the QMP capabilities greeting, the first message a QEMU process
 * sends. Resolve the sender's VMID from its cgroup, register it in the
 * VMID->client table and answer with 'qmp_capabilities' to complete the
 * handshake. On failure to determine the VMID the client is torn down.
 */
void
handle_qmp_handshake(struct Client *client)
{
    VERBOSE_PRINT("pid%d: got QMP handshake, assuming QEMU client\n", client->pid);

    // extract vmid from cmdline, now that we know it's a QEMU process
    unsigned long vmid = get_vmid_from_pid(client->pid);
    // vmid 0 means the lookup failed; also guard against snprintf truncation
    int res = snprintf(client->qemu.vmid, sizeof(client->qemu.vmid), "%lu", vmid);
    if (vmid == 0 || res < 0 || res >= (int)sizeof(client->qemu.vmid)) {
        fprintf(stderr, "could not get vmid from pid %d\n", client->pid);
        cleanup_client(client);
        return;
    }

    VERBOSE_PRINT("pid%d: assigned VMID: %s\n", client->pid, client->qemu.vmid);
    client->type = CLIENT_QEMU;
    // NOTE(review): g_hash_table_insert() returns FALSE when the key already
    // existed (the old entry is replaced), so this branch really signals a
    // duplicate VMID rather than a failed insertion - confirm message wording
    if(!g_hash_table_insert(vm_clients, strdup(client->qemu.vmid), client)) {
        // not fatal, just means backup handling won't work
        fprintf(stderr, "%s: could not insert client into VMID->client table\n",
                client->qemu.vmid);
    }

    static const char qmp_answer[] = "{\"execute\":\"qmp_capabilities\"}\n";
    send_qmp_cmd(client, qmp_answer, sizeof(qmp_answer));
}
205
/*
 * Handle an asynchronous QMP event. Only SHUTDOWN is acted upon: it marks
 * the shutdown as graceful, records whether the guest OS initiated it and
 * triggers a terminate check (which keeps the process alive while a
 * backup is running).
 */
void
handle_qmp_event(struct Client *client, struct json_object *obj)
{
    struct json_object *event;
    if (!json_object_object_get_ex(obj, "event", &event)) {
        return;
    }
    VERBOSE_PRINT("%s: got QMP event: %s\n", client->qemu.vmid, json_object_get_string(event));

    if (client->state == STATE_TERMINATING) {
        // QEMU sometimes sends a second SHUTDOWN after SIGTERM, ignore
        VERBOSE_PRINT("%s: event was after termination, ignoring\n", client->qemu.vmid);
        return;
    }

    // event, check if shutdown and get guest parameter
    if (!strcmp(json_object_get_string(event), "SHUTDOWN")) {
        client->qemu.graceful = 1;
        struct json_object *data;
        struct json_object *guest;
        // "data.guest" is true when the shutdown was requested from inside
        // the guest OS
        if (json_object_object_get_ex(obj, "data", &data) &&
            json_object_object_get_ex(data, "guest", &guest))
        {
            client->qemu.guest = (unsigned short)json_object_get_boolean(guest);
        }

        // check if a backup is running and kill QEMU process if not
        terminate_check(client);
    }
}
236
/*
 * Decide whether the VM process should be terminated by querying its run
 * state via QMP ('query-status'). If another request is currently in
 * flight, the check is queued and re-run from handle_qmp_return() once
 * the pending response has been processed.
 */
void
terminate_check(struct Client *client)
{
    if (client->state != STATE_IDLE) {
        // if we're already in a request, queue this one until after
        VERBOSE_PRINT("%s: terminate_check queued\n", client->qemu.vmid);
        client->qemu.term_check_queued = true;
        return;
    }

    client->qemu.term_check_queued = false;

    VERBOSE_PRINT("%s: query-status\n", client->qemu.vmid);
    client->state = STATE_EXPECT_STATUS_RESP;
    static const char qmp_req[] = "{\"execute\":\"query-status\"}\n";
    send_qmp_cmd(client, qmp_req, sizeof(qmp_req));
}
254
/*
 * Handle a QMP 'return' object ('error' object when 'error' is true) in
 * the context of the client's current state machine state:
 *  - STATE_EXPECT_STATUS_RESP: a query-status answer; terminate the VM
 *    process if it is neither active nor being backed up
 *  - STATE_HANDSHAKE: the empty return acknowledging qmp_capabilities
 *  - STATE_TERMINATING: the empty return after 'quit', nothing to do
 *  - STATE_IDLE: unexpected, logged only
 * A terminate_check that was queued while busy is always re-run at the end.
 */
void
handle_qmp_return(struct Client *client, struct json_object *data, bool error)
{
    if (error) {
        const char *msg = "n/a";
        struct json_object *desc;
        if (json_object_object_get_ex(data, "desc", &desc)) {
            msg = json_object_get_string(desc);
        }
        fprintf(stderr, "%s: received error from QMP: %s\n",
                client->qemu.vmid, msg);
        client->state = STATE_IDLE;
        goto out;
    }

    struct json_object *status;
    json_bool has_status = data &&
        json_object_object_get_ex(data, "status", &status);

    // the VM counts as active while running or merely paused
    bool active = false;
    if (has_status) {
        const char *status_str = json_object_get_string(status);
        active = status_str &&
            (!strcmp(status_str, "running") || !strcmp(status_str, "paused"));
    }

    switch (client->state) {
        case STATE_EXPECT_STATUS_RESP:
            client->state = STATE_IDLE;
            if (active) {
                VERBOSE_PRINT("%s: got status: VM is active\n", client->qemu.vmid);
            } else if (!client->qemu.backup) {
                terminate_client(client);
            } else {
                // if we're in a backup, don't do anything, vzdump will notify
                // us when the backup finishes
                VERBOSE_PRINT("%s: not active, but running backup - keep alive\n",
                              client->qemu.vmid);
            }
            break;

        // this means we received the empty return from our handshake answer
        case STATE_HANDSHAKE:
            client->state = STATE_IDLE;
            VERBOSE_PRINT("%s: QMP handshake complete\n", client->qemu.vmid);
            break;

        // we expect an empty return object after sending quit
        case STATE_TERMINATING:
            break;
        case STATE_IDLE:
            VERBOSE_PRINT("%s: spurious return value received\n",
                          client->qemu.vmid);
            break;
    }

out:
    // a terminate_check that arrived while we were busy runs now
    if (client->qemu.term_check_queued) {
        terminate_check(client);
    }
}
316
317 /*
318 * VZDump specific client functions
319 */
320
321 void
322 handle_vzdump_handshake(struct Client *client, struct json_object *data)
323 {
324 client->state = STATE_IDLE;
325
326 struct json_object *vmid_obj;
327 json_bool has_vmid = data && json_object_object_get_ex(data, "vmid", &vmid_obj);
328
329 if (!has_vmid) {
330 VERBOSE_PRINT("pid%d: invalid vzdump handshake: no vmid\n", client->pid);
331 return;
332 }
333
334 const char *vmid_str = json_object_get_string(vmid_obj);
335
336 if (!vmid_str) {
337 VERBOSE_PRINT("pid%d: invalid vzdump handshake: vmid is not a string\n", client->pid);
338 return;
339 }
340
341 int res = snprintf(client->vzdump.vmid, sizeof(client->vzdump.vmid), "%s", vmid_str);
342 if (res < 0 || res >= (int)sizeof(client->vzdump.vmid)) {
343 VERBOSE_PRINT("pid%d: invalid vzdump handshake: vmid too long or invalid\n", client->pid);
344 return;
345 }
346
347 struct Client *vmc = (struct Client*) g_hash_table_lookup(vm_clients, client->vzdump.vmid);
348 if (vmc) {
349 vmc->qemu.backup = true;
350
351 // only mark as VZDUMP once we have set everything up, otherwise 'cleanup'
352 // might try to access an invalid value
353 client->type = CLIENT_VZDUMP;
354 VERBOSE_PRINT("%s: vzdump backup started\n", client->vzdump.vmid);
355 } else {
356 VERBOSE_PRINT("%s: vzdump requested backup start for unregistered VM\n", client->vzdump.vmid);
357 }
358 }
359
360 /*
361 * client management functions
362 */
363
364 void
365 add_new_client(int client_fd)
366 {
367 struct Client *client = calloc(sizeof(struct Client), 1);
368 if (client == NULL) {
369 fprintf(stderr, "could not add new client - allocation failed!\n");
370 fflush(stderr);
371 return;
372 }
373 client->state = STATE_HANDSHAKE;
374 client->type = CLIENT_NONE;
375 client->fd = client_fd;
376 client->pid = get_pid_from_fd(client_fd);
377 if (client->pid == 0) {
378 fprintf(stderr, "could not get pid from client\n");
379 goto err;
380 }
381
382 struct epoll_event ev;
383 ev.events = EPOLLIN;
384 ev.data.ptr = client;
385 int res = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client_fd, &ev);
386 if (res < 0) {
387 perror("epoll_ctl client add");
388 goto err;
389 }
390
391 VERBOSE_PRINT("added new client, pid: %d\n", client->pid);
392
393 return;
394 err:
395 (void)close(client_fd);
396 free(client);
397 }
398
399 static void
400 cleanup_qemu_client(struct Client *client)
401 {
402 unsigned short graceful = client->qemu.graceful;
403 unsigned short guest = client->qemu.guest;
404 char vmid[sizeof(client->qemu.vmid)];
405 strncpy(vmid, client->qemu.vmid, sizeof(vmid));
406 g_hash_table_remove(vm_clients, &vmid); // frees key, ignore errors
407 VERBOSE_PRINT("%s: executing cleanup (graceful: %d, guest: %d)\n",
408 vmid, graceful, guest);
409
410 int pid = fork();
411 if (pid < 0) {
412 fprintf(stderr, "fork failed: %s\n", strerror(errno));
413 return;
414 }
415 if (pid == 0) {
416 char *script = "/usr/sbin/qm";
417
418 char *args[] = {
419 script,
420 "cleanup",
421 vmid,
422 graceful ? "1" : "0",
423 guest ? "1" : "0",
424 NULL
425 };
426
427 execvp(script, args);
428 perror("execvp");
429 _exit(1);
430 }
431 }
432
/*
 * Tear down a client: deregister it from epoll, close its socket, run
 * type-specific cleanup and free the Client struct.
 *
 * CLIENT_QEMU: spawn `qm cleanup` for the VM. CLIENT_VZDUMP: clear the
 * backup flag on the VM being backed up and re-run its terminate check.
 * CLIENT_NONE: nothing beyond closing the socket.
 */
void
cleanup_client(struct Client *client)
{
    log_neg(epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->fd, NULL), "epoll del");
    (void)close(client->fd);

    struct Client *vmc;
    switch (client->type) {
        case CLIENT_QEMU:
            cleanup_qemu_client(client);
            break;

        case CLIENT_VZDUMP:
            vmc = (struct Client*) g_hash_table_lookup(vm_clients, client->vzdump.vmid);
            if (vmc) {
                VERBOSE_PRINT("%s: backup ended\n", client->vzdump.vmid);
                vmc->qemu.backup = false;
                // the VM may have shut down during the backup and only been
                // kept alive because of it - re-check now
                terminate_check(vmc);
            }
            break;

        case CLIENT_NONE:
            // do nothing, only close socket
            break;
    }

    // close the pidfd acquired in terminate_client(), if any
    if (client->pidfd > 0) {
        (void)close(client->pidfd);
    }
    VERBOSE_PRINT("removing %s from forced cleanups\n", client->qemu.vmid);
    forced_cleanups = g_slist_remove(forced_cleanups, client);
    free(client);
}
466
/*
 * Ask a QEMU client to exit: try a QMP 'quit' first, falling back to
 * SIGTERM when the write fails. The client is then queued on the forced
 * cleanup list so it gets SIGKILLed after 'kill_timeout' seconds if it
 * does not go away by itself.
 */
void
terminate_client(struct Client *client)
{
    VERBOSE_PRINT("%s: terminating client (pid %d)\n", client->qemu.vmid, client->pid);

    client->state = STATE_TERMINATING;

    // open a pidfd before kill for later cleanup
    int pidfd = pidfd_open(client->pid, 0);
    if (pidfd < 0) {
        switch (errno) {
            case ESRCH:
                // process already dead for some reason, cleanup done
                VERBOSE_PRINT("%s: failed to open pidfd, process already dead (pid %d)\n",
                              client->qemu.vmid, client->pid);
                return;

            // otherwise fall back to just using the PID directly, but don't
            // print if we only failed because we're running on an older kernel
            case ENOSYS:
                break;
            default:
                perror("failed to open QEMU pidfd for cleanup");
                break;
        }
    }

    // try to send a 'quit' command first, fallback to SIGTERM of the pid
    static const char qmp_quit_command[] = "{\"execute\":\"quit\"}\n";
    VERBOSE_PRINT("%s: sending 'quit' via QMP\n", client->qemu.vmid);
    if (!must_write(client->fd, qmp_quit_command, sizeof(qmp_quit_command) - 1)) {
        VERBOSE_PRINT("%s: sending 'SIGTERM' to pid %d\n", client->qemu.vmid, client->pid);
        int err = kill(client->pid, SIGTERM);
        log_neg(err, "kill");
    }

    time_t timeout = time(NULL) + kill_timeout;

    // a negative pidfd (open failed) is stored as-is; all later users guard
    // with 'pidfd > 0' and fall back to plain kill(pid)
    client->pidfd = pidfd;
    client->timeout = timeout;

    forced_cleanups = g_slist_prepend(forced_cleanups, (void *)client);
    needs_cleanup = 1;
}
511
/*
 * Read available data from a client socket and incrementally parse it as
 * JSON. Each complete top-level object is dispatched based on which key
 * it carries: "QMP" (greeting), "event", "return", "error" or "vzdump".
 * Incomplete objects stay buffered until more data arrives; oversized or
 * unparsable input is discarded.
 */
void
handle_client(struct Client *client)
{
    VERBOSE_PRINT("pid%d: entering handle\n", client->pid);
    ssize_t len;
    // retry reads interrupted by signals
    do {
        len = read(client->fd, (client->buf+client->buflen),
                   sizeof(client->buf) - client->buflen);
    } while (len < 0 && errno == EINTR);

    if (len < 0) {
        // EAGAIN/EWOULDBLOCK just mean "no more data" on this nonblocking fd
        if (!(errno == EAGAIN || errno == EWOULDBLOCK)) {
            log_neg((int)len, "read");
            cleanup_client(client);
        }
        return;
    } else if (len == 0) {
        // EOF: the peer closed its end; run the type-specific cleanup
        VERBOSE_PRINT("pid%d: got EOF\n", client->pid);
        cleanup_client(client);
        return;
    }

    VERBOSE_PRINT("pid%d: read %ld bytes\n", client->pid, len);
    client->buflen += len;

    struct json_tokener *tok = json_tokener_new();
    struct json_object *jobj = NULL;
    enum json_tokener_error jerr = json_tokener_success;
    // parse as many complete JSON objects as the buffer currently holds
    while (jerr == json_tokener_success && client->buflen != 0) {
        jobj = json_tokener_parse_ex(tok, client->buf, (int)client->buflen);
        jerr = json_tokener_get_error(tok);
        unsigned int offset = (unsigned int)tok->char_offset;
        switch (jerr) {
            case json_tokener_success:
                // move rest from buffer to front
                memmove(client->buf, client->buf + offset, client->buflen - offset);
                client->buflen -= offset;
                // NOTE(review): the handlers below can call cleanup_client()
                // on error, which frees 'client' while this loop keeps
                // dereferencing it - confirm this cannot occur in practice
                if (json_object_is_type(jobj, json_type_object)) {
                    struct json_object *obj;
                    if (json_object_object_get_ex(jobj, "QMP", &obj)) {
                        handle_qmp_handshake(client);
                    } else if (json_object_object_get_ex(jobj, "event", &obj)) {
                        handle_qmp_event(client, jobj);
                    } else if (json_object_object_get_ex(jobj, "return", &obj)) {
                        handle_qmp_return(client, obj, false);
                    } else if (json_object_object_get_ex(jobj, "error", &obj)) {
                        handle_qmp_return(client, obj, true);
                    } else if (json_object_object_get_ex(jobj, "vzdump", &obj)) {
                        handle_vzdump_handshake(client, obj);
                    } // else ignore message
                }
                break;
            case json_tokener_continue:
                // object incomplete: wait for more data, unless the buffer
                // is already full, in which case drop it
                if (client->buflen >= sizeof(client->buf)) {
                    VERBOSE_PRINT("pid%d: msg too large, discarding buffer\n", client->pid);
                    memset(client->buf, 0, sizeof(client->buf));
                    client->buflen = 0;
                } // else we have enough space try again after next read
                break;
            default:
                VERBOSE_PRINT("pid%d: parse error: %d, discarding buffer\n", client->pid, jerr);
                memset(client->buf, 0, client->buflen);
                client->buflen = 0;
                break;
        }
        json_object_put(jobj);
    }
    json_tokener_free(tok);
}
581
/*
 * GFunc callback for the forced_cleanups list: once an entry's timeout has
 * expired, SIGKILL its QEMU process (via pidfd when available, falling
 * back to the plain PID) and drop the entry from the list.
 */
static void
sigkill(void *ptr, void *time_ptr)
{
    struct Client *data = ptr;
    int err;

    // grace period not yet over - keep the entry for a later pass
    if (data->timeout != 0 && data->timeout > *(time_t *)time_ptr) {
        return;
    }

    // prefer the pidfd: unlike kill(pid), it cannot hit a reused PID
    if (data->pidfd > 0) {
        err = pidfd_send_signal(data->pidfd, SIGKILL, NULL, 0);
        (void)close(data->pidfd);
        data->pidfd = -1;
    } else {
        err = kill(data->pid, SIGKILL);
    }

    if (err < 0) {
        // ESRCH means the process exited on its own - nothing to report
        if (errno != ESRCH) {
            fprintf(stderr, "SIGKILL cleanup of pid '%d' failed - %s\n",
                    data->pid, strerror(errno));
        }
    } else {
        // signal delivered: the graceful 'quit'/SIGTERM path evidently failed
        fprintf(stderr, "cleanup failed, terminating pid '%d' with SIGKILL\n",
                data->pid);
    }

    data->timeout = 0;

    // remove ourselves from the list
    // NOTE(review): removing the current element inside g_slist_foreach()
    // relies on the iterator caching the next pointer before invoking the
    // callback - safe with current GLib, but worth confirming
    forced_cleanups = g_slist_remove(forced_cleanups, ptr);
}
615
616 static void
617 handle_forced_cleanup()
618 {
619 if (g_slist_length(forced_cleanups) > 0) {
620 VERBOSE_PRINT("clearing forced cleanup backlog\n");
621 time_t cur_time = time(NULL);
622 g_slist_foreach(forced_cleanups, sigkill, &cur_time);
623 }
624 needs_cleanup = g_slist_length(forced_cleanups) > 0;
625 }
626
/*
 * Entry point: parse options, bind the unix socket given as PATH, set up
 * epoll and loop forever, accepting new clients and handling their
 * messages. While forced cleanups are pending, epoll_wait runs with a 10s
 * timeout so the backlog is re-checked even without socket activity.
 */
int
main(int argc, char *argv[])
{
    int opt;
    int daemonize = 1;
    char *socket_path = NULL;
    progname = argv[0];

    while ((opt = getopt(argc, argv, "hfvt:")) != -1) {
        switch (opt) {
            case 'f':
                daemonize = 0;
                break;
            case 'v':
                verbose = 1;
                break;
            case 't':
                errno = 0;
                char *endptr = NULL;
                kill_timeout = strtoul(optarg, &endptr, 10);
                // reject non-numeric input, trailing garbage and zero
                if (errno != 0 || *endptr != '\0' || kill_timeout == 0) {
                    usage();
                    exit(EXIT_FAILURE);
                }
                break;
            case 'h':
                usage();
                exit(EXIT_SUCCESS);
                break;
            default:
                usage();
                exit(EXIT_FAILURE);
        }
    }

    // the socket path is a required positional argument
    if (optind >= argc) {
        usage();
        exit(EXIT_FAILURE);
    }

    // auto-reap the forked `qm cleanup` children; we never wait() on them
    signal(SIGCHLD, SIG_IGN);

    socket_path = argv[optind];

    int sock = socket(AF_UNIX, SOCK_STREAM, 0);
    bail_neg(sock, "socket");

    struct sockaddr_un addr;
    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    // NOTE(review): a socket_path longer than sun_path is silently truncated
    strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path) - 1);

    // remove a stale socket left over from a previous run before binding
    unlink(socket_path);
    bail_neg(bind(sock, (struct sockaddr*)&addr, sizeof(addr)), "bind");

    struct epoll_event ev, events[1];
    epoll_fd = epoll_create1(EPOLL_CLOEXEC);
    bail_neg(epoll_fd, "epoll_create1");

    ev.events = EPOLLIN;
    ev.data.fd = sock;
    bail_neg(epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev), "epoll_ctl");

    bail_neg(listen(sock, 10), "listen");

    if (daemonize) {
        bail_neg(daemon(0, 1), "daemon");
    }

    // keys are strdup'd vmid strings freed by the table; values are Client
    // pointers owned by the connection handling code
    vm_clients = g_hash_table_new_full(g_str_hash, g_str_equal, free, NULL);

    int nevents;

    for(;;) {
        // 10s timeout while cleanups are pending, otherwise block forever
        nevents = epoll_wait(epoll_fd, events, 1, needs_cleanup ? 10*1000 : -1);
        if (nevents < 0 && errno == EINTR) {
            continue;
        }
        bail_neg(nevents, "epoll_wait");

        for (int n = 0; n < nevents; n++) {
            if (events[n].data.fd == sock) {
                // activity on the listening socket: accept a new client
                int conn_sock = accept4(sock, NULL, NULL, SOCK_NONBLOCK | SOCK_CLOEXEC);
                log_neg(conn_sock, "accept");
                if (conn_sock > -1) {
                    add_new_client(conn_sock);
                }
            } else {
                handle_client((struct Client *)events[n].data.ptr);
            }
        }
        handle_forced_cleanup();
    }
}