]>
Commit | Line | Data |
---|---|---|
1 | // SPDX-License-Identifier: AGPL-3.0-or-later | |
2 | /* | |
3 | Copyright (C) 2018 - 2021 Proxmox Server Solutions GmbH | |
4 | ||
5 | Author: Dominik Csapak <d.csapak@proxmox.com> | |
6 | Author: Stefan Reiter <s.reiter@proxmox.com> | |
7 | ||
8 | Description: | |
9 | ||
10 | qmeventd listens on a given socket, and waits for qemu processes to | |
11 | connect. After accepting a connection qmeventd waits for shutdown events | |
12 | followed by the closing of the socket. Once that happens `qm cleanup` will | |
13 | be executed with following three arguments: | |
14 | VMID <graceful> <guest> | |
    Where `graceful` can be `1` or `0` depending on whether a shutdown event
    was observed before the socket got closed. The second parameter `guest` is
    also a boolean `1` or `0` depending on whether the shutdown was requested
    from the guest OS (i.e., the "inside").
19 | */ | |
20 | ||
21 | #ifndef _GNU_SOURCE | |
22 | #define _GNU_SOURCE | |
23 | #endif | |
24 | ||
25 | #include <errno.h> | |
26 | #include <fcntl.h> | |
27 | #include <gmodule.h> | |
28 | #include <json.h> | |
29 | #include <signal.h> | |
30 | #include <stdbool.h> | |
31 | #include <stdio.h> | |
32 | #include <stdlib.h> | |
33 | #include <string.h> | |
34 | #include <sys/epoll.h> | |
35 | #include <sys/socket.h> | |
36 | #include <sys/types.h> | |
37 | #include <sys/un.h> | |
38 | #include <sys/wait.h> | |
39 | #include <unistd.h> | |
40 | #include <time.h> | |
41 | ||
42 | #include "qmeventd.h" | |
43 | ||
// default number of seconds to wait after requesting termination before SIGKILL
#define DEFAULT_KILL_TIMEOUT 60

static int verbose = 0;                         // set by -v, enables VERBOSE_PRINT output
static int kill_timeout = DEFAULT_KILL_TIMEOUT; // set by -t, seconds until forced SIGKILL
static int epoll_fd = 0;                        // single epoll instance for listen + client fds
static const char *progname;                    // argv[0], used by usage()
GHashTable *vm_clients; // key=vmid (freed on remove), value=*Client (free manually)
GSList *forced_cleanups; // clients awaiting forced (SIGKILL) cleanup, see sigkill()
static int needs_cleanup = 0; // non-zero while forced_cleanups is non-empty
53 | ||
54 | /* | |
55 | * Helper functions | |
56 | */ | |
57 | ||
58 | static void | |
59 | usage() | |
60 | { | |
61 | fprintf(stderr, "Usage: %s [-f] [-v] PATH\n", progname); | |
62 | fprintf(stderr, " -f run in foreground (default: false)\n"); | |
63 | fprintf(stderr, " -v verbose (default: false)\n"); | |
64 | fprintf(stderr, " -t <s> kill timeout (default: %ds)\n", DEFAULT_KILL_TIMEOUT); | |
65 | fprintf(stderr, " PATH use PATH for socket\n"); | |
66 | } | |
67 | ||
68 | static pid_t | |
69 | get_pid_from_fd(int fd) | |
70 | { | |
71 | struct ucred credentials = { .pid = 0, .uid = 0, .gid = 0 }; | |
72 | socklen_t len = sizeof(struct ucred); | |
73 | log_neg(getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &credentials, &len), "getsockopt"); | |
74 | return credentials.pid; | |
75 | } | |
76 | ||
/*
 * parses the vmid from the qemu.slice entry of /proc/<pid>/cgroup
 *
 * Returns the parsed VMID, or 0 on any error (0 is never a valid VMID).
 */
static unsigned long
get_vmid_from_pid(pid_t pid)
{
    char filename[32] = { 0 };
    int len = snprintf(filename, sizeof(filename), "/proc/%d/cgroup", pid);
    if (len < 0) {
        fprintf(stderr, "error during snprintf for %d: %s\n", pid,
            strerror(errno));
        return 0;
    }
    if ((size_t)len >= sizeof(filename)) {
        fprintf(stderr, "error: pid %d too long\n", pid);
        return 0;
    }
    // "e" = O_CLOEXEC so the fd does not leak into forked cleanup children
    FILE *fp = fopen(filename, "re");
    if (fp == NULL) {
        fprintf(stderr, "error opening %s: %s\n", filename, strerror(errno));
        return 0;
    }

    unsigned long vmid = 0;
    char *buf = NULL;
    size_t buflen = 0;

    while (getline(&buf, &buflen, fp) >= 0) {
        // each line is "<id>:<controllers>:<cgroup path>"; the path starts
        // after the last ':'
        char *cgroup_path = strrchr(buf, ':');
        if (!cgroup_path) {
            fprintf(stderr, "unexpected cgroup entry %s\n", buf);
            continue;
        }
        cgroup_path++;

        // only the qemu.slice entry contains the VMID we are after
        if (strncmp(cgroup_path, "/qemu.slice/", 12)) {
            continue;
        }

        // the scope name is the last path component: ".../<vmid>.scope"
        char *vmid_start = strrchr(buf, '/');
        if (!vmid_start) {
            fprintf(stderr, "unexpected cgroup entry %s\n", buf);
            continue;
        }
        vmid_start++;

        // reject empty and negative values before handing them to strtoul
        if (vmid_start[0] == '-' || vmid_start[0] == '\0') {
            fprintf(stderr, "invalid vmid in cgroup entry %s\n", buf);
            continue;
        }

        errno = 0;
        char *endptr = NULL;
        vmid = strtoul(vmid_start, &endptr, 10);
        // the number must be immediately followed by ".scope"
        if (!endptr || strncmp(endptr, ".scope", 6)) {
            fprintf(stderr, "unexpected cgroup entry %s\n", buf);
            vmid = 0;
            continue;
        }
        if (errno != 0) {
            // strtoul range error (ERANGE) - treat as parse failure
            vmid = 0;
        }

        break;
    }

    // NOTE(review): errno here may also stem from the final getline() call,
    // not only from strtoul, so the message can be slightly misleading
    if (errno) {
        fprintf(stderr, "error parsing vmid for %d: %s\n", pid, strerror(errno));
    } else if (!vmid) {
        fprintf(stderr, "error parsing vmid for %d: no matching qemu.slice cgroup entry\n", pid);
    }

    free(buf);
    fclose(fp);
    return vmid;
}
153 | ||
/*
 * Write exactly `len` bytes from `buf` to `fd`, retrying while the call is
 * interrupted by a signal before any data was written. Returns true only if
 * the full length was written in one successful call.
 */
static bool
must_write(int fd, const char *buf, size_t len)
{
    ssize_t written;
    do {
        written = write(fd, buf, len);
    } while (written < 0 && errno == EINTR);

    return written == (ssize_t)len;
}
164 | ||
165 | /* | |
166 | * qmp handling functions | |
167 | */ | |
168 | ||
169 | static void | |
170 | send_qmp_cmd(struct Client *client, const char *buf, size_t len) | |
171 | { | |
172 | if (!must_write(client->fd, buf, len - 1)) { | |
173 | fprintf(stderr, "%s: cannot send QMP message\n", client->qemu.vmid); | |
174 | cleanup_client(client); | |
175 | } | |
176 | } | |
177 | ||
/*
 * Handle the initial QMP greeting (top-level "QMP" key) from a connecting
 * process: resolve the peer's VMID from its cgroup, register it in
 * vm_clients, and answer with qmp_capabilities to leave QMP negotiation mode.
 */
void
handle_qmp_handshake(struct Client *client)
{
    VERBOSE_PRINT("pid%d: got QMP handshake, assuming QEMU client\n", client->pid);

    // extract vmid from cmdline, now that we know it's a QEMU process
    unsigned long vmid = get_vmid_from_pid(client->pid);
    int res = snprintf(client->qemu.vmid, sizeof(client->qemu.vmid), "%lu", vmid);
    if (vmid == 0 || res < 0 || res >= (int)sizeof(client->qemu.vmid)) {
        fprintf(stderr, "could not get vmid from pid %d\n", client->pid);
        cleanup_client(client);
        return;
    }

    VERBOSE_PRINT("pid%d: assigned VMID: %s\n", client->pid, client->qemu.vmid);
    client->type = CLIENT_QEMU;
    // NOTE(review): g_hash_table_insert() returns FALSE when the key already
    // existed (the value is still replaced), so this message presumably fires
    // on a duplicate VMID rather than on an actual failed insertion - confirm
    if(!g_hash_table_insert(vm_clients, strdup(client->qemu.vmid), client)) {
        // not fatal, just means backup handling won't work
        fprintf(stderr, "%s: could not insert client into VMID->client table\n",
            client->qemu.vmid);
    }

    // acknowledge the handshake; the empty "return" reply is consumed in
    // handle_qmp_return() while still in STATE_HANDSHAKE
    static const char qmp_answer[] = "{\"execute\":\"qmp_capabilities\"}\n";
    send_qmp_cmd(client, qmp_answer, sizeof(qmp_answer));
}
203 | ||
204 | void | |
205 | handle_qmp_event(struct Client *client, struct json_object *obj) | |
206 | { | |
207 | struct json_object *event; | |
208 | if (!json_object_object_get_ex(obj, "event", &event)) { | |
209 | return; | |
210 | } | |
211 | VERBOSE_PRINT("%s: got QMP event: %s\n", client->qemu.vmid, json_object_get_string(event)); | |
212 | ||
213 | if (client->state == STATE_TERMINATING) { | |
214 | // QEMU sometimes sends a second SHUTDOWN after SIGTERM, ignore | |
215 | VERBOSE_PRINT("%s: event was after termination, ignoring\n", client->qemu.vmid); | |
216 | return; | |
217 | } | |
218 | ||
219 | // event, check if shutdown and get guest parameter | |
220 | if (!strcmp(json_object_get_string(event), "SHUTDOWN")) { | |
221 | client->qemu.graceful = 1; | |
222 | struct json_object *data; | |
223 | struct json_object *guest; | |
224 | if (json_object_object_get_ex(obj, "data", &data) && | |
225 | json_object_object_get_ex(data, "guest", &guest)) | |
226 | { | |
227 | client->qemu.guest = (unsigned short)json_object_get_boolean(guest); | |
228 | } | |
229 | ||
230 | // check if a backup is running and kill QEMU process if not | |
231 | terminate_check(client); | |
232 | } | |
233 | } | |
234 | ||
235 | void | |
236 | terminate_check(struct Client *client) | |
237 | { | |
238 | if (client->state != STATE_IDLE) { | |
239 | // if we're already in a request, queue this one until after | |
240 | VERBOSE_PRINT("%s: terminate_check queued\n", client->qemu.vmid); | |
241 | client->qemu.term_check_queued = true; | |
242 | return; | |
243 | } | |
244 | ||
245 | client->qemu.term_check_queued = false; | |
246 | ||
247 | VERBOSE_PRINT("%s: query-status\n", client->qemu.vmid); | |
248 | client->state = STATE_EXPECT_STATUS_RESP; | |
249 | static const char qmp_req[] = "{\"execute\":\"query-status\"}\n"; | |
250 | send_qmp_cmd(client, qmp_req, sizeof(qmp_req)); | |
251 | } | |
252 | ||
/*
 * Dispatch a QMP "return" (or "error") reply based on the client's current
 * protocol state.
 *
 * data:  value of the "return"/"error" key (may be NULL)
 * error: true if the reply was an "error" object
 */
void
handle_qmp_return(struct Client *client, struct json_object *data, bool error)
{
    if (error) {
        const char *msg = "n/a";
        struct json_object *desc;
        if (json_object_object_get_ex(data, "desc", &desc)) {
            msg = json_object_get_string(desc);
        }
        fprintf(stderr, "%s: received error from QMP: %s\n",
            client->qemu.vmid, msg);
        client->state = STATE_IDLE;
        goto out;
    }

    struct json_object *status;
    json_bool has_status = data &&
        json_object_object_get_ex(data, "status", &status);

    // "active" = the VM is still doing something we must not interrupt
    bool active = false;
    if (has_status) {
        const char *status_str = json_object_get_string(status);
        active = status_str && (
            !strcmp(status_str, "running")
            || !strcmp(status_str, "paused")
            || !strcmp(status_str, "suspended")
            || !strcmp(status_str, "prelaunch")
        );
    }

    switch (client->state) {
    // reply to our own query-status sent from terminate_check()
    case STATE_EXPECT_STATUS_RESP:
        client->state = STATE_IDLE;
        if (active) {
            VERBOSE_PRINT("%s: got status: VM is active\n", client->qemu.vmid);
        } else if (!client->qemu.backup) {
            terminate_client(client);
        } else {
            // if we're in a backup, don't do anything, vzdump will notify
            // us when the backup finishes
            VERBOSE_PRINT("%s: not active, but running backup - keep alive\n",
                client->qemu.vmid);
        }
        break;

    // this means we received the empty return from our handshake answer
    case STATE_HANDSHAKE:
        client->state = STATE_IDLE;
        VERBOSE_PRINT("%s: QMP handshake complete\n", client->qemu.vmid);
        break;

    // we expect an empty return object after sending quit
    case STATE_TERMINATING:
        break;
    case STATE_IDLE:
        VERBOSE_PRINT("%s: spurious return value received\n",
            client->qemu.vmid);
        break;
    }

out:
    // run a terminate_check() that was queued while we were busy
    if (client->qemu.term_check_queued) {
        terminate_check(client);
    }
}
318 | ||
319 | /* | |
320 | * VZDump specific client functions | |
321 | */ | |
322 | ||
323 | void | |
324 | handle_vzdump_handshake(struct Client *client, struct json_object *data) | |
325 | { | |
326 | client->state = STATE_IDLE; | |
327 | ||
328 | struct json_object *vmid_obj; | |
329 | json_bool has_vmid = data && json_object_object_get_ex(data, "vmid", &vmid_obj); | |
330 | ||
331 | if (!has_vmid) { | |
332 | VERBOSE_PRINT("pid%d: invalid vzdump handshake: no vmid\n", client->pid); | |
333 | return; | |
334 | } | |
335 | ||
336 | const char *vmid_str = json_object_get_string(vmid_obj); | |
337 | ||
338 | if (!vmid_str) { | |
339 | VERBOSE_PRINT("pid%d: invalid vzdump handshake: vmid is not a string\n", client->pid); | |
340 | return; | |
341 | } | |
342 | ||
343 | int res = snprintf(client->vzdump.vmid, sizeof(client->vzdump.vmid), "%s", vmid_str); | |
344 | if (res < 0 || res >= (int)sizeof(client->vzdump.vmid)) { | |
345 | VERBOSE_PRINT("pid%d: invalid vzdump handshake: vmid too long or invalid\n", client->pid); | |
346 | return; | |
347 | } | |
348 | ||
349 | struct Client *vmc = (struct Client*) g_hash_table_lookup(vm_clients, client->vzdump.vmid); | |
350 | if (vmc) { | |
351 | vmc->qemu.backup = true; | |
352 | ||
353 | // only mark as VZDUMP once we have set everything up, otherwise 'cleanup' | |
354 | // might try to access an invalid value | |
355 | client->type = CLIENT_VZDUMP; | |
356 | VERBOSE_PRINT("%s: vzdump backup started\n", client->vzdump.vmid); | |
357 | } else { | |
358 | VERBOSE_PRINT("%s: vzdump requested backup start for unregistered VM\n", client->vzdump.vmid); | |
359 | } | |
360 | } | |
361 | ||
362 | /* | |
363 | * client management functions | |
364 | */ | |
365 | ||
366 | void | |
367 | add_new_client(int client_fd) | |
368 | { | |
369 | struct Client *client = calloc(1, sizeof(struct Client)); | |
370 | if (client == NULL) { | |
371 | fprintf(stderr, "could not add new client - allocation failed!\n"); | |
372 | fflush(stderr); | |
373 | return; | |
374 | } | |
375 | client->state = STATE_HANDSHAKE; | |
376 | client->type = CLIENT_NONE; | |
377 | client->fd = client_fd; | |
378 | client->pid = get_pid_from_fd(client_fd); | |
379 | if (client->pid == 0) { | |
380 | fprintf(stderr, "could not get pid from client\n"); | |
381 | goto err; | |
382 | } | |
383 | ||
384 | struct epoll_event ev; | |
385 | ev.events = EPOLLIN; | |
386 | ev.data.ptr = client; | |
387 | int res = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client_fd, &ev); | |
388 | if (res < 0) { | |
389 | perror("epoll_ctl client add"); | |
390 | goto err; | |
391 | } | |
392 | ||
393 | VERBOSE_PRINT("added new client, pid: %d\n", client->pid); | |
394 | ||
395 | return; | |
396 | err: | |
397 | (void)close(client_fd); | |
398 | free(client); | |
399 | } | |
400 | ||
401 | static void | |
402 | cleanup_qemu_client(struct Client *client) | |
403 | { | |
404 | unsigned short graceful = client->qemu.graceful; | |
405 | unsigned short guest = client->qemu.guest; | |
406 | char vmid[sizeof(client->qemu.vmid)]; | |
407 | strncpy(vmid, client->qemu.vmid, sizeof(vmid)); | |
408 | g_hash_table_remove(vm_clients, &vmid); // frees key, ignore errors | |
409 | VERBOSE_PRINT("%s: executing cleanup (graceful: %d, guest: %d)\n", | |
410 | vmid, graceful, guest); | |
411 | ||
412 | int pid = fork(); | |
413 | if (pid < 0) { | |
414 | fprintf(stderr, "fork failed: %s\n", strerror(errno)); | |
415 | return; | |
416 | } | |
417 | if (pid == 0) { | |
418 | char *script = "/usr/sbin/qm"; | |
419 | ||
420 | char *args[] = { | |
421 | script, | |
422 | "cleanup", | |
423 | vmid, | |
424 | graceful ? "1" : "0", | |
425 | guest ? "1" : "0", | |
426 | NULL | |
427 | }; | |
428 | ||
429 | execvp(script, args); | |
430 | perror("execvp"); | |
431 | _exit(1); | |
432 | } | |
433 | } | |
434 | ||
/*
 * Tear down a client connection: remove its fd from epoll, close it, run the
 * type-specific cleanup, then release all per-client resources. Also removes
 * the client from the forced-cleanups list so sigkill() can't touch it after
 * it is freed.
 */
void
cleanup_client(struct Client *client)
{
    log_neg(epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->fd, NULL), "epoll del");
    (void)close(client->fd);

    struct Client *vmc;
    switch (client->type) {
    case CLIENT_QEMU:
        // QEMU process went away -> fork `qm cleanup`
        cleanup_qemu_client(client);
        break;

    case CLIENT_VZDUMP:
        // backup connection closed -> clear the backup flag on the VM's
        // client and re-evaluate whether it should now be terminated
        vmc = (struct Client*) g_hash_table_lookup(vm_clients, client->vzdump.vmid);
        if (vmc) {
            VERBOSE_PRINT("%s: backup ended\n", client->vzdump.vmid);
            vmc->qemu.backup = false;
            terminate_check(vmc);
        }
        break;

    case CLIENT_NONE:
        // do nothing, only close socket
        break;
    }

    // pidfd is only set once terminate_client() ran for this client
    if (client->pidfd > 0) {
        (void)close(client->pidfd);
    }
    VERBOSE_PRINT("removing %s from forced cleanups\n", client->qemu.vmid);
    forced_cleanups = g_slist_remove(forced_cleanups, client);
    free(client);
}
468 | ||
/*
 * Ask a QEMU client to quit: first via the QMP 'quit' command, falling back
 * to SIGTERM if the write fails. The client is put on the forced-cleanups
 * list with a deadline, after which sigkill() delivers SIGKILL.
 */
void
terminate_client(struct Client *client)
{
    VERBOSE_PRINT("%s: terminating client (pid %d)\n", client->qemu.vmid, client->pid);

    client->state = STATE_TERMINATING;

    // open a pidfd before kill for later cleanup
    int pidfd = pidfd_open(client->pid, 0);
    if (pidfd < 0) {
        switch (errno) {
        case ESRCH:
            // process already dead for some reason, cleanup done
            VERBOSE_PRINT("%s: failed to open pidfd, process already dead (pid %d)\n",
                client->qemu.vmid, client->pid);
            return;

        // otherwise fall back to just using the PID directly, but don't
        // print if we only failed because we're running on an older kernel
        case ENOSYS:
            break;
        default:
            perror("failed to open QEMU pidfd for cleanup");
            break;
        }
    }

    // try to send a 'quit' command first, fallback to SIGTERM of the pid
    static const char qmp_quit_command[] = "{\"execute\":\"quit\"}\n";
    VERBOSE_PRINT("%s: sending 'quit' via QMP\n", client->qemu.vmid);
    if (!must_write(client->fd, qmp_quit_command, sizeof(qmp_quit_command) - 1)) {
        VERBOSE_PRINT("%s: sending 'SIGTERM' to pid %d\n", client->qemu.vmid, client->pid);
        int err = kill(client->pid, SIGTERM);
        log_neg(err, "kill");
    }

    // absolute deadline checked by sigkill() on each cleanup pass
    time_t timeout = time(NULL) + kill_timeout;

    client->pidfd = pidfd;
    client->timeout = timeout;

    forced_cleanups = g_slist_prepend(forced_cleanups, (void *)client);
    needs_cleanup = 1;
}
513 | ||
/*
 * epoll read handler for a connected client: read whatever is available, feed
 * the accumulated buffer through an incremental JSON tokener, and dispatch
 * each complete top-level object to the matching QMP/vzdump handler.
 */
void
handle_client(struct Client *client)
{
    VERBOSE_PRINT("pid%d: entering handle\n", client->pid);
    ssize_t len;
    do {
        len = read(client->fd, (client->buf+client->buflen),
            sizeof(client->buf) - client->buflen);
    } while (len < 0 && errno == EINTR);

    if (len < 0) {
        // EAGAIN/EWOULDBLOCK are expected on the non-blocking fd; anything
        // else is fatal for this client
        if (!(errno == EAGAIN || errno == EWOULDBLOCK)) {
            log_neg((int)len, "read");
            cleanup_client(client);
        }
        return;
    } else if (len == 0) {
        // peer closed the connection - run the cleanup path
        VERBOSE_PRINT("pid%d: got EOF\n", client->pid);
        cleanup_client(client);
        return;
    }

    VERBOSE_PRINT("pid%d: read %ld bytes\n", client->pid, len);
    client->buflen += len;

    struct json_tokener *tok = json_tokener_new();
    struct json_object *jobj = NULL;
    enum json_tokener_error jerr = json_tokener_success;
    // parse as many complete JSON objects as the buffer currently holds
    while (jerr == json_tokener_success && client->buflen != 0) {
        jobj = json_tokener_parse_ex(tok, client->buf, (int)client->buflen);
        jerr = json_tokener_get_error(tok);
        unsigned int offset = (unsigned int)tok->char_offset;
        switch (jerr) {
        case json_tokener_success:
            // move rest from buffer to front
            memmove(client->buf, client->buf + offset, client->buflen - offset);
            client->buflen -= offset;
            if (json_object_is_type(jobj, json_type_object)) {
                // dispatch on the first recognized top-level key
                struct json_object *obj;
                if (json_object_object_get_ex(jobj, "QMP", &obj)) {
                    handle_qmp_handshake(client);
                } else if (json_object_object_get_ex(jobj, "event", &obj)) {
                    handle_qmp_event(client, jobj);
                } else if (json_object_object_get_ex(jobj, "return", &obj)) {
                    handle_qmp_return(client, obj, false);
                } else if (json_object_object_get_ex(jobj, "error", &obj)) {
                    handle_qmp_return(client, obj, true);
                } else if (json_object_object_get_ex(jobj, "vzdump", &obj)) {
                    handle_vzdump_handshake(client, obj);
                } // else ignore message
            }
            break;
        case json_tokener_continue:
            // incomplete object: wait for more data unless the buffer is
            // already full - then the message can never complete
            if (client->buflen >= sizeof(client->buf)) {
                VERBOSE_PRINT("pid%d: msg too large, discarding buffer\n", client->pid);
                memset(client->buf, 0, sizeof(client->buf));
                client->buflen = 0;
            } // else we have enough space try again after next read
            break;
        default:
            VERBOSE_PRINT("pid%d: parse error: %d, discarding buffer\n", client->pid, jerr);
            memset(client->buf, 0, client->buflen);
            client->buflen = 0;
            break;
        }
        json_object_put(jobj);
    }
    json_tokener_free(tok);
}
583 | ||
/*
 * g_slist_foreach() callback: SIGKILL a client whose termination deadline has
 * passed. time_ptr points to the current time (time_t).
 */
static void
sigkill(void *ptr, void *time_ptr)
{
    struct Client *data = ptr;
    int err;

    // deadline not reached yet - keep waiting
    if (data->timeout != 0 && data->timeout > *(time_t *)time_ptr) {
        return;
    }

    // prefer the pidfd (immune to PID reuse), fall back to plain kill()
    if (data->pidfd > 0) {
        err = pidfd_send_signal(data->pidfd, SIGKILL, NULL, 0);
        (void)close(data->pidfd);
        data->pidfd = -1;
    } else {
        err = kill(data->pid, SIGKILL);
    }

    if (err < 0) {
        // ESRCH: process exited on its own in the meantime - nothing to do
        if (errno != ESRCH) {
            fprintf(stderr, "SIGKILL cleanup of pid '%d' failed - %s\n",
                data->pid, strerror(errno));
        }
    } else {
        // signal delivered: the graceful shutdown did not finish in time
        fprintf(stderr, "cleanup failed, terminating pid '%d' with SIGKILL\n",
            data->pid);
    }

    data->timeout = 0;

    // remove ourselves from the list
    // NOTE(review): removing the current element inside g_slist_foreach()
    // relies on GLib saving the next pointer before invoking the callback -
    // removing any other element here would not be safe; confirm
    forced_cleanups = g_slist_remove(forced_cleanups, ptr);
}
617 | ||
618 | static void | |
619 | handle_forced_cleanup() | |
620 | { | |
621 | if (g_slist_length(forced_cleanups) > 0) { | |
622 | VERBOSE_PRINT("clearing forced cleanup backlog\n"); | |
623 | time_t cur_time = time(NULL); | |
624 | g_slist_foreach(forced_cleanups, sigkill, &cur_time); | |
625 | } | |
626 | needs_cleanup = g_slist_length(forced_cleanups) > 0; | |
627 | } | |
628 | ||
629 | int | |
630 | main(int argc, char *argv[]) | |
631 | { | |
632 | int opt; | |
633 | int daemonize = 1; | |
634 | char *socket_path = NULL; | |
635 | progname = argv[0]; | |
636 | ||
637 | while ((opt = getopt(argc, argv, "hfvt:")) != -1) { | |
638 | switch (opt) { | |
639 | case 'f': | |
640 | daemonize = 0; | |
641 | break; | |
642 | case 'v': | |
643 | verbose = 1; | |
644 | break; | |
645 | case 't': | |
646 | errno = 0; | |
647 | char *endptr = NULL; | |
648 | kill_timeout = strtoul(optarg, &endptr, 10); | |
649 | if (errno != 0 || *endptr != '\0' || kill_timeout == 0) { | |
650 | usage(); | |
651 | exit(EXIT_FAILURE); | |
652 | } | |
653 | break; | |
654 | case 'h': | |
655 | usage(); | |
656 | exit(EXIT_SUCCESS); | |
657 | break; | |
658 | default: | |
659 | usage(); | |
660 | exit(EXIT_FAILURE); | |
661 | } | |
662 | } | |
663 | ||
664 | if (optind >= argc) { | |
665 | usage(); | |
666 | exit(EXIT_FAILURE); | |
667 | } | |
668 | ||
669 | signal(SIGCHLD, SIG_IGN); | |
670 | ||
671 | socket_path = argv[optind]; | |
672 | ||
673 | int sock = socket(AF_UNIX, SOCK_STREAM, 0); | |
674 | bail_neg(sock, "socket"); | |
675 | ||
676 | struct sockaddr_un addr; | |
677 | memset(&addr, 0, sizeof(addr)); | |
678 | addr.sun_family = AF_UNIX; | |
679 | strncpy(addr.sun_path, socket_path, sizeof(addr.sun_path) - 1); | |
680 | ||
681 | unlink(socket_path); | |
682 | bail_neg(bind(sock, (struct sockaddr*)&addr, sizeof(addr)), "bind"); | |
683 | ||
684 | struct epoll_event ev, events[1]; | |
685 | epoll_fd = epoll_create1(EPOLL_CLOEXEC); | |
686 | bail_neg(epoll_fd, "epoll_create1"); | |
687 | ||
688 | ev.events = EPOLLIN; | |
689 | ev.data.fd = sock; | |
690 | bail_neg(epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev), "epoll_ctl"); | |
691 | ||
692 | bail_neg(listen(sock, 10), "listen"); | |
693 | ||
694 | if (daemonize) { | |
695 | bail_neg(daemon(0, 1), "daemon"); | |
696 | } | |
697 | ||
698 | vm_clients = g_hash_table_new_full(g_str_hash, g_str_equal, free, NULL); | |
699 | ||
700 | int nevents; | |
701 | ||
702 | for(;;) { | |
703 | nevents = epoll_wait(epoll_fd, events, 1, needs_cleanup ? 10*1000 : -1); | |
704 | if (nevents < 0 && errno == EINTR) { | |
705 | continue; | |
706 | } | |
707 | bail_neg(nevents, "epoll_wait"); | |
708 | ||
709 | for (int n = 0; n < nevents; n++) { | |
710 | if (events[n].data.fd == sock) { | |
711 | ||
712 | int conn_sock = accept4(sock, NULL, NULL, SOCK_NONBLOCK | SOCK_CLOEXEC); | |
713 | log_neg(conn_sock, "accept"); | |
714 | if (conn_sock > -1) { | |
715 | add_new_client(conn_sock); | |
716 | } | |
717 | } else { | |
718 | handle_client((struct Client *)events[n].data.ptr); | |
719 | } | |
720 | } | |
721 | handle_forced_cleanup(); | |
722 | } | |
723 | } |