/*
 * os-posix-lib.c
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2010 Red Hat, Inc.
 *
 * QEMU library functions on POSIX which are shared between QEMU and
 * the QEMU tools.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <termios.h>

#include <glib/gprintf.h>

#include "sysemu/sysemu.h"
#include "trace.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/madvise.h"
#include "qemu/sockets.h"
#include "qemu/thread.h"
#include <libgen.h>
#include "qemu/cutils.h"
#include "qemu/units.h"
#include "qemu/thread-context.h"

#ifdef CONFIG_LINUX
#include <sys/syscall.h>
#endif

#ifdef __FreeBSD__
#include <sys/thr.h>
#include <sys/user.h>
#include <libutil.h>
#endif

#ifdef __NetBSD__
#include <lwp.h>
#endif

#include "qemu/mmap-alloc.h"

#ifdef CONFIG_DEBUG_STACK_USAGE
#include "qemu/error-report.h"
#endif

#define MAX_MEM_PREALLOC_THREAD_COUNT 16

struct MemsetThread;

typedef struct MemsetContext {
    bool all_threads_created;
    bool any_thread_failed;
    struct MemsetThread *threads;
    int num_threads;
} MemsetContext;

struct MemsetThread {
    char *addr;
    size_t numpages;
    size_t hpagesize;
    QemuThread pgthread;
    sigjmp_buf env;
    MemsetContext *context;
};
typedef struct MemsetThread MemsetThread;

/* used by sigbus_handler() */
static MemsetContext *sigbus_memset_context;
struct sigaction sigbus_oldact;
static QemuMutex sigbus_mutex;

static QemuMutex page_mutex;
static QemuCond page_cond;

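/*
 * Return a kernel-level thread identifier for the calling thread, using
 * the most specific interface each host OS provides; falls back to the
 * process ID where no per-thread ID is available.
 */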
int qemu_get_thread_id(void)
{
#if defined(__linux__)
    return syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    /* thread id is up to INT_MAX */
    long tid;
    thr_self(&tid);
    return (int)tid;
#elif defined(__NetBSD__)
    return _lwp_self();
#elif defined(__OpenBSD__)
    return getthrid();
#else
    return getpid();
#endif
}

int qemu_daemon(int nochdir, int noclose)
{
    return daemon(nochdir, noclose);
}

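/*
 * Write the current PID to @path, holding an fcntl() write lock on the
 * file for the lifetime of the process.  The lock/stat loop below guards
 * against racing with another process that unlinks or recreates the
 * pidfile between our open() and the lock being taken.
 */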
bool qemu_write_pidfile(const char *path, Error **errp)
{
    int fd;
    char pidstr[32];

    while (1) {
        struct stat a, b;
        struct flock lock = {
            .l_type = F_WRLCK,
            .l_whence = SEEK_SET,
            .l_len = 0,
        };

        fd = qemu_create(path, O_WRONLY, S_IRUSR | S_IWUSR, errp);
        if (fd == -1) {
            return false;
        }

        if (fstat(fd, &b) < 0) {
            error_setg_errno(errp, errno, "Cannot stat file");
            goto fail_close;
        }

        if (fcntl(fd, F_SETLK, &lock)) {
            error_setg_errno(errp, errno, "Cannot lock pid file");
            goto fail_close;
        }

        /*
         * Now make sure the path we locked is the same one that now
         * exists on the filesystem.
         */
        if (stat(path, &a) < 0) {
            /*
             * PID file disappeared, someone else must be racing with
             * us, so try again.
             */
            close(fd);
            continue;
        }

        if (a.st_ino == b.st_ino) {
            break;
        }

        /*
         * PID file was recreated, someone else must be racing with
         * us, so try again.
         */
        close(fd);
    }

    if (ftruncate(fd, 0) < 0) {
        error_setg_errno(errp, errno, "Failed to truncate pid file");
        goto fail_unlink;
    }

    snprintf(pidstr, sizeof(pidstr), FMT_pid "\n", getpid());
    if (qemu_write_full(fd, pidstr, strlen(pidstr)) != strlen(pidstr)) {
        error_setg(errp, "Failed to write pid file");
        goto fail_unlink;
    }

    return true;

fail_unlink:
    unlink(path);
fail_close:
    close(fd);
    return false;
}

/* alloc shared memory pages */
void *qemu_anon_ram_alloc(size_t size, uint64_t *alignment, bool shared,
                          bool noreserve)
{
    const uint32_t qemu_map_flags = (shared ? QEMU_MAP_SHARED : 0) |
                                    (noreserve ? QEMU_MAP_NORESERVE : 0);
    size_t align = QEMU_VMALLOC_ALIGN;
    void *ptr = qemu_ram_mmap(-1, size, align, qemu_map_flags, 0);

    if (ptr == MAP_FAILED) {
        return NULL;
    }

    if (alignment) {
        *alignment = align;
    }

    trace_qemu_anon_ram_alloc(size, ptr);
    return ptr;
}

void qemu_anon_ram_free(void *ptr, size_t size)
{
    trace_qemu_anon_ram_free(ptr, size);
    qemu_ram_munmap(-1, ptr, size);
}

void qemu_socket_set_block(int fd)
{
    g_unix_set_fd_nonblocking(fd, false, NULL);
}

int qemu_socket_try_set_nonblock(int fd)
{
    return g_unix_set_fd_nonblocking(fd, true, NULL) ? 0 : -errno;
}

void qemu_socket_set_nonblock(int fd)
{
    int f;
    f = qemu_socket_try_set_nonblock(fd);
    assert(f == 0);
}

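/*
 * Allow the local address to be reused immediately after the socket is
 * closed (SO_REUSEADDR); asserts on failure, since setting the option on
 * a valid socket is not expected to fail.
 */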
int socket_set_fast_reuse(int fd)
{
    int val = 1, ret;

    ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
                     (const char *)&val, sizeof(val));

    assert(ret == 0);

    return ret;
}

void qemu_set_cloexec(int fd)
{
    int f;
    f = fcntl(fd, F_GETFD);
    assert(f != -1);
    f = fcntl(fd, F_SETFD, f | FD_CLOEXEC);
    assert(f != -1);
}

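/*
 * socketpair() wrapper that marks both ends close-on-exec.  SOCK_CLOEXEC
 * is used atomically where available; otherwise the flag is set with
 * fcntl() after the pair has been created.
 */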
int qemu_socketpair(int domain, int type, int protocol, int sv[2])
{
    int ret;

#ifdef SOCK_CLOEXEC
    ret = socketpair(domain, type | SOCK_CLOEXEC, protocol, sv);
    if (ret != -1 || errno != EINVAL) {
        return ret;
    }
#endif
    ret = socketpair(domain, type, protocol, sv);
    if (ret == 0) {
        qemu_set_cloexec(sv[0]);
        qemu_set_cloexec(sv[1]);
    }

    return ret;
}

char *
qemu_get_local_state_dir(void)
{
    return get_relocated_path(CONFIG_QEMU_LOCALSTATEDIR);
}

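/*
 * Enable or disable terminal echo and canonical (line-buffered) input on
 * @fd by toggling the ECHO/ECHONL/ICANON/IEXTEN local-mode flags.
 */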
void qemu_set_tty_echo(int fd, bool echo)
{
    struct termios tty;

    tcgetattr(fd, &tty);

    if (echo) {
        tty.c_lflag |= ECHO | ECHONL | ICANON | IEXTEN;
    } else {
        tty.c_lflag &= ~(ECHO | ECHONL | ICANON | IEXTEN);
    }

    tcsetattr(fd, TCSANOW, &tty);
}

#ifdef CONFIG_LINUX
static void sigbus_handler(int signal, siginfo_t *siginfo, void *ctx)
#else /* CONFIG_LINUX */
static void sigbus_handler(int signal)
#endif /* CONFIG_LINUX */
{
    int i;

    if (sigbus_memset_context) {
        for (i = 0; i < sigbus_memset_context->num_threads; i++) {
            MemsetThread *thread = &sigbus_memset_context->threads[i];

            if (qemu_thread_is_self(&thread->pgthread)) {
                siglongjmp(thread->env, 1);
            }
        }
    }

#ifdef CONFIG_LINUX
    /*
     * We assume that the MCE SIGBUS handler could have been registered. We
     * should never receive BUS_MCEERR_AO on any of our threads, but only on
     * the main thread registered for PR_MCE_KILL_EARLY. Further, we should not
     * receive BUS_MCEERR_AR triggered by action of other threads on one of
     * our threads. So, no need to check for unrelated SIGBUS when seeing one
     * for our threads.
     *
     * We will forward to the MCE handler, which will either handle the SIGBUS
     * or reinstall the default SIGBUS handler and reraise the SIGBUS. The
     * default SIGBUS handler will crash the process, so we don't care.
     */
    if (sigbus_oldact.sa_flags & SA_SIGINFO) {
        sigbus_oldact.sa_sigaction(signal, siginfo, ctx);
        return;
    }
#endif /* CONFIG_LINUX */
    warn_report("qemu_prealloc_mem: unrelated SIGBUS detected and ignored");
}

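/*
 * Preallocation worker: touch every page in the thread's range by reading
 * and writing back one byte per page.  A SIGBUS raised during the loop
 * (for example, if a hugetlbfs mapping cannot allocate a page) is caught
 * via sigsetjmp()/siglongjmp() and turned into -EFAULT.
 */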
static void *do_touch_pages(void *arg)
{
    MemsetThread *memset_args = (MemsetThread *)arg;
    sigset_t set, oldset;
    int ret = 0;

    /*
     * On Linux, the page faults from the loop below can cause mmap_sem
     * contention with allocation of the thread stacks. Do not start
     * clearing until all threads have been created.
     */
    qemu_mutex_lock(&page_mutex);
    while (!memset_args->context->all_threads_created) {
        qemu_cond_wait(&page_cond, &page_mutex);
    }
    qemu_mutex_unlock(&page_mutex);

    /* unblock SIGBUS */
    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

    if (sigsetjmp(memset_args->env, 1)) {
        ret = -EFAULT;
    } else {
        char *addr = memset_args->addr;
        size_t numpages = memset_args->numpages;
        size_t hpagesize = memset_args->hpagesize;
        size_t i;
        for (i = 0; i < numpages; i++) {
            /*
             * Read & write back the same value, so we don't
             * corrupt existing user/app data that might be
             * stored.
             *
             * 'volatile' to stop compiler optimizing this away
             * to a no-op
             */
            *(volatile char *)addr = *addr;
            addr += hpagesize;
        }
    }
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    return (void *)(uintptr_t)ret;
}

static void *do_madv_populate_write_pages(void *arg)
{
    MemsetThread *memset_args = (MemsetThread *)arg;
    const size_t size = memset_args->numpages * memset_args->hpagesize;
    char * const addr = memset_args->addr;
    int ret = 0;

    /* See do_touch_pages(). */
    qemu_mutex_lock(&page_mutex);
    while (!memset_args->context->all_threads_created) {
        qemu_cond_wait(&page_cond, &page_mutex);
    }
    qemu_mutex_unlock(&page_mutex);

    if (size && qemu_madvise(addr, size, QEMU_MADV_POPULATE_WRITE)) {
        ret = -errno;
    }
    return (void *)(uintptr_t)ret;
}

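/*
 * Pick how many preallocation threads to start: bounded by the number of
 * online CPUs, MAX_MEM_PREALLOC_THREAD_COUNT, the caller's limit, the
 * number of pages, and roughly one thread per 64 MiB of memory, so that
 * small preallocations stay single-threaded.
 */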
static inline int get_memset_num_threads(size_t hpagesize, size_t numpages,
                                         int max_threads)
{
    long host_procs = sysconf(_SC_NPROCESSORS_ONLN);
    int ret = 1;

    if (host_procs > 0) {
        ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT), max_threads);
    }

    /* Especially with gigantic pages, don't create more threads than pages. */
    ret = MIN(ret, numpages);
    /* Don't start threads to prealloc comparatively little memory. */
    ret = MIN(ret, MAX(1, hpagesize * numpages / (64 * MiB)));

    /* In case sysconf() fails, we fall back to single threaded */
    return ret;
}

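/*
 * Split the [area, area + hpagesize * numpages) range across worker
 * threads and preallocate it, either by touching each page or, when
 * requested, via MADV_POPULATE_WRITE.  Returns 0 on success or a negative
 * errno value if any worker failed.
 */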
static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
                           int max_threads, ThreadContext *tc,
                           bool use_madv_populate_write)
{
    static gsize initialized = 0;
    MemsetContext context = {
        .num_threads = get_memset_num_threads(hpagesize, numpages, max_threads),
    };
    size_t numpages_per_thread, leftover;
    void *(*touch_fn)(void *);
    int ret = 0, i = 0;
    char *addr = area;

    if (g_once_init_enter(&initialized)) {
        qemu_mutex_init(&page_mutex);
        qemu_cond_init(&page_cond);
        g_once_init_leave(&initialized, 1);
    }

    if (use_madv_populate_write) {
        /* Avoid creating a single thread for MADV_POPULATE_WRITE */
        if (context.num_threads == 1) {
            if (qemu_madvise(area, hpagesize * numpages,
                             QEMU_MADV_POPULATE_WRITE)) {
                return -errno;
            }
            return 0;
        }
        touch_fn = do_madv_populate_write_pages;
    } else {
        touch_fn = do_touch_pages;
    }

    context.threads = g_new0(MemsetThread, context.num_threads);
    numpages_per_thread = numpages / context.num_threads;
    leftover = numpages % context.num_threads;
    for (i = 0; i < context.num_threads; i++) {
        context.threads[i].addr = addr;
        context.threads[i].numpages = numpages_per_thread + (i < leftover);
        context.threads[i].hpagesize = hpagesize;
        context.threads[i].context = &context;
        if (tc) {
            thread_context_create_thread(tc, &context.threads[i].pgthread,
                                         "touch_pages",
                                         touch_fn, &context.threads[i],
                                         QEMU_THREAD_JOINABLE);
        } else {
            qemu_thread_create(&context.threads[i].pgthread, "touch_pages",
                               touch_fn, &context.threads[i],
                               QEMU_THREAD_JOINABLE);
        }
        addr += context.threads[i].numpages * hpagesize;
    }

    if (!use_madv_populate_write) {
        sigbus_memset_context = &context;
    }

    qemu_mutex_lock(&page_mutex);
    context.all_threads_created = true;
    qemu_cond_broadcast(&page_cond);
    qemu_mutex_unlock(&page_mutex);

    for (i = 0; i < context.num_threads; i++) {
        int tmp = (uintptr_t)qemu_thread_join(&context.threads[i].pgthread);

        if (tmp) {
            ret = tmp;
        }
    }

    if (!use_madv_populate_write) {
        sigbus_memset_context = NULL;
    }
    g_free(context.threads);

    return ret;
}

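/*
 * Probe whether MADV_POPULATE_WRITE works for this mapping by issuing it
 * on a single page; only EINVAL is treated as "unsupported", so that
 * other errors still surface during the real preallocation pass.
 */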
static bool madv_populate_write_possible(char *area, size_t pagesize)
{
    return !qemu_madvise(area, pagesize, QEMU_MADV_POPULATE_WRITE) ||
           errno != EINVAL;
}

void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads,
                       ThreadContext *tc, Error **errp)
{
    static gsize initialized;
    int ret;
    size_t hpagesize = qemu_fd_getpagesize(fd);
    size_t numpages = DIV_ROUND_UP(sz, hpagesize);
    bool use_madv_populate_write;
    struct sigaction act;

    /*
     * Sense on every invocation, as MADV_POPULATE_WRITE cannot be used for
     * some special mappings, such as mapping /dev/mem.
     */
    use_madv_populate_write = madv_populate_write_possible(area, hpagesize);

    if (!use_madv_populate_write) {
        if (g_once_init_enter(&initialized)) {
            qemu_mutex_init(&sigbus_mutex);
            g_once_init_leave(&initialized, 1);
        }

        qemu_mutex_lock(&sigbus_mutex);
        memset(&act, 0, sizeof(act));
#ifdef CONFIG_LINUX
        act.sa_sigaction = &sigbus_handler;
        act.sa_flags = SA_SIGINFO;
#else /* CONFIG_LINUX */
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;
#endif /* CONFIG_LINUX */

        ret = sigaction(SIGBUS, &act, &sigbus_oldact);
        if (ret) {
            qemu_mutex_unlock(&sigbus_mutex);
            error_setg_errno(errp, errno,
                "qemu_prealloc_mem: failed to install signal handler");
            return;
        }
    }

    /* touch pages simultaneously */
    ret = touch_all_pages(area, hpagesize, numpages, max_threads, tc,
                          use_madv_populate_write);
    if (ret) {
        error_setg_errno(errp, -ret,
                         "qemu_prealloc_mem: preallocating memory failed");
    }

    if (!use_madv_populate_write) {
        ret = sigaction(SIGBUS, &sigbus_oldact, NULL);
        if (ret) {
            /* Terminate QEMU since it can't recover from error */
            perror("qemu_prealloc_mem: failed to reinstall signal handler");
            exit(1);
        }
        qemu_mutex_unlock(&sigbus_mutex);
    }
}

char *qemu_get_pid_name(pid_t pid)
{
    char *name = NULL;

#if defined(__FreeBSD__)
    /* BSDs don't have /proc, but they provide a nice substitute */
    struct kinfo_proc *proc = kinfo_getproc(pid);

    if (proc) {
        name = g_strdup(proc->ki_comm);
        free(proc);
    }
#else
    /* Assume a system with reasonable procfs */
    char *pid_path;
    size_t len;

    pid_path = g_strdup_printf("/proc/%d/cmdline", pid);
    g_file_get_contents(pid_path, &name, &len, NULL);
    g_free(pid_path);
#endif

    return name;
}


pid_t qemu_fork(Error **errp)
{
    sigset_t oldmask, newmask;
    struct sigaction sig_action;
    int saved_errno;
    pid_t pid;

    /*
     * Need to block signals now, so that child process can safely
     * kill off caller's signal handlers without a race.
     */
    sigfillset(&newmask);
    if (pthread_sigmask(SIG_SETMASK, &newmask, &oldmask) != 0) {
        error_setg_errno(errp, errno,
                         "cannot block signals");
        return -1;
    }

    pid = fork();
    saved_errno = errno;

    if (pid < 0) {
        /* attempt to restore signal mask, but ignore failure, to
         * avoid obscuring the fork failure */
        (void)pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
        error_setg_errno(errp, saved_errno,
                         "cannot fork child process");
        errno = saved_errno;
        return -1;
    } else if (pid) {
        /* parent process */

        /* Restore our original signal mask now that the child is
         * safely running. Only documented failures are EFAULT (not
         * possible, since we are using just-grabbed mask) or EINVAL
         * (not possible, since we are using correct arguments). */
        (void)pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
    } else {
        /* child process */
        size_t i;

        /* Clear out all signal handlers from parent so nothing
         * unexpected can happen in our child once we unblock
         * signals */
        sig_action.sa_handler = SIG_DFL;
        sig_action.sa_flags = 0;
        sigemptyset(&sig_action.sa_mask);

        for (i = 1; i < NSIG; i++) {
            /* Only possible errors are EFAULT or EINVAL. The former
             * won't happen, the latter we expect, so no need to check
             * return value */
            (void)sigaction(i, &sig_action, NULL);
        }

        /* Unmask all signals in child, since we've no idea what the
         * caller's done with their signal mask and don't want to
         * propagate that to children */
        sigemptyset(&newmask);
        if (pthread_sigmask(SIG_SETMASK, &newmask, NULL) != 0) {
            Error *local_err = NULL;
            error_setg_errno(&local_err, errno,
                             "cannot unblock signals");
            error_report_err(local_err);
            _exit(1);
        }
    }
    return pid;
}

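/*
 * Allocate an anonymous mapping for a thread stack.  *sz is rounded up to
 * at least the system's minimum thread stack size and to a whole number
 * of pages, plus one extra page that is protected PROT_NONE as a guard
 * page at the overflow end of the stack; *sz is updated to the final size.
 */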
void *qemu_alloc_stack(size_t *sz)
{
    void *ptr, *guardpage;
    int flags;
#ifdef CONFIG_DEBUG_STACK_USAGE
    void *ptr2;
#endif
    size_t pagesz = qemu_real_host_page_size();
#ifdef _SC_THREAD_STACK_MIN
    /* avoid stacks smaller than _SC_THREAD_STACK_MIN */
    long min_stack_sz = sysconf(_SC_THREAD_STACK_MIN);
    *sz = MAX(MAX(min_stack_sz, 0), *sz);
#endif
    /* adjust stack size to a multiple of the page size */
    *sz = ROUND_UP(*sz, pagesz);
    /* allocate one extra page for the guard page */
    *sz += pagesz;

    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(MAP_STACK) && defined(__OpenBSD__)
    /* Only enable MAP_STACK on OpenBSD. Other OS's such as
     * Linux/FreeBSD/NetBSD have a flag with the same name
     * but have differing functionality. OpenBSD will SEGV
     * if it spots execution with a stack pointer pointing
     * at memory that was not allocated with MAP_STACK.
     */
    flags |= MAP_STACK;
#endif

    ptr = mmap(NULL, *sz, PROT_READ | PROT_WRITE, flags, -1, 0);
    if (ptr == MAP_FAILED) {
        perror("failed to allocate memory for stack");
        abort();
    }

#if defined(HOST_IA64)
    /* separate register stack */
    guardpage = ptr + (((*sz - pagesz) / 2) & ~pagesz);
#elif defined(HOST_HPPA)
    /* stack grows up */
    guardpage = ptr + *sz - pagesz;
#else
    /* stack grows down */
    guardpage = ptr;
#endif
    if (mprotect(guardpage, pagesz, PROT_NONE) != 0) {
        perror("failed to set up stack guard page");
        abort();
    }

#ifdef CONFIG_DEBUG_STACK_USAGE
    for (ptr2 = ptr + pagesz; ptr2 < ptr + *sz; ptr2 += sizeof(uint32_t)) {
        *(uint32_t *)ptr2 = 0xdeadbeaf;
    }
#endif

    return ptr;
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static __thread unsigned int max_stack_usage;
#endif

void qemu_free_stack(void *stack, size_t sz)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
    unsigned int usage;
    void *ptr;

    for (ptr = stack + qemu_real_host_page_size(); ptr < stack + sz;
         ptr += sizeof(uint32_t)) {
        if (*(uint32_t *)ptr != 0xdeadbeaf) {
            break;
        }
    }
    usage = sz - (uintptr_t) (ptr - stack);
    if (usage > max_stack_usage) {
        error_report("thread %d max stack usage increased from %u to %u",
                     qemu_get_thread_id(), max_stack_usage, usage);
        max_stack_usage = usage;
    }
#endif

    munmap(stack, sz);
}

/*
 * Disable CFI checks.
 * We are going to call a signal handler directly. Such handler may or may not
 * have been defined in our binary, so there's no guarantee that the pointer
 * used to set the handler is a cfi-valid pointer. Since the handlers are
 * stored in kernel memory, changing the handler to an attacker-defined
 * function requires being able to call a sigaction() syscall,
 * which is not as easy as overwriting a pointer in memory.
 */
QEMU_DISABLE_CFI
void sigaction_invoke(struct sigaction *action,
                      struct qemu_signalfd_siginfo *info)
{
    siginfo_t si = {};
    si.si_signo = info->ssi_signo;
    si.si_errno = info->ssi_errno;
    si.si_code = info->ssi_code;

    /* Convert the minimal set of fields defined by POSIX.
     * Positive si_code values are reserved for kernel-generated
     * signals, where the valid siginfo fields are determined by
     * the signal number. But according to POSIX, it is unspecified
     * whether SI_USER and SI_QUEUE have values less than or equal to
     * zero.
     */
    if (info->ssi_code == SI_USER || info->ssi_code == SI_QUEUE ||
        info->ssi_code <= 0) {
        /* SIGTERM, etc. */
        si.si_pid = info->ssi_pid;
        si.si_uid = info->ssi_uid;
    } else if (info->ssi_signo == SIGILL || info->ssi_signo == SIGFPE ||
               info->ssi_signo == SIGSEGV || info->ssi_signo == SIGBUS) {
        si.si_addr = (void *)(uintptr_t)info->ssi_addr;
    } else if (info->ssi_signo == SIGCHLD) {
        si.si_pid = info->ssi_pid;
        si.si_status = info->ssi_status;
        si.si_uid = info->ssi_uid;
    }
    action->sa_sigaction(info->ssi_signo, &si, NULL);
}

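/*
 * Return the amount of physical memory on the host in bytes, or 0 if it
 * cannot be determined.  The result saturates at SIZE_MAX instead of
 * overflowing.
 */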
size_t qemu_get_host_physmem(void)
{
#ifdef _SC_PHYS_PAGES
    long pages = sysconf(_SC_PHYS_PAGES);
    if (pages > 0) {
        if (pages > SIZE_MAX / qemu_real_host_page_size()) {
            return SIZE_MAX;
        } else {
            return pages * qemu_real_host_page_size();
        }
    }
#endif
    return 0;
}

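/*
 * msync() wrapper that aligns the address down and the length up to the
 * host page size, since msync() requires a page-aligned address.
 */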
int qemu_msync(void *addr, size_t length, int fd)
{
    size_t align_mask = ~(qemu_real_host_page_size() - 1);

    /**
     * There are no strict requirements on the length of the mapping to be
     * synced, but it still has to account for the change in the start
     * address alignment.  Additionally, round the size up to a multiple of
     * the host page size.
     */
    length += ((uintptr_t)addr & (qemu_real_host_page_size() - 1));
    length = (length + ~align_mask) & align_mask;

    addr = (void *)((uintptr_t)addr & align_mask);

    return msync(addr, length, MS_SYNC);
}