/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "exec-all.h"

#include "cpus.h"
#include "compatfd.h"

#ifdef SIGRTMIN
#define SIG_IPI (SIGRTMIN+4)
#else
#define SIG_IPI SIGUSR1
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

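/* For hardware accelerators (KVM), keep QEMU's CPUState and the in-kernel
 * register state in sync: cpu_synchronize_state() pulls the current state
 * into QEMU, while the post_reset/post_init variants push it back at the
 * corresponding points of machine setup.  When KVM is not in use these
 * reduce to no-ops. */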
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

int cpu_is_stopped(CPUState *env)
{
    return !vm_running || env->stopped;
}

static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        qemu_aio_flush();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static int cpu_can_run(CPUState *env)
{
    if (env->stop)
        return 0;
    if (env->stopped || !vm_running)
        return 0;
    return 1;
}

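/* A vcpu thread may go to sleep only if it has no pending stop request or
 * queued work, and either the VM is stopped or the (halted) CPU has nothing
 * left to do. */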
static bool cpu_thread_is_idle(CPUState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !vm_running) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env)) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}

static void cpu_debug_handler(CPUState *env)
{
    gdb_set_stop_cpu(env);
    debug_requested = EXCP_DEBUG;
    vm_stop(EXCP_DEBUG);
}

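/* On Linux, SIGBUS can signal a hardware memory error (MCE) in guest RAM.
 * Forward such faults to KVM; if KVM cannot handle them, restore the default
 * handler and re-raise the signal so the process dies as it normally would.
 * qemu_init_sigbus() also opts in to early delivery of such errors via
 * prctl(PR_MCE_KILL). */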
#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */

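/* Main loop wakeup: qemu_event_increment() makes the notification file
 * descriptor (an eventfd where available, otherwise a pipe) readable so a
 * blocking main loop wakes up; qemu_event_read() drains it again. */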
#ifndef _WIN32
static int io_thread_fd = -1;

static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd. */
    static const uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1)
        return;

    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending. */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit(1);
    }
}

static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe. For eventfd, only 8 bytes will be read. */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}

static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1)
        return -errno;

    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0)
        goto fail;

    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0)
        goto fail;

    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}

static void dummy_signal(int sig)
{
}

/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (unsigned long) opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}

static int qemu_signalfd_init(sigset_t mask)
{
    int sigfd;

    sigfd = qemu_signalfd(&mask);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(unsigned long) sigfd);

    return 0;
}

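/* Drain SIG_IPI and SIGBUS that are pending for this thread without
 * blocking.  SIGBUS is handed to KVM (and re-raised if it cannot cope);
 * SIG_IPI is simply consumed. */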
static void qemu_kvm_eat_signals(CPUState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));

#ifndef CONFIG_IOTHREAD
    if (sigismember(&chkset, SIGIO) || sigismember(&chkset, SIGALRM)) {
        qemu_notify_event();
    }
#endif
}

#else /* _WIN32 */

HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit(1);
    }
}

static void qemu_kvm_eat_signals(CPUState *env)
{
}
#endif /* _WIN32 */

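/* Everything up to the matching #else implements the traditional
 * single-threaded model: without CONFIG_IOTHREAD, vcpus and the event loop
 * run in one thread, so most of the pause/resume/kick machinery below
 * degenerates to stubs. */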
#ifndef CONFIG_IOTHREAD
static void qemu_kvm_init_cpu_signals(CPUState *env)
{
#ifndef _WIN32
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    sigdelset(&set, SIGIO);
    sigdelset(&set, SIGALRM);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
#endif
}

#ifndef _WIN32
static sigset_t block_synchronous_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    if (kvm_enabled()) {
        /*
         * We need to process timer signals synchronously to avoid a race
         * between exit_request check and KVM vcpu entry.
         */
        sigaddset(&set, SIGIO);
        sigaddset(&set, SIGALRM);
    }

    return set;
}
#endif

int qemu_init_main_loop(void)
{
#ifndef _WIN32
    sigset_t blocked_signals;
    int ret;

    blocked_signals = block_synchronous_signals();

    ret = qemu_signalfd_init(blocked_signals);
    if (ret) {
        return ret;
    }
#endif
    cpu_set_debug_excp_handler(cpu_debug_handler);

    qemu_init_sigbus();

    return qemu_event_init();
}

void qemu_main_loop_start(void)
{
}

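/* Without the IO thread there are no per-vcpu threads to start; vcpu
 * initialization only records the SMP topology and, for KVM, creates the
 * in-kernel vcpu and sets up its signal masks. */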
void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;
    int r;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;

    if (kvm_enabled()) {
        r = kvm_init_vcpu(env);
        if (r < 0) {
            fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
            exit(1);
        }
        qemu_kvm_init_cpu_signals(env);
    }
}

int qemu_cpu_self(void *env)
{
    return 1;
}

void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    func(data);
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
    return;
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    raise(SIG_IPI);
#else
    abort();
#endif
}

void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment();
    if (env) {
        cpu_exit(env);
    }
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
    exit_request = 1;
}

void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}

void cpu_stop_current(void)
{
}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}

#else /* CONFIG_IOTHREAD */

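/* With CONFIG_IOTHREAD, every KVM vcpu gets its own thread and all TCG vcpus
 * share one, while device emulation and the main loop run in the IO thread.
 * Access to guest state is serialized by qemu_global_mutex. */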
#include "qemu-thread.h"

QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}

static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

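/* Called from the IO thread before vcpu threads exist: SIGIO, SIGALRM,
 * SIG_IPI and SIGBUS are blocked here and later consumed through signalfd
 * (see qemu_init_main_loop), so they never interrupt the IO thread
 * asynchronously. */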
static sigset_t block_io_signals(void)
{
    sigset_t set;

    /* SIGUSR2 used by posix-aio-compat.c */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    return set;
}

int qemu_init_main_loop(void)
{
    int ret;
    sigset_t blocked_signals;

    cpu_set_debug_excp_handler(cpu_debug_handler);

    qemu_init_sigbus();

    blocked_signals = block_io_signals();

    ret = qemu_signalfd_init(blocked_signals);
    if (ret)
        return ret;

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_self(&io_thread);

    return 0;
}

void qemu_main_loop_start(void)
{
    qemu_system_ready = 1;
    qemu_cond_broadcast(&qemu_system_cond);
}

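/* Run func(data) on env's vcpu thread and wait for it to complete.  Called
 * with qemu_global_mutex held; if the caller already runs on that vcpu's
 * thread the function is invoked directly.  Illustrative use (hypothetical
 * helper name):
 *
 *     static void update_device_state(void *data) { ... }
 *     run_on_cpu(env, update_device_state, env);  // returns after it ran
 */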
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first)
        env->queued_work_first = &wi;
    else
        env->queued_work_last->next = &wi;
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first)
        return;

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        qemu_cond_timedwait(tcg_halt_cond, &qemu_global_mutex, 1000);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (cpu_thread_is_idle(env)) {
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}

static int qemu_cpu_exec(CPUState *env);

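/* Per-vcpu thread used with KVM (one thread per CPUState): create the
 * in-kernel vcpu, set up its signal masks, announce creation, wait for
 * machine initialization to finish, then loop between guest execution and
 * waiting for IO events. */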
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_self(env->thread);

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

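/* Single thread shared by all TCG vcpus (see qemu_tcg_init_vcpu):
 * cpu_exec_all() round-robins over the CPU list, then the thread waits for
 * the next IO event or kick. */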
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        cpu_exec_all();
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

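/* Wake up a vcpu: broadcast its halt condition in case it is sleeping, and
 * send SIG_IPI to interrupt it if it is running guest code.  thread_kicked
 * avoids sending more than one signal per wait/exec cycle; it is cleared
 * again in qemu_wait_io_event_common(). */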
void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;
    qemu_cond_broadcast(env->halt_cond);
    if (!env->thread_kicked) {
        qemu_thread_signal(env->thread, SIG_IPI);
        env->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
    assert(cpu_single_env);

    if (!cpu_single_env->thread_kicked) {
        qemu_thread_signal(cpu_single_env->thread, SIG_IPI);
        cpu_single_env->thread_kicked = true;
    }
}

int qemu_cpu_self(void *_env)
{
    CPUState *env = _env;
    QemuThread this;

    qemu_thread_self(&this);

    return qemu_thread_equal(&this, env->thread);
}

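/* With TCG, the vcpu thread holds qemu_global_mutex almost continuously, so
 * a straight lock from the IO thread could starve.  Take qemu_fair_mutex
 * first and, if the global mutex is contended, kick the TCG thread with
 * SIG_IPI so it leaves cpu_exec() and releases the lock. */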
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped)
            return 0;
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

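/* Ask every vcpu to stop and wait (re-kicking periodically) until each
 * thread has acknowledged by setting env->stopped in
 * qemu_wait_io_event_common(). */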
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

static void qemu_tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env);
        while (env->created == 0)
            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env);
    while (env->created == 0)
        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled())
        qemu_kvm_start_vcpu(env);
    else
        qemu_tcg_init_vcpu(env);
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

static void qemu_system_vmstop_request(int reason)
{
    vmstop_requested = reason;
    qemu_notify_event();
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
    }
}

void vm_stop(int reason)
{
    QemuThread me;
    qemu_thread_self(&me);

    if (!qemu_thread_equal(&me, &io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(reason);
}

#endif

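/* Execute guest code on one vcpu.  With -icount, first compute an
 * instruction budget up to the next timer deadline (split into the 16-bit
 * icount_decr field plus icount_extra), run, then fold any unexecuted
 * instructions back into the global counter. */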
static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_next_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

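/* Round-robin over all vcpus in the current thread: run each runnable vcpu
 * in turn, starting from where the previous round left off, until an exit is
 * requested, an alarm is pending, or a vcpu hits a debug exception.  Returns
 * true unless every vcpu is idle. */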
bool cpu_exec_all(void)
{
    int r;

    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env)) {
            r = qemu_cpu_exec(env);
            if (kvm_enabled()) {
                qemu_kvm_eat_signals(env);
            }
            if (r == EXCP_DEBUG) {
                break;
            }
        } else if (env->stop) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}

void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}