/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "exec-all.h"

#include "cpus.h"
#include "compatfd.h"
#ifdef CONFIG_LINUX
#include <sys/prctl.h>
#endif

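/*
 * Signal used to kick vcpu threads out of guest execution. A real-time
 * signal is preferred where available since RT signals are queued rather
 * than coalesced; the +4 offset presumably keeps clear of other
 * in-process users of the low RT signals.
 */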
#ifdef SIGRTMIN
#define SIG_IPI (SIGRTMIN+4)
#else
#define SIG_IPI SIGUSR1
#endif

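/* Fall back to the Linux value in case older headers lack PR_MCE_KILL. */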
#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

static CPUState *next_cpu;

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

int cpu_is_stopped(CPUState *env)
{
    return !vm_running || env->stopped;
}

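/*
 * Stop the machine: freeze the virtual clock, park every vcpu, notify
 * state-change listeners, then flush pending AIO and block devices so
 * disk state is consistent before the STOP event is emitted.
 */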
static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        qemu_aio_flush();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static int cpu_can_run(CPUState *env)
{
    if (env->stop)
        return 0;
    if (env->stopped || !vm_running)
        return 0;
    return 1;
}

static int cpu_has_work(CPUState *env)
{
    if (env->stop)
        return 1;
    if (env->queued_work_first)
        return 1;
    if (env->stopped || !vm_running)
        return 0;
    if (!env->halted)
        return 1;
    if (qemu_cpu_has_work(env))
        return 1;
    return 0;
}

static int any_cpu_has_work(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        if (cpu_has_work(env))
            return 1;
    return 0;
}

static void cpu_debug_handler(CPUState *env)
{
    gdb_set_stop_cpu(env);
    debug_requested = EXCP_DEBUG;
    vm_stop(EXCP_DEBUG);
}

#ifndef _WIN32
static int io_thread_fd = -1;

static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd. */
    static const uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1)
        return;

    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending. */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit(1);
    }
}

static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe. For eventfd, only 8 bytes will be read. */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}

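/*
 * Create the file descriptor pair (an eventfd where available, otherwise
 * a pipe) that other threads use to wake the main loop. Both ends are
 * made non-blocking: qemu_event_increment() must never stall, and
 * qemu_event_read() drains whatever has accumulated.
 */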
static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1)
        return -errno;

    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0)
        goto fail;

    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0)
        goto fail;

    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}
#else
HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit(1);
    }
}
#endif

#ifndef CONFIG_IOTHREAD
int qemu_init_main_loop(void)
{
    cpu_set_debug_excp_handler(cpu_debug_handler);

    return qemu_event_init();
}

void qemu_main_loop_start(void)
{
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled())
        kvm_init_vcpu(env);
    return;
}

int qemu_cpu_self(void *env)
{
    return 1;
}

void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    func(data);
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
    return;
}

void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment();
    if (env) {
        cpu_exit(env);
    }
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
}

void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}

#else /* CONFIG_IOTHREAD */

#include "qemu-thread.h"

QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

static void tcg_init_ipi(void);
static void kvm_init_ipi(CPUState *env);
static sigset_t block_io_signals(void);

/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (unsigned long) opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}

static int qemu_signalfd_init(sigset_t mask)
{
    int sigfd;

    sigfd = qemu_signalfd(&mask);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(unsigned long) sigfd);

    return 0;
}

int qemu_init_main_loop(void)
{
    int ret;
    sigset_t blocked_signals;

    cpu_set_debug_excp_handler(cpu_debug_handler);

    blocked_signals = block_io_signals();

    ret = qemu_signalfd_init(blocked_signals);
    if (ret)
        return ret;

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_self(&io_thread);

    return 0;
}

void qemu_main_loop_start(void)
{
    qemu_system_ready = 1;
    qemu_cond_broadcast(&qemu_system_cond);
}

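/*
 * Run func(data) on env's vcpu thread and wait for completion. The work
 * item lives on this thread's stack, which is safe because we do not
 * return before the target thread marks it done. qemu_cond_wait() drops
 * the global mutex, letting other vcpu threads run and clobber
 * cpu_single_env, so it is saved and restored around the wait.
 */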
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first)
        env->queued_work_first = &wi;
    else
        env->queued_work_last->next = &wi;
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first)
        return;

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (!any_cpu_has_work())
        qemu_cond_timedwait(tcg_halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
#if defined(TARGET_I386)
    if (kvm_on_sigbus(siginfo->ssi_code, (void *)(intptr_t)siginfo->ssi_addr))
#endif
        sigbus_reraise();
}

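/*
 * Drain pending SIG_IPI and SIGBUS via sigtimedwait(), releasing the
 * global mutex while blocked so the I/O thread can make progress. SIGBUS
 * indicates a hardware memory error; on x86 it is handed to KVM first,
 * and anything unhandled is re-raised with the default (fatal) action.
 */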
static void qemu_kvm_eat_signal(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        qemu_mutex_unlock(&qemu_global_mutex);

        r = sigtimedwait(&waitset, &siginfo, &ts);
        e = errno;

        qemu_mutex_lock(&qemu_global_mutex);

        if (r == -1 && !(e == EAGAIN || e == EINTR)) {
            fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
            exit(1);
        }

        switch (r) {
        case SIGBUS:
#ifdef TARGET_I386
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr))
#endif
                sigbus_reraise();
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            fprintf(stderr, "sigpending: %s\n", strerror(errno));
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (!cpu_has_work(env))
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_kvm_eat_signal(env, 0);
    qemu_wait_io_event_common(env);
}

static int qemu_cpu_exec(CPUState *env);

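/*
 * With KVM each vcpu gets its own thread: create the kernel vcpu, set up
 * the thread's signal handling, announce creation via qemu_cpu_cond,
 * wait for machine initialization, then loop between guest execution and
 * qemu_kvm_wait_io_event() forever.
 */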
static void *kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_self(env->thread);
    if (kvm_enabled())
        kvm_init_vcpu(env);

    kvm_init_ipi(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    tcg_init_ipi();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        cpu_exec_all();
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    qemu_thread_signal(env->thread, SIG_IPI);
}

int qemu_cpu_self(void *_env)
{
    CPUState *env = _env;
    QemuThread this;

    qemu_thread_self(&this);

    return qemu_thread_equal(&this, env->thread);
}

static void cpu_signal(int sig)
{
    if (cpu_single_env)
        cpu_exit(cpu_single_env);
    exit_request = 1;
}

static void tcg_init_ipi(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

static void dummy_signal(int sig)
{
}

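/*
 * A KVM vcpu thread must not take SIG_IPI in user space: install a no-op
 * handler and give KVM a mask with SIG_IPI and SIGBUS unblocked, so those
 * signals are only delivered while inside KVM_RUN, where they kick the
 * vcpu back out to us.
 */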
static void kvm_init_ipi(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
        exit(1);
    }
}

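/*
 * Block the signals that the I/O thread consumes through signalfd,
 * install the SIGBUS action that sigfd_handler() dispatches to, and opt
 * in to "early kill" machine-check reporting via prctl(). This runs
 * before vcpu threads are spawned, so they inherit the blocked mask and
 * unblock only what they need.
 */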
static sigset_t block_io_signals(void)
{
    sigset_t set;
    struct sigaction action;

    /* SIGUSR2 used by posix-aio-compat.c */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);
    prctl(PR_MCE_KILL, 1, 1, 0, 0);

    return set;
}

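/*
 * Acquire the global mutex from the I/O thread. A TCG vcpu can hold the
 * lock for a long stretch of translated code, so when trylock fails we
 * kick it with SIG_IPI to force an exit from the guest, and take
 * qemu_fair_mutex so the vcpu cannot immediately re-acquire the global
 * lock and starve us (see qemu_tcg_wait_io_event()).
 */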
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped)
            return 0;
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

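/*
 * Ask every vcpu to stop, then wait until all have acknowledged. The
 * kicks are repeated on every timed-wait wakeup since a vcpu may not yet
 * have reached a point where it notices the stop request.
 */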
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

static void tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, tcg_cpu_thread_fn, env);
        while (env->created == 0)
            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
    while (env->created == 0)
        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled())
        kvm_start_vcpu(env);
    else
        tcg_init_vcpu(env);
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

static void qemu_system_vmstop_request(int reason)
{
    vmstop_requested = reason;
    qemu_notify_event();
}

void vm_stop(int reason)
{
    QemuThread me;

    qemu_thread_self(&me);

    if (!qemu_thread_equal(&me, &io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        if (cpu_single_env) {
            cpu_exit(cpu_single_env);
            cpu_single_env->stop = 1;
        }
        return;
    }
    do_vm_stop(reason);
}

#endif

static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
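    /*
     * With -icount, grant the vcpu an instruction budget that expires at
     * the next timer deadline: the low 16 bits go in icount_decr (the
     * counter the translated code decrements), the rest in icount_extra.
     */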
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_next_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

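/*
 * Round-robin scheduler for TCG: run each vcpu in turn, keeping the
 * position in next_cpu so that a pending alarm or a debug exception
 * resumes the rotation where it left off. Returns whether any vcpu
 * still has work to do.
 */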
bool cpu_exec_all(void)
{
    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env)) {
            if (qemu_cpu_exec(env) == EXCP_DEBUG) {
                break;
            }
        } else if (env->stop) {
            break;
        }
    }
    exit_request = 0;
    return any_cpu_has_work();
}

void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}