/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"

#include "cpus.h"

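/* SIG_IPI is the signal used to kick a vcpu thread out of guest execution,
 * e.g. when another thread needs the global mutex.  A real-time signal is
 * preferred where available; SIGUSR1 is the fallback. */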
#ifdef SIGRTMIN
#define SIG_IPI (SIGRTMIN+4)
#else
#define SIG_IPI SIGUSR1
#endif

static CPUState *cur_cpu;
static CPUState *next_cpu;

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

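/* Synchronize register state for every vcpu from the accelerator (KVM)
 * into QEMU's CPUState, so that code inspecting or migrating cpu state
 * sees current values. */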
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

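/* Stop the VM: freeze the tick counters, pause every vcpu, then notify
 * vmstate listeners and emit the QMP STOP event. */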
static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

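/* Scheduling predicates: a vcpu may run only while no stop is pending and
 * the VM is running; it has work whenever it must stop, has queued work
 * items, or is not halted (or a wakeup condition is pending). */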
static int cpu_can_run(CPUState *env)
{
    if (env->stop)
        return 0;
    if (env->stopped || !vm_running)
        return 0;
    return 1;
}

static int cpu_has_work(CPUState *env)
{
    if (env->stop)
        return 1;
    if (env->queued_work_first)
        return 1;
    if (env->stopped || !vm_running)
        return 0;
    if (!env->halted)
        return 1;
    if (qemu_cpu_has_work(env))
        return 1;
    return 0;
}

static int tcg_has_work(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        if (cpu_has_work(env))
            return 1;
    return 0;
}

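/* Main loop wakeup: on POSIX hosts qemu_event_init() sets up a pipe (or
 * eventfd), qemu_event_increment() writes to it to wake the main loop,
 * and qemu_event_read() drains it again.  On Windows an event object
 * serves the same purpose. */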
#ifndef _WIN32
static int io_thread_fd = -1;

static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd. */
    static uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1)
        return;

    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending. */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit (1);
    }
}

static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe. For eventfd, only 8 bytes will be read. */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}

static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1)
        return -errno;

    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0)
        goto fail;

    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0)
        goto fail;

    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}
#else
HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit (1);
    }
}
#endif

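/* Without CONFIG_IOTHREAD everything runs in a single thread, so most of
 * the vcpu control API below reduces to no-ops. */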
#ifndef CONFIG_IOTHREAD
int qemu_init_main_loop(void)
{
    return qemu_event_init();
}

void qemu_main_loop_start(void)
{
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled())
        kvm_init_vcpu(env);
    return;
}

int qemu_cpu_self(void *env)
{
    return 1;
}

void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    func(data);
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
    return;
}

void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment ();
    if (env) {
        cpu_exit(env);
    }
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
}

void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}

#else /* CONFIG_IOTHREAD */

#include "qemu-thread.h"

QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

static void tcg_block_io_signals(void);
static void kvm_block_io_signals(CPUState *env);
static void unblock_io_signals(void);

int qemu_init_main_loop(void)
{
    int ret;

    ret = qemu_event_init();
    if (ret)
        return ret;

    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    unblock_io_signals();
    qemu_thread_self(&io_thread);

    return 0;
}

void qemu_main_loop_start(void)
{
    qemu_system_ready = 1;
    qemu_cond_broadcast(&qemu_system_cond);
}

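/* Run func(data) on the given vcpu's thread: execute it directly if we
 * are already on that thread, otherwise queue a work item, kick the vcpu
 * and wait until the item has been processed. */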
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first)
        env->queued_work_first = &wi;
    else
        env->queued_work_last->next = &wi;
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

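/* Called on the vcpu thread: drain the work item queue and wake up any
 * run_on_cpu() callers blocked on qemu_work_cond. */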
static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first)
        return;

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
}

static void qemu_wait_io_event(CPUState *env)
{
    while (!tcg_has_work())
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_wait_io_event_common(env);
}

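/* Consume a pending SIG_IPI (if any) without running a handler, using
 * sigtimedwait with the given timeout in milliseconds. */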
static void qemu_kvm_eat_signal(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    qemu_mutex_unlock(&qemu_global_mutex);
    r = sigtimedwait(&waitset, &siginfo, &ts);
    e = errno;
    qemu_mutex_lock(&qemu_global_mutex);

    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
        fprintf(stderr, "sigtimedwait: %s\n", strerror(e));
        exit(1);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (!cpu_has_work(env))
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_kvm_eat_signal(env, 0);
    qemu_wait_io_event_common(env);
}

static int qemu_cpu_exec(CPUState *env);

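/* Per-vcpu thread for KVM: each guest cpu gets its own host thread that
 * sets up its signal mask, announces its creation, waits for machine
 * initialization, then loops between running guest code and servicing
 * I/O events. */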
static void *kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_self(env->thread);
    if (kvm_enabled())
        kvm_init_vcpu(env);

    kvm_block_io_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

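/* With TCG a single host thread executes all guest cpus round-robin. */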
static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    tcg_block_io_signals();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        tcg_cpu_exec();
        qemu_wait_io_event(cur_cpu);
    }

    return NULL;
}

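/* Wake a vcpu: broadcast its halt condition and send SIG_IPI to force it
 * out of guest execution. */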
void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;
    qemu_cond_broadcast(env->halt_cond);
    qemu_thread_signal(env->thread, SIG_IPI);
}

int qemu_cpu_self(void *_env)
{
    CPUState *env = _env;
    QemuThread this;

    qemu_thread_self(&this);

    return qemu_thread_equal(&this, env->thread);
}

static void cpu_signal(int sig)
{
    if (cpu_single_env)
        cpu_exit(cpu_single_env);
    exit_request = 1;
}

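/* Signal routing for the TCG thread: block I/O-related signals (they are
 * handled by the I/O thread) and unblock SIG_IPI, whose handler requests
 * an exit from the cpu execution loop. */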
static void tcg_block_io_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGCHLD);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);
}

static void dummy_signal(int sig)
{
}

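/* Signal setup for a KVM vcpu thread: block all I/O signals including
 * SIG_IPI at the thread level, then hand the kernel a mask without
 * SIG_IPI via kvm_set_signal_mask, so SIG_IPI is delivered only while
 * inside KVM_RUN and can interrupt guest execution without racing. */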
static void kvm_block_io_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGCHLD);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(r));
        exit(1);
    }
}

static void unblock_io_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);
}

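/* Acquire the global mutex from the I/O thread.  The TCG thread holds it
 * while executing guest code, so if trylock fails we kick that thread
 * with SIG_IPI to make it drop the lock; qemu_fair_mutex keeps the vcpu
 * threads from starving us. */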
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_fair_mutex);
        qemu_mutex_lock(&qemu_global_mutex);
        qemu_mutex_unlock(&qemu_fair_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped)
            return 0;
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

static void tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, tcg_cpu_thread_fn, env);
        while (env->created == 0)
            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
    while (env->created == 0)
        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled())
        kvm_start_vcpu(env);
    else
        tcg_init_vcpu(env);
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

static void qemu_system_vmstop_request(int reason)
{
    vmstop_requested = reason;
    qemu_notify_event();
}

void vm_stop(int reason)
{
    QemuThread me;
    qemu_thread_self(&me);

    if (!qemu_thread_equal(&me, &io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        if (cpu_single_env) {
            cpu_exit(cpu_single_env);
            cpu_single_env->stop = 1;
        }
        return;
    }
    do_vm_stop(reason);
}

#endif

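/* Execute guest code on env.  With -icount, first account for any
 * unexecuted instructions, then program the 16-bit decrementer (with the
 * overflow kept in icount_extra) with the number of instructions allowed
 * before the next timer deadline. */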
static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round (qemu_next_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

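/* Run each cpu in turn until an alarm is pending, a stop is requested or
 * a debug exception occurs; returns whether any cpu still has work. */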
bool tcg_cpu_exec(void)
{
    int ret = 0;

    if (next_cpu == NULL)
        next_cpu = first_cpu;
    for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
        CPUState *env = cur_cpu = next_cpu;

        qemu_clock_enable(vm_clock,
                          (cur_cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending())
            break;
        if (cpu_can_run(env))
            ret = qemu_cpu_exec(env);
        else if (env->stop)
            break;

        if (ret == EXCP_DEBUG) {
            gdb_set_stop_cpu(env);
            debug_requested = EXCP_DEBUG;
            break;
        }
    }
    return tcg_has_work();
}

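/* Assign each cpu to the NUMA node whose configured cpumask contains it. */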
void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

void list_cpus(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
               const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}