/*
 * QEMU CPU model
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#ifndef QEMU_CPU_H
#define QEMU_CPU_H

#include <signal.h>
#include <setjmp.h>
#include "hw/qdev-core.h"
#include "disas/bfd.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qemu/typedefs.h"

typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);

/**
 * vaddr:
 * Type wide enough to contain any #target_ulong virtual address.
 */
typedef uint64_t vaddr;
#define VADDR_PRId PRId64
#define VADDR_PRIu PRIu64
#define VADDR_PRIo PRIo64
#define VADDR_PRIx PRIx64
#define VADDR_PRIX PRIX64
#define VADDR_MAX UINT64_MAX
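/*
 * Example (illustrative, not part of the API above): a vaddr should be
 * printed with the VADDR_PRI* format macros rather than a fixed conversion
 * specifier, since its width is independent of the target's pointer size.
 *
 *     vaddr pc = 0x1000;
 *     printf("pc=0x%" VADDR_PRIx "\n", pc);
 */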
/**
 * SECTION:cpu
 * @section_id: QEMU-cpu
 * @title: CPU Class
 * @short_description: Base class for all CPUs
 */

#define TYPE_CPU "cpu"

/* Since this macro is used a lot in hot code paths and in conjunction with
 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
 * an unchecked cast.
 */
#define CPU(obj) ((CPUState *)(obj))

#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)

typedef struct CPUState CPUState;

typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
                                    bool is_write, bool is_exec, int opaque,
                                    unsigned size);

struct TranslationBlock;

/**
 * CPUClass:
 * @class_by_name: Callback to map -cpu command line model name to an
 * instantiatable CPU type.
 * @parse_features: Callback to parse command line arguments.
 * @reset: Callback to reset the #CPUState to its initial state.
 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
 * @has_work: Callback for checking if there is work to do.
 * @do_interrupt: Callback for interrupt handling.
 * @do_unassigned_access: Callback for unassigned access handling.
 * @do_unaligned_access: Callback for unaligned access handling, if
 * the target defines #ALIGNED_ONLY.
 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
 * runtime configurable endianness is currently big-endian. Non-configurable
 * CPUs can use the default implementation of this method. This method should
 * not be used by any callers other than the pre-1.0 virtio devices.
 * @memory_rw_debug: Callback for GDB memory access.
 * @dump_state: Callback for dumping state.
 * @dump_statistics: Callback for dumping statistics.
 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
 * @get_memory_mapping: Callback for obtaining the memory mappings.
 * @set_pc: Callback for setting the Program Counter register.
 * @synchronize_from_tb: Callback for synchronizing state from a TCG
 * #TranslationBlock.
 * @handle_mmu_fault: Callback for handling an MMU fault.
 * @get_phys_page_debug: Callback for obtaining a physical address.
 * @gdb_read_register: Callback for letting GDB read a register.
 * @gdb_write_register: Callback for letting GDB write a register.
 * @debug_excp_handler: Callback for handling debug exceptions.
 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
 * 64-bit VM coredump.
 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 64-bit VM coredump.
 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
 * 32-bit VM coredump.
 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 32-bit VM coredump.
 * @vmsd: State description for migration.
 * @gdb_num_core_regs: Number of core registers accessible to GDB.
 * @gdb_core_xml_file: File name for core registers GDB XML description.
 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 * before the insn which triggers a watchpoint rather than after it.
 * @cpu_exec_enter: Callback for cpu_exec preparation.
 * @cpu_exec_exit: Callback for cpu_exec cleanup.
 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
 * @disas_set_info: Setup architecture specific components of disassembly info
 *
 * Represents a CPU family or model.
 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(CPUState *cpu, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                int is_write, int is_user, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                       int flags);
    void (*dump_statistics)(CPUState *cpu, FILE *f,
                            fprintf_function cpu_fprintf, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
                            int mmu_index);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    void (*debug_excp_handler)(CPUState *cpu);

    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    const struct VMStateDescription *vmsd;
    int gdb_num_core_regs;
    const char *gdb_core_xml_file;
    bool gdb_stop_before_watchpoint;

    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
} CPUClass;
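/*
 * Example (hypothetical "foo" target, sketched for illustration): a target
 * typically fills in these hooks from its QOM class_init function.  The
 * foo_cpu_* helpers below are assumptions, not provided by this header.
 *
 *     static void foo_cpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *
 *         cc->class_by_name = foo_cpu_class_by_name;
 *         cc->has_work = foo_cpu_has_work;
 *         cc->do_interrupt = foo_cpu_do_interrupt;
 *         cc->set_pc = foo_cpu_set_pc;
 *         cc->gdb_num_core_regs = 16;
 *     }
 */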
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif

typedef struct CPUBreakpoint {
    vaddr pc;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;

typedef struct CPUWatchpoint {
    vaddr vaddr;
    vaddr len;
    vaddr hitaddr;
    MemTxAttrs hitattrs;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;

struct KVMState;
struct kvm_run;

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/**
 * CPUState:
 * @cpu_index: CPU index (informative).
 * @nr_cores: Number of cores within this CPU package.
 * @nr_threads: Number of threads within this CPU.
 * @numa_node: NUMA node this CPU belongs to.
 * @host_tid: Host thread ID.
 * @running: #true if CPU is currently running (usermode).
 * @created: Indicates whether the CPU thread has been successfully created.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in suspended state.
 * @stop: Indicates a pending stop request.
 * @stopped: Indicates the CPU has been artificially stopped.
 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
 * CPU and return to its top level loop.
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @icount_decr: Number of cycles left, with interrupt flag in high bit.
 * This allows a single read-compare-cbranch-write sequence to test
 * for both decrementer underflow and exceptions.
 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
 * requires that IO only be performed on the last instruction of a TB
 * so that interrupts take effect immediately.
 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 * AddressSpaces this CPU has)
 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 * @as: Pointer to the first AddressSpace, for the convenience of targets which
 * only have a single AddressSpace
 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 * @current_tb: Currently executing TB.
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 * @next_cpu: Next CPU sharing TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
 * @kvm_fd: vCPU file descriptor for KVM.
 * @work_mutex: Lock to prevent multiple access to queued_work_*.
 * @queued_work_first: First asynchronous work pending.
 *
 * State of one CPU core or thread.
 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;
    int numa_node;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    uint32_t host_tid;
    bool running;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    bool crash_occurred;
    bool exit_request;
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_extra;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    struct qemu_work_item *queued_work_first, *queued_work_last;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;

    void *env_ptr; /* CPUArchState */
    struct TranslationBlock *current_tb;
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;

    int kvm_fd;
    bool kvm_vcpu_dirty;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index; /* used by alpha TCG */
    uint32_t halted; /* used by alpha, cris, ppc TCG */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;
    uint32_t can_do_io;
    int32_t exception_index; /* used by m68k TCG */

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0. Leave this field at the end so as to make the
       (absolute value) offset as small as possible. This reduces code
       size, especially for hosts without large memory offsets. */
    uint32_t tcg_exit_req;
};

QTAILQ_HEAD(CPUTailQ, CPUState);
extern struct CPUTailQ cpus;
#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
#define CPU_FOREACH_REVERSE(cpu) \
    QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node)
#define first_cpu QTAILQ_FIRST(&cpus)
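/*
 * Example (illustrative): iterating over all vCPUs and kicking each one out
 * of its execution loop.
 *
 *     CPUState *cpu;
 *
 *     CPU_FOREACH(cpu) {
 *         qemu_cpu_kick(cpu);
 *     }
 */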
extern __thread CPUState *current_cpu;

/**
 * cpu_paging_enabled:
 * @cpu: The CPU whose state is to be inspected.
 *
 * Returns: %true if paging is enabled, %false otherwise.
 */
bool cpu_paging_enabled(const CPUState *cpu);

/**
 * cpu_get_memory_mapping:
 * @cpu: The CPU whose memory mappings are to be obtained.
 * @list: Where to write the memory mappings to.
 * @errp: Pointer for reporting an #Error.
 */
void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp);

/**
 * cpu_write_elf64_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf64_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_write_elf32_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf32_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * CPUDumpFlags:
 * @CPU_DUMP_CODE:
 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 */
enum CPUDumpFlags {
    CPU_DUMP_CODE = 0x00010000,
    CPU_DUMP_FPU  = 0x00020000,
    CPU_DUMP_CCOP = 0x00040000,
};

/**
 * cpu_dump_state:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU state.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                    int flags);

/**
 * cpu_dump_statistics:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU statistics.
 */
void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

#ifndef CONFIG_USER_ONLY
/**
 * cpu_get_phys_page_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 *
 * Obtains the physical page corresponding to a virtual one.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->get_phys_page_debug(cpu, addr);
}
#endif

/**
 * cpu_reset:
 * @cpu: The CPU whose state is to be reset.
 */
void cpu_reset(CPUState *cpu);

/**
 * cpu_class_by_name:
 * @typename: The CPU base type.
 * @cpu_model: The model string without any parameters.
 *
 * Looks up a CPU #ObjectClass matching name @cpu_model.
 *
 * Returns: A #CPUClass or %NULL if no matching class is found.
 */
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);

/**
 * cpu_generic_init:
 * @typename: The CPU base type.
 * @cpu_model: The model string including optional parameters.
 *
 * Instantiates a CPU, processes optional parameters and realizes the CPU.
 *
 * Returns: A #CPUState or %NULL if an error occurred.
 */
CPUState *cpu_generic_init(const char *typename, const char *cpu_model);
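/*
 * Example (hypothetical "foo" target): board code typically creates a CPU by
 * passing the QOM base type and the -cpu model string.  TYPE_FOO_CPU, the
 * model name and the error handling below are assumptions for illustration,
 * not part of this header.
 *
 *     CPUState *cs = cpu_generic_init(TYPE_FOO_CPU, "foo-v1");
 *     if (cs == NULL) {
 *         error_report("unable to find CPU definition");
 *         exit(1);
 *     }
 */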
/**
 * cpu_has_work:
 * @cpu: The vCPU to check.
 *
 * Checks whether the CPU has work to do.
 *
 * Returns: %true if the CPU has work, %false otherwise.
 */
static inline bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    g_assert(cc->has_work);
    return cc->has_work(cpu);
}

/**
 * qemu_cpu_is_self:
 * @cpu: The vCPU to check against.
 *
 * Checks whether the caller is executing on the vCPU thread.
 *
 * Returns: %true if called from @cpu's thread, %false otherwise.
 */
bool qemu_cpu_is_self(CPUState *cpu);

/**
 * qemu_cpu_kick:
 * @cpu: The vCPU to kick.
 *
 * Kicks @cpu's thread.
 */
void qemu_cpu_kick(CPUState *cpu);

/**
 * cpu_is_stopped:
 * @cpu: The CPU to check.
 *
 * Checks whether the CPU is stopped.
 *
 * Returns: %true if run state is not running or if artificially stopped;
 * %false otherwise.
 */
bool cpu_is_stopped(CPUState *cpu);

/**
 * run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu.
 */
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

/**
 * async_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 */
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
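/*
 * Example (illustrative): scheduling work to run in a vCPU's context.  The
 * do_reset_work() helper is an assumption, not part of this header.
 * run_on_cpu() waits until @func has run; async_run_on_cpu() only queues the
 * work and returns.
 *
 *     static void do_reset_work(void *data)
 *     {
 *         cpu_reset(CPU(data));
 *     }
 *
 *     run_on_cpu(cpu, do_reset_work, cpu);
 */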
/**
 * qemu_get_cpu:
 * @index: The CPUState@cpu_index value of the CPU to obtain.
 *
 * Gets a CPU matching @index.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *qemu_get_cpu(int index);

/**
 * cpu_exists:
 * @id: Guest-exposed CPU ID to lookup.
 *
 * Search for CPU with specified ID.
 *
 * Returns: %true - CPU is found, %false - CPU isn't found.
 */
bool cpu_exists(int64_t id);

/**
 * cpu_throttle_set:
 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
 *
 * Throttles all vcpus by forcing them to sleep for the given percentage of
 * time. A throttle_percentage of 25 corresponds roughly to a 75% duty cycle
 * (example: 10ms sleep for every 30ms awake).
 *
 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
 * is called.
 */
void cpu_throttle_set(int new_throttle_pct);

/**
 * cpu_throttle_stop:
 *
 * Stops the vcpu throttling started by cpu_throttle_set.
 */
void cpu_throttle_stop(void);

/**
 * cpu_throttle_active:
 *
 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
 */
bool cpu_throttle_active(void);

/**
 * cpu_throttle_get_percentage:
 *
 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
 *
 * Returns: The throttle percentage in range 1 to 99.
 */
int cpu_throttle_get_percentage(void);
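/*
 * Example (illustrative): migration-autoconverge style throttling.  Start at
 * 25% (roughly 10ms asleep for every 30ms awake), query the current rate,
 * and stop throttling when done.
 *
 *     int pct;
 *
 *     if (!cpu_throttle_active()) {
 *         cpu_throttle_set(25);
 *     }
 *     pct = cpu_throttle_get_percentage();
 *     ...
 *     cpu_throttle_stop();
 */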
#ifndef CONFIG_USER_ONLY

typedef void (*CPUInterruptHandler)(CPUState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

/**
 * cpu_interrupt:
 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
 *
 * Invokes the interrupt handler.
 */
static inline void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu_interrupt_handler(cpu, mask);
}

#else /* USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask);

#endif /* USER_ONLY */
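/*
 * Example (illustrative): raising and later clearing a hardware interrupt
 * request.  CPU_INTERRUPT_HARD is defined elsewhere (exec/cpu-all.h), not in
 * this header.
 *
 *     cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
 *     ...
 *     cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
 */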
#ifdef CONFIG_SOFTMMU
static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                         bool is_write, bool is_exec,
                                         int opaque, unsigned size)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->do_unassigned_access) {
        cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
    }
}

static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        int is_write, int is_user,
                                        uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
}
#endif

/**
 * cpu_set_pc:
 * @cpu: The CPU to set the program counter for.
 * @addr: Program counter value.
 *
 * Sets the program counter for a CPU.
 */
static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->set_pc(cpu, addr);
}

/**
 * cpu_reset_interrupt:
 * @cpu: The CPU to clear the interrupt on.
 * @mask: The interrupt mask to clear.
 *
 * Resets interrupts on the vCPU @cpu.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask);

/**
 * cpu_exit:
 * @cpu: The CPU to exit.
 *
 * Requests the CPU @cpu to exit execution.
 */
void cpu_exit(CPUState *cpu);

/**
 * cpu_resume:
 * @cpu: The CPU to resume.
 *
 * Resumes CPU, i.e. puts CPU into runnable state.
 */
void cpu_resume(CPUState *cpu);

/**
 * qemu_init_vcpu:
 * @cpu: The vCPU to initialize.
 *
 * Initializes a vCPU.
 */
void qemu_init_vcpu(CPUState *cpu);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not run timers while single stepping */

/**
 * cpu_single_step:
 * @cpu: CPU to set the flags for.
 * @enabled: Flags to enable.
 *
 * Enables or disables single-stepping for @cpu.
 */
void cpu_single_step(CPUState *cpu, int enabled);
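/*
 * Example (illustrative): enabling single-step mode while also masking
 * interrupts and timers, as the gdbstub does when stepping a guest.
 *
 *     cpu_single_step(cpu, SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER);
 *     ...
 *     cpu_single_step(cpu, 0);
 */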
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
/* 0x08 currently unused */
#define BP_GDB                0x10
#define BP_CPU                0x20
#define BP_ANY                (BP_GDB | BP_CPU)
#define BP_WATCHPOINT_HIT_READ 0x40
#define BP_WATCHPOINT_HIT_WRITE 0x80
#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)

int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);

/* Return true if PC matches an installed breakpoint. */
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
            if (bp->pc == pc && (bp->flags & mask)) {
                return true;
            }
        }
    }
    return false;
}
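/*
 * Example (illustrative): installing a GDB breakpoint at a guest PC and
 * checking for it later, e.g. before translating code at that address.
 *
 *     CPUBreakpoint *bp;
 *
 *     cpu_breakpoint_insert(cpu, pc, BP_GDB, &bp);
 *     ...
 *     if (cpu_breakpoint_test(cpu, pc, BP_ANY)) {
 *         cpu_breakpoint_remove_by_ref(cpu, bp);
 *     }
 */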
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
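/*
 * Example (illustrative): watching a 4-byte guest region for writes and
 * stopping before the access completes, as a debugger would.
 *
 *     CPUWatchpoint *wp;
 *
 *     cpu_watchpoint_insert(cpu, addr, 4,
 *                           BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS | BP_GDB,
 *                           &wp);
 *     ...
 *     cpu_watchpoint_remove_by_ref(cpu, wp);
 */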
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
void cpu_exec_exit(CPUState *cpu);

#ifdef CONFIG_SOFTMMU
extern const struct VMStateDescription vmstate_cpu_common;
#else
#define vmstate_cpu_common vmstate_dummy
#endif

#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}
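/*
 * Example (hypothetical "foo" target, sketched for illustration): a target's
 * VMStateDescription typically places VMSTATE_CPU() first so the common
 * CPUState section is migrated before the architecture-specific fields.
 * FooCPU and its env.regs array are assumptions, not part of this header.
 *
 *     static const VMStateDescription vmstate_foo_cpu = {
 *         .name = "cpu",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_CPU(),
 *             VMSTATE_UINT32_ARRAY(env.regs, FooCPU, 16),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */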
#endif