]> git.proxmox.com Git - mirror_qemu.git/blame - include/qom/cpu.h
target-arm: Fix IL bit reported for Thumb VFP and Neon traps
[mirror_qemu.git] / include / qom / cpu.h
CommitLineData
dd83b06a
AF
1/*
2 * QEMU CPU model
3 *
4 * Copyright (c) 2012 SUSE LINUX Products GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 */
20#ifndef QEMU_CPU_H
21#define QEMU_CPU_H
22
fcd7d003 23#include <signal.h>
6f03bef0 24#include <setjmp.h>
961f8395 25#include "hw/qdev-core.h"
37b9de46 26#include "disas/bfd.h"
c658b94f 27#include "exec/hwaddr.h"
66b9b43c 28#include "exec/memattrs.h"
bdc44640 29#include "qemu/queue.h"
1de7afc9 30#include "qemu/thread.h"
a23bbfda 31#include "qemu/typedefs.h"
dd83b06a 32
b5ba1cc6
QN
/* Sink used when dumping guest memory to a core file: receives @size bytes
 * from @buf; @opaque is caller state threaded through the dump. */
typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);

/**
 * vaddr:
 * Type wide enough to contain any #target_ulong virtual address.
 */
typedef uint64_t vaddr;
/* printf()-style conversion specifiers matching the vaddr type above. */
#define VADDR_PRId PRId64
#define VADDR_PRIu PRIu64
#define VADDR_PRIo PRIo64
#define VADDR_PRIx PRIx64
#define VADDR_PRIX PRIX64
#define VADDR_MAX UINT64_MAX
/**
 * SECTION:cpu
 * @section_id: QEMU-cpu
 * @title: CPU Class
 * @short_description: Base class for all CPUs
 */

#define TYPE_CPU "cpu"

/* Since this macro is used a lot in hot code paths and in conjunction with
 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
 * an unchecked cast.
 */
#define CPU(obj) ((CPUState *)(obj))

#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)

typedef struct CPUState CPUState;

/* Hook called on an access to unassigned memory; @is_write/@is_exec give the
 * direction of the faulting access, @opaque and @size describe it further. */
typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
                                    bool is_write, bool is_exec, int opaque,
                                    unsigned size);

struct TranslationBlock;
dd83b06a
AF
74/**
75 * CPUClass:
2b8c2754
AF
76 * @class_by_name: Callback to map -cpu command line model name to an
77 * instantiatable CPU type.
94a444b2 78 * @parse_features: Callback to parse command line arguments.
f5df5baf 79 * @reset: Callback to reset the #CPUState to its initial state.
91b1df8c 80 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
8c2e1b00 81 * @has_work: Callback for checking if there is work to do.
97a8ea5a 82 * @do_interrupt: Callback for interrupt handling.
c658b94f 83 * @do_unassigned_access: Callback for unassigned access handling.
93e22326
PB
84 * @do_unaligned_access: Callback for unaligned access handling, if
85 * the target defines #ALIGNED_ONLY.
c08295d4
PM
86 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
87 * runtime configurable endianness is currently big-endian. Non-configurable
88 * CPUs can use the default implementation of this method. This method should
89 * not be used by any callers other than the pre-1.0 virtio devices.
f3659eee 90 * @memory_rw_debug: Callback for GDB memory access.
878096ee
AF
91 * @dump_state: Callback for dumping state.
92 * @dump_statistics: Callback for dumping statistics.
997395d3 93 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
444d5590 94 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
a23bbfda 95 * @get_memory_mapping: Callback for obtaining the memory mappings.
f45748f1 96 * @set_pc: Callback for setting the Program Counter register.
bdf7ae5b
AF
97 * @synchronize_from_tb: Callback for synchronizing state from a TCG
98 * #TranslationBlock.
7510454e 99 * @handle_mmu_fault: Callback for handling an MMU fault.
00b941e5 100 * @get_phys_page_debug: Callback for obtaining a physical address.
1dc6fb1f
PM
101 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
102 * associated memory transaction attributes to use for the access.
103 * CPUs which use memory transaction attributes should implement this
104 * instead of get_phys_page_debug.
d7f25a9e
PM
105 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
106 * a memory access with the specified memory transaction attributes.
5b50e790
AF
107 * @gdb_read_register: Callback for letting GDB read a register.
108 * @gdb_write_register: Callback for letting GDB write a register.
86025ee4 109 * @debug_excp_handler: Callback for handling debug exceptions.
c08295d4
PM
110 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
111 * 64-bit VM coredump.
112 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
113 * note to a 32-bit VM coredump.
114 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
115 * 32-bit VM coredump.
116 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
117 * note to a 32-bit VM coredump.
b170fce3 118 * @vmsd: State description for migration.
a0e372f0 119 * @gdb_num_core_regs: Number of core registers accessible to GDB.
5b24c641 120 * @gdb_core_xml_file: File name for core registers GDB XML description.
2472b6c0
PM
121 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
122 * before the insn which triggers a watchpoint rather than after it.
b3820e6c
DH
123 * @gdb_arch_name: Optional callback that returns the architecture name known
124 * to GDB. The caller must free the returned string with g_free.
cffe7b32
RH
125 * @cpu_exec_enter: Callback for cpu_exec preparation.
126 * @cpu_exec_exit: Callback for cpu_exec cleanup.
9585db68 127 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
37b9de46 128 * @disas_set_info: Setup architecture specific components of disassembly info
dd83b06a
AF
129 *
130 * Represents a CPU family or model.
131 */
132typedef struct CPUClass {
133 /*< private >*/
961f8395 134 DeviceClass parent_class;
dd83b06a
AF
135 /*< public >*/
136
2b8c2754 137 ObjectClass *(*class_by_name)(const char *cpu_model);
94a444b2 138 void (*parse_features)(CPUState *cpu, char *str, Error **errp);
2b8c2754 139
dd83b06a 140 void (*reset)(CPUState *cpu);
91b1df8c 141 int reset_dump_flags;
8c2e1b00 142 bool (*has_work)(CPUState *cpu);
97a8ea5a 143 void (*do_interrupt)(CPUState *cpu);
c658b94f 144 CPUUnassignedAccess do_unassigned_access;
93e22326
PB
145 void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
146 int is_write, int is_user, uintptr_t retaddr);
bf7663c4 147 bool (*virtio_is_big_endian)(CPUState *cpu);
f3659eee
AF
148 int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
149 uint8_t *buf, int len, bool is_write);
878096ee
AF
150 void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
151 int flags);
152 void (*dump_statistics)(CPUState *cpu, FILE *f,
153 fprintf_function cpu_fprintf, int flags);
997395d3 154 int64_t (*get_arch_id)(CPUState *cpu);
444d5590 155 bool (*get_paging_enabled)(const CPUState *cpu);
a23bbfda
AF
156 void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
157 Error **errp);
f45748f1 158 void (*set_pc)(CPUState *cpu, vaddr value);
bdf7ae5b 159 void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
7510454e
AF
160 int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
161 int mmu_index);
00b941e5 162 hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
1dc6fb1f
PM
163 hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
164 MemTxAttrs *attrs);
d7f25a9e 165 int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
5b50e790
AF
166 int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
167 int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
86025ee4 168 void (*debug_excp_handler)(CPUState *cpu);
b170fce3 169
c72bf468
JF
170 int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
171 int cpuid, void *opaque);
172 int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
173 void *opaque);
174 int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
175 int cpuid, void *opaque);
176 int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
177 void *opaque);
a0e372f0
AF
178
179 const struct VMStateDescription *vmsd;
180 int gdb_num_core_regs;
5b24c641 181 const char *gdb_core_xml_file;
b3820e6c 182 gchar * (*gdb_arch_name)(CPUState *cpu);
2472b6c0 183 bool gdb_stop_before_watchpoint;
cffe7b32
RH
184
185 void (*cpu_exec_enter)(CPUState *cpu);
186 void (*cpu_exec_exit)(CPUState *cpu);
9585db68 187 bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
37b9de46
PC
188
189 void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
dd83b06a
AF
190} CPUClass;
/* 16-bit halves of the 32-bit icount_decr word, laid out so that @high
 * overlays the most significant half (which carries the interrupt flag)
 * on both host endiannesses. */
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif
/* One guest breakpoint: address plus BP_* flags, linked per-CPU. */
typedef struct CPUBreakpoint {
    vaddr pc;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;

/* One guest watchpoint: the watched range plus details of the last hit. */
typedef struct CPUWatchpoint {
    vaddr vaddr;
    vaddr len;
    vaddr hitaddr;
    MemTxAttrs hitattrs;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;

struct KVMState;
struct kvm_run;

/* Direct-mapped cache from guest PC to TranslationBlock (see tb_jmp_cache). */
#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
/**
 * CPUState:
 * @cpu_index: CPU index (informative).
 * @nr_cores: Number of cores within this CPU package.
 * @nr_threads: Number of threads within this CPU.
 * @numa_node: NUMA node this CPU is belonging to.
 * @host_tid: Host thread ID.
 * @running: #true if CPU is currently running (usermode).
 * @created: Indicates whether the CPU thread has been successfully created.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in suspended state.
 * @stop: Indicates a pending stop request.
 * @stopped: Indicates the CPU has been artificially stopped.
 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
 * CPU and return to its top level loop.
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @icount_decr: Number of cycles left, with interrupt flag in high bit.
 * This allows a single read-compare-cbranch-write sequence to test
 * for both decrementer underflow and exceptions.
 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
 * requires that IO only be performed on the last instruction of a TB
 * so that interrupts take effect immediately.
 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 * AddressSpaces this CPU has)
 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 * @as: Pointer to the first AddressSpace, for the convenience of targets which
 * only have a single AddressSpace
 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 * @current_tb: Currently executing TB.
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 * @next_cpu: Next CPU sharing TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
 * @kvm_fd: vCPU file descriptor for KVM.
 * @work_mutex: Lock to prevent multiple access to queued_work_*.
 * @queued_work_first: First asynchronous work pending.
 *
 * State of one CPU core or thread.
 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;
    int numa_node;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    uint32_t host_tid;
    bool running;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    bool crash_occurred;
    bool exit_request;
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_extra;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    struct qemu_work_item *queued_work_first, *queued_work_last;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    void *env_ptr; /* CPUArchState */
    struct TranslationBlock *current_tb;
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;

    int kvm_fd;
    bool kvm_vcpu_dirty;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index; /* used by alpha TCG */
    uint32_t halted; /* used by alpha, cris, ppc TCG */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;
    uint32_t can_do_io;
    int32_t exception_index; /* used by m68k TCG */

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0.  Leave this field at the end so as to make the
       (absolute value) offset as small as possible.  This reduces code
       size, especially for hosts without large memory offsets.  */
    uint32_t tcg_exit_req;
};
/* Global list of all CPUs, plus iteration helpers over it. */
QTAILQ_HEAD(CPUTailQ, CPUState);
extern struct CPUTailQ cpus;
#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
#define CPU_FOREACH_REVERSE(cpu) \
    QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node)
#define first_cpu QTAILQ_FIRST(&cpus)

/* The CPU whose thread the caller is currently executing on, or NULL. */
extern __thread CPUState *current_cpu;
444d5590
AF
366/**
367 * cpu_paging_enabled:
368 * @cpu: The CPU whose state is to be inspected.
369 *
370 * Returns: %true if paging is enabled, %false otherwise.
371 */
372bool cpu_paging_enabled(const CPUState *cpu);
373
a23bbfda
AF
374/**
375 * cpu_get_memory_mapping:
376 * @cpu: The CPU whose memory mappings are to be obtained.
377 * @list: Where to write the memory mappings to.
378 * @errp: Pointer for reporting an #Error.
379 */
380void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
381 Error **errp);
382
c72bf468
JF
383/**
384 * cpu_write_elf64_note:
385 * @f: pointer to a function that writes memory to a file
386 * @cpu: The CPU whose memory is to be dumped
387 * @cpuid: ID number of the CPU
388 * @opaque: pointer to the CPUState struct
389 */
390int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
391 int cpuid, void *opaque);
392
393/**
394 * cpu_write_elf64_qemunote:
395 * @f: pointer to a function that writes memory to a file
396 * @cpu: The CPU whose memory is to be dumped
397 * @cpuid: ID number of the CPU
398 * @opaque: pointer to the CPUState struct
399 */
400int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
401 void *opaque);
402
403/**
404 * cpu_write_elf32_note:
405 * @f: pointer to a function that writes memory to a file
406 * @cpu: The CPU whose memory is to be dumped
407 * @cpuid: ID number of the CPU
408 * @opaque: pointer to the CPUState struct
409 */
410int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
411 int cpuid, void *opaque);
412
413/**
414 * cpu_write_elf32_qemunote:
415 * @f: pointer to a function that writes memory to a file
416 * @cpu: The CPU whose memory is to be dumped
417 * @cpuid: ID number of the CPU
418 * @opaque: pointer to the CPUState struct
419 */
420int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
421 void *opaque);
/**
 * CPUDumpFlags:
 * @CPU_DUMP_CODE: also disassemble the guest code around the PC
 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 */
enum CPUDumpFlags {
    CPU_DUMP_CODE = 0x00010000,
    CPU_DUMP_FPU  = 0x00020000,
    CPU_DUMP_CCOP = 0x00040000,
};
/**
 * cpu_dump_state:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags what to dump (#CPUDumpFlags).
 *
 * Dumps CPU state.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                    int flags);

/**
 * cpu_dump_statistics:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags what to dump.
 *
 * Dumps CPU statistics.
 */
void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags);
00b941e5 459#ifndef CONFIG_USER_ONLY
1dc6fb1f
PM
460/**
461 * cpu_get_phys_page_attrs_debug:
462 * @cpu: The CPU to obtain the physical page address for.
463 * @addr: The virtual address.
464 * @attrs: Updated on return with the memory transaction attributes to use
465 * for this access.
466 *
467 * Obtains the physical page corresponding to a virtual one, together
468 * with the corresponding memory transaction attributes to use for the access.
469 * Use it only for debugging because no protection checks are done.
470 *
471 * Returns: Corresponding physical page address or -1 if no page found.
472 */
473static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
474 MemTxAttrs *attrs)
475{
476 CPUClass *cc = CPU_GET_CLASS(cpu);
477
478 if (cc->get_phys_page_attrs_debug) {
479 return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
480 }
481 /* Fallback for CPUs which don't implement the _attrs_ hook */
482 *attrs = MEMTXATTRS_UNSPECIFIED;
483 return cc->get_phys_page_debug(cpu, addr);
484}
485
00b941e5
AF
486/**
487 * cpu_get_phys_page_debug:
488 * @cpu: The CPU to obtain the physical page address for.
489 * @addr: The virtual address.
490 *
491 * Obtains the physical page corresponding to a virtual one.
492 * Use it only for debugging because no protection checks are done.
493 *
494 * Returns: Corresponding physical page address or -1 if no page found.
495 */
496static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
497{
1dc6fb1f 498 MemTxAttrs attrs = {};
00b941e5 499
1dc6fb1f 500 return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
00b941e5 501}
d7f25a9e
PM
502
503/** cpu_asidx_from_attrs:
504 * @cpu: CPU
505 * @attrs: memory transaction attributes
506 *
507 * Returns the address space index specifying the CPU AddressSpace
508 * to use for a memory access with the given transaction attributes.
509 */
510static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
511{
512 CPUClass *cc = CPU_GET_CLASS(cpu);
513
514 if (cc->asidx_from_attrs) {
515 return cc->asidx_from_attrs(cpu, attrs);
516 }
517 return 0;
518}
00b941e5
AF
519#endif
520
dd83b06a
AF
521/**
522 * cpu_reset:
523 * @cpu: The CPU whose state is to be reset.
524 */
525void cpu_reset(CPUState *cpu);
526
2b8c2754
AF
527/**
528 * cpu_class_by_name:
529 * @typename: The CPU base type.
530 * @cpu_model: The model string without any parameters.
531 *
532 * Looks up a CPU #ObjectClass matching name @cpu_model.
533 *
534 * Returns: A #CPUClass or %NULL if not matching class is found.
535 */
536ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
537
9262685b
AF
538/**
539 * cpu_generic_init:
540 * @typename: The CPU base type.
541 * @cpu_model: The model string including optional parameters.
542 *
543 * Instantiates a CPU, processes optional parameters and realizes the CPU.
544 *
545 * Returns: A #CPUState or %NULL if an error occurred.
546 */
547CPUState *cpu_generic_init(const char *typename, const char *cpu_model);
548
3993c6bd 549/**
8c2e1b00 550 * cpu_has_work:
3993c6bd
AF
551 * @cpu: The vCPU to check.
552 *
553 * Checks whether the CPU has work to do.
554 *
555 * Returns: %true if the CPU has work, %false otherwise.
556 */
8c2e1b00
AF
557static inline bool cpu_has_work(CPUState *cpu)
558{
559 CPUClass *cc = CPU_GET_CLASS(cpu);
560
561 g_assert(cc->has_work);
562 return cc->has_work(cpu);
563}
3993c6bd 564
/**
 * qemu_cpu_is_self:
 * @cpu: The vCPU to check against.
 *
 * Checks whether the caller is executing on the vCPU thread.
 *
 * Returns: %true if called from @cpu's thread, %false otherwise.
 */
bool qemu_cpu_is_self(CPUState *cpu);

/**
 * qemu_cpu_kick:
 * @cpu: The vCPU to kick.
 *
 * Kicks @cpu's thread.
 */
void qemu_cpu_kick(CPUState *cpu);

/**
 * cpu_is_stopped:
 * @cpu: The CPU to check.
 *
 * Checks whether the CPU is stopped.
 *
 * Returns: %true if run state is not running or if artificially stopped;
 * %false otherwise.
 */
bool cpu_is_stopped(CPUState *cpu);

/**
 * run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu.
 */
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

/**
 * async_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 */
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

/**
 * qemu_get_cpu:
 * @index: The CPUState@cpu_index value of the CPU to obtain.
 *
 * Gets a CPU matching @index.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *qemu_get_cpu(int index);

/**
 * cpu_exists:
 * @id: Guest-exposed CPU ID to lookup.
 *
 * Search for CPU with specified ID.
 *
 * Returns: %true - CPU is found, %false - CPU isn't found.
 */
bool cpu_exists(int64_t id);

/**
 * cpu_throttle_set:
 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
 *
 * Throttles all vcpus by forcing them to sleep for the given percentage of
 * time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly.
 * (example: 10ms sleep for every 30ms awake).
 *
 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
 * is called.
 */
void cpu_throttle_set(int new_throttle_pct);

/**
 * cpu_throttle_stop:
 *
 * Stops the vcpu throttling started by cpu_throttle_set.
 */
void cpu_throttle_stop(void);

/**
 * cpu_throttle_active:
 *
 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
 */
bool cpu_throttle_active(void);

/**
 * cpu_throttle_get_percentage:
 *
 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
 *
 * Returns: The throttle percentage in range 1 to 99.
 */
int cpu_throttle_get_percentage(void);

c3affe56
AF
671#ifndef CONFIG_USER_ONLY
672
673typedef void (*CPUInterruptHandler)(CPUState *, int);
674
675extern CPUInterruptHandler cpu_interrupt_handler;
676
677/**
678 * cpu_interrupt:
679 * @cpu: The CPU to set an interrupt on.
680 * @mask: The interupts to set.
681 *
682 * Invokes the interrupt handler.
683 */
684static inline void cpu_interrupt(CPUState *cpu, int mask)
685{
686 cpu_interrupt_handler(cpu, mask);
687}
688
689#else /* USER_ONLY */
690
691void cpu_interrupt(CPUState *cpu, int mask);
692
693#endif /* USER_ONLY */
694
#ifdef CONFIG_SOFTMMU
/* Dispatch an access to unassigned memory to the CPU class hook, if any. */
static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                         bool is_write, bool is_exec,
                                         int opaque, unsigned size)
{
    CPUClass *klass = CPU_GET_CLASS(cpu);

    if (klass->do_unassigned_access != NULL) {
        klass->do_unassigned_access(cpu, addr, is_write, is_exec,
                                    opaque, size);
    }
}

/* Dispatch an unaligned access to the CPU class hook.  Unlike the
 * unassigned-access hook above, this one is mandatory for targets that
 * reach here (no NULL check is performed). */
static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        int is_write, int is_user,
                                        uintptr_t retaddr)
{
    CPU_GET_CLASS(cpu)->do_unaligned_access(cpu, addr, is_write, is_user,
                                            retaddr);
}
#endif
716
2991b890
PC
717/**
718 * cpu_set_pc:
719 * @cpu: The CPU to set the program counter for.
720 * @addr: Program counter value.
721 *
722 * Sets the program counter for a CPU.
723 */
724static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
725{
726 CPUClass *cc = CPU_GET_CLASS(cpu);
727
728 cc->set_pc(cpu, addr);
729}
730
/**
 * cpu_reset_interrupt:
 * @cpu: The CPU to clear the interrupt on.
 * @mask: The interrupt mask to clear.
 *
 * Resets interrupts on the vCPU @cpu.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask);

/**
 * cpu_exit:
 * @cpu: The CPU to exit.
 *
 * Requests the CPU @cpu to exit execution.
 */
void cpu_exit(CPUState *cpu);

/**
 * cpu_resume:
 * @cpu: The CPU to resume.
 *
 * Resumes CPU, i.e. puts CPU into runnable state.
 */
void cpu_resume(CPUState *cpu);

/**
 * qemu_init_vcpu:
 * @cpu: The vCPU to initialize.
 *
 * Initializes a vCPU.
 */
void qemu_init_vcpu(CPUState *cpu);
763
3825b28f
AF
764#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
765#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
766#define SSTEP_NOTIMER 0x4 /* Do not Timers while single stepping */
767
768/**
769 * cpu_single_step:
770 * @cpu: CPU to the flags for.
771 * @enabled: Flags to enable.
772 *
773 * Enables or disables single-stepping for @cpu.
774 */
775void cpu_single_step(CPUState *cpu, int enabled);
776
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
/* 0x08 currently unused */
#define BP_GDB                0x10
#define BP_CPU                0x20
#define BP_ANY                (BP_GDB | BP_CPU)
#define BP_WATCHPOINT_HIT_READ 0x40
#define BP_WATCHPOINT_HIT_WRITE 0x80
#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)

/* Insert/remove guest breakpoints; @flags are the BP_* values above. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
795
b933066a
RH
796/* Return true if PC matches an installed breakpoint. */
797static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
798{
799 CPUBreakpoint *bp;
800
801 if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
802 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
803 if (bp->pc == pc && (bp->flags & mask)) {
804 return true;
805 }
806 }
807 }
808 return false;
809}
810
/* Insert/remove guest watchpoints over [addr, addr + len); @flags are BP_*. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
817
/* Log a fatal guest error (printf-style) and abort emulation. */
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
void cpu_exec_exit(CPUState *cpu);

/* Migration description for the common CPU state; a dummy placeholder is
 * used in user-mode builds, which do not migrate. */
#ifdef CONFIG_SOFTMMU
extern const struct VMStateDescription vmstate_cpu_common;
#else
#define vmstate_cpu_common vmstate_dummy
#endif

#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}

#endif /* QEMU_CPU_H */