/*
 * Provenance note: this text is a git-blame web export from
 * git.proxmox.com (mirror_qemu.git) of include/qom/cpu.h, at commit
 * "cpus: Initialize pseudo-random seeds for all guest cpus".
 * Blame annotations (commit hashes, author initials) from the export
 * are interleaved with the source lines below.
 */
1/*
2 * QEMU CPU model
3 *
4 * Copyright (c) 2012 SUSE LINUX Products GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 */
20#ifndef QEMU_CPU_H
21#define QEMU_CPU_H
22
961f8395 23#include "hw/qdev-core.h"
3979fca4 24#include "disas/dis-asm.h"
c658b94f 25#include "exec/hwaddr.h"
66b9b43c 26#include "exec/memattrs.h"
9af23989 27#include "qapi/qapi-types-run-state.h"
48151859 28#include "qemu/bitmap.h"
068a5ea0 29#include "qemu/rcu_queue.h"
bdc44640 30#include "qemu/queue.h"
1de7afc9 31#include "qemu/thread.h"
dd83b06a 32
b5ba1cc6
QN
/*
 * Writer callback used by the ELF coredump code: receives a buffer of
 * @size bytes to append to the dump plus the caller-supplied @opaque
 * cookie.  The int return reports success/failure to the caller
 * (convention used by the cpu_write_elf*_note() helpers below).
 */
typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);
577f42c0
AF
/**
 * vaddr:
 * Type wide enough to contain any #target_ulong virtual address.
 */
typedef uint64_t vaddr;
/* printf()-style conversion macros and the maximum value for vaddr */
#define VADDR_PRId PRId64
#define VADDR_PRIu PRIu64
#define VADDR_PRIo PRIo64
#define VADDR_PRIx PRIx64
#define VADDR_PRIX PRIX64
#define VADDR_MAX UINT64_MAX
47
dd83b06a
AF
48/**
49 * SECTION:cpu
50 * @section_id: QEMU-cpu
51 * @title: CPU Class
52 * @short_description: Base class for all CPUs
53 */
54
55#define TYPE_CPU "cpu"
56
0d6d1ab4
AF
57/* Since this macro is used a lot in hot code paths and in conjunction with
58 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
59 * an unchecked cast.
60 */
61#define CPU(obj) ((CPUState *)(obj))
62
dd83b06a
AF
63#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
64#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)
65
b35399bb
SS
/* Kind of access being performed when an MMU translation is requested. */
typedef enum MMUAccessType {
    MMU_DATA_LOAD = 0,
    MMU_DATA_STORE = 1,
    MMU_INST_FETCH = 2
} MMUAccessType;

/* Forward declaration; the struct itself is defined further below. */
typedef struct CPUWatchpoint CPUWatchpoint;
dd83b06a 73
c658b94f
AF
/*
 * Legacy hook type for handling accesses to unassigned memory; deprecated
 * in favour of the do_transaction_failed hook (see CPUClass below).
 */
typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
                                    bool is_write, bool is_exec, int opaque,
                                    unsigned size);

/* Opaque to this header; defined by the TCG translator (exec/ headers). */
struct TranslationBlock;
79
dd83b06a
AF
/**
 * CPUClass:
 * @class_by_name: Callback to map -cpu command line model name to an
 * instantiatable CPU type.
 * @parse_features: Callback to parse command line arguments.
 * @reset: Callback to reset the #CPUState to its initial state.
 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
 * @has_work: Callback for checking if there is work to do.
 * @do_interrupt: Callback for interrupt handling.
 * @do_unassigned_access: Callback for unassigned access handling.
 * (this is deprecated: new targets should use do_transaction_failed instead)
 * @do_unaligned_access: Callback for unaligned access handling, if
 * the target defines #ALIGNED_ONLY.
 * @do_transaction_failed: Callback for handling failed memory transactions
 * (ie bus faults or external aborts; not MMU faults)
 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
 * runtime configurable endianness is currently big-endian. Non-configurable
 * CPUs can use the default implementation of this method. This method should
 * not be used by any callers other than the pre-1.0 virtio devices.
 * @memory_rw_debug: Callback for GDB memory access.
 * @dump_state: Callback for dumping state.
 * @dump_statistics: Callback for dumping statistics.
 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
 * @get_memory_mapping: Callback for obtaining the memory mappings.
 * @set_pc: Callback for setting the Program Counter register. This
 * should have the semantics used by the target architecture when
 * setting the PC from a source such as an ELF file entry point;
 * for example on Arm it will also set the Thumb mode bit based
 * on the least significant bit of the new PC value.
 * If the target behaviour here is anything other than "set
 * the PC register to the value passed in" then the target must
 * also implement the synchronize_from_tb hook.
 * @synchronize_from_tb: Callback for synchronizing state from a TCG
 * #TranslationBlock. This is called when we abandon execution
 * of a TB before starting it, and must set all parts of the CPU
 * state which the previous TB in the chain may not have updated.
 * This always includes at least the program counter; some targets
 * will need to do more. If this hook is not implemented then the
 * default is to call @set_pc(tb->pc).
 * @tlb_fill: Callback for handling a softmmu tlb miss or user-only
 * address fault. For system mode, if the access is valid, call
 * tlb_set_page and return true; if the access is invalid, and
 * probe is true, return false; otherwise raise an exception and
 * do not return. For user-only mode, always raise an exception
 * and do not return.
 * @get_phys_page_debug: Callback for obtaining a physical address.
 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
 * associated memory transaction attributes to use for the access.
 * CPUs which use memory transaction attributes should implement this
 * instead of get_phys_page_debug.
 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
 * a memory access with the specified memory transaction attributes.
 * @gdb_read_register: Callback for letting GDB read a register.
 * @gdb_write_register: Callback for letting GDB write a register.
 * @debug_check_watchpoint: Callback: return true if the architectural
 * watchpoint whose address has matched should really fire.
 * @debug_excp_handler: Callback for handling debug exceptions.
 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
 * 64-bit VM coredump.
 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 64-bit VM coredump.
 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
 * 32-bit VM coredump.
 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 32-bit VM coredump.
 * @vmsd: State description for migration.
 * @gdb_num_core_regs: Number of core registers accessible to GDB.
 * @gdb_core_xml_file: File name for core registers GDB XML description.
 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 * before the insn which triggers a watchpoint rather than after it.
 * @gdb_arch_name: Optional callback that returns the architecture name known
 * to GDB. The caller must free the returned string with g_free.
 * @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
 * gdb stub. Returns a pointer to the XML contents for the specified XML file
 * or NULL if the CPU doesn't have a dynamically generated content for it.
 * @cpu_exec_enter: Callback for cpu_exec preparation.
 * @cpu_exec_exit: Callback for cpu_exec cleanup.
 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
 * @disas_set_info: Setup architecture specific components of disassembly info
 * @adjust_watchpoint_address: Perform a target-specific adjustment to an
 * address before attempting to match it against watchpoints.
 *
 * Represents a CPU family or model.
 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(const char *typename, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr);
    void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
                                  unsigned size, MMUAccessType access_type,
                                  int mmu_idx, MemTxAttrs attrs,
                                  MemTxResult response, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *, int flags);
    GuestPanicInformation* (*get_crash_info)(CPUState *cpu);
    void (*dump_statistics)(CPUState *cpu, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
                                        MemTxAttrs *attrs);
    int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
    void (*debug_excp_handler)(CPUState *cpu);

    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    const struct VMStateDescription *vmsd;
    const char *gdb_core_xml_file;
    gchar * (*gdb_arch_name)(CPUState *cpu);
    const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
    vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
    void (*tcg_initialize)(void);

    /* Keep non-pointer data at the end to minimize holes. */
    int gdb_num_core_regs;
    bool gdb_stop_before_watchpoint;
} CPUClass;
234
28ecfd7a
AF
/*
 * 16/16 split view of the 32-bit icount_decr word (see CPUState below).
 * Member order depends on host endianness so that "low" and "high"
 * always alias the corresponding halves of the 32-bit counter.
 */
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif

/* A guest breakpoint; kept on the CPUState breakpoints list. */
typedef struct CPUBreakpoint {
    vaddr pc;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;
252
568496c0 253struct CPUWatchpoint {
ff4700b0 254 vaddr vaddr;
05068c0d 255 vaddr len;
08225676 256 vaddr hitaddr;
66b9b43c 257 MemTxAttrs hitattrs;
ff4700b0
AF
258 int flags; /* BP_* */
259 QTAILQ_ENTRY(CPUWatchpoint) entry;
568496c0 260};
ff4700b0 261
a60f24b5 262struct KVMState;
f7575c96 263struct kvm_run;
a60f24b5 264
b0cb0a66
VP
265struct hax_vcpu_state;
266
8cd70437
AF
267#define TB_JMP_CACHE_BITS 12
268#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
269
4b4629d9 270/* work queue */
14e6fe12
PB
271
272/* The union type allows passing of 64 bit target pointers on 32 bit
273 * hosts in a single parameter
274 */
275typedef union {
276 int host_int;
277 unsigned long host_ulong;
278 void *host_ptr;
279 vaddr target_ptr;
280} run_on_cpu_data;
281
282#define RUN_ON_CPU_HOST_PTR(p) ((run_on_cpu_data){.host_ptr = (p)})
283#define RUN_ON_CPU_HOST_INT(i) ((run_on_cpu_data){.host_int = (i)})
284#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
285#define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)})
286#define RUN_ON_CPU_NULL RUN_ON_CPU_HOST_PTR(NULL)
287
288typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
289
d148d90e 290struct qemu_work_item;
4b4629d9 291
0b8497f0 292#define CPU_UNSET_NUMA_NODE_ID -1
d01c05c9 293#define CPU_TRACE_DSTATE_MAX_EVENTS 32
0b8497f0 294
dd83b06a
AF
/**
 * CPUState:
 * @cpu_index: CPU index (informative).
 * @cluster_index: Identifies which cluster this CPU is in.
 * For boards which don't define clusters or for "loose" CPUs not assigned
 * to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
 * be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
 * QOM parent.
 * @nr_cores: Number of cores within this CPU package.
 * @nr_threads: Number of threads within this CPU.
 * @running: #true if CPU is currently running (lockless).
 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
 * valid under cpu_list_lock.
 * @created: Indicates whether the CPU thread has been successfully created.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in suspended state.
 * @stop: Indicates a pending stop request.
 * @stopped: Indicates the CPU has been artificially stopped.
 * @unplug: Indicates a pending CPU unplug request.
 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @icount_decr: Low 16 bits: number of cycles left, only used in icount mode.
 * High 16 bits: Set to -1 to force TCG to stop executing linked TBs for this
 * CPU and return to its top level loop (even in non-icount mode).
 * This allows a single read-compare-cbranch-write sequence to test
 * for both decrementer underflow and exceptions.
 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
 * requires that IO only be performed on the last instruction of a TB
 * so that interrupts take effect immediately.
 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 * AddressSpaces this CPU has)
 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 * @as: Pointer to the first AddressSpace, for the convenience of targets which
 * only have a single AddressSpace
 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 * @next_cpu: Next CPU sharing TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
 * @kvm_fd: vCPU file descriptor for KVM.
 * @work_mutex: Lock to prevent multiple access to queued_work_*.
 * @queued_work_first: First asynchronous work pending.
 * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
 * to @trace_dstate).
 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
 * @ignore_memory_transaction_failures: Cached copy of the MachineState
 * flag of the same name: allows the board to suppress calling of the
 * CPU do_transaction_failed hook function.
 *
 * State of one CPU core or thread.
 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    bool running, has_waiter;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    bool unplug;
    bool crash_occurred;
    bool exit_request;
    uint32_t cflags_next_tb;
    /* updates protected by BQL */
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_budget;
    int64_t icount_extra;
    /* Per-vCPU pseudo-random seed; presumably initialized at vCPU
     * startup (cf. the commit title) -- confirm against cpus.c. */
    uint64_t random_seed;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    struct qemu_work_item *queued_work_first, *queued_work_last;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    void *env_ptr; /* CPUArchState */

    /* Accessed in parallel; all accesses must be atomic */
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];

    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;
    /*
     * This is only needed for the legacy cpu_unassigned_access() hook;
     * when all targets using it have been converted to use
     * cpu_transaction_failed() instead it can be removed.
     */
    MMUAccessType mem_io_access_type;

    int kvm_fd;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* Used for events with 'vcpu' and *without* the 'disabled' properties */
    DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS);
    DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS);

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index;
    int cluster_index;
    uint32_t halted;
    uint32_t can_do_io;
    int32_t exception_index;

    /* shared by kvm, hax and hvf */
    bool vcpu_dirty;

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    bool ignore_memory_transaction_failures;

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0.  Leave this field at the end so as to make the
       (absolute value) offset as small as possible.  This reduces code
       size, especially for hosts without large memory offsets. */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;

    struct hax_vcpu_state *hax_vcpu;

    int hvf_fd;

    /* track IOMMUs whose translations we've cached in the TCG TLB */
    GArray *iommu_notifiers;
};
460
f481ee2d
PB
/* Global list of all CPUs; iterate with the RCU-safe macros below. */
typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
extern CPUTailQ cpus;

#define first_cpu QTAILQ_FIRST_RCU(&cpus)
#define CPU_NEXT(cpu) QTAILQ_NEXT_RCU(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus, node, next_cpu)

/* Per-thread pointer to the CPU the calling thread is executing. */
extern __thread CPUState *current_cpu;
4917cf44 471
f3ced3c5
EC
472static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
473{
474 unsigned int i;
475
476 for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
477 atomic_set(&cpu->tb_jmp_cache[i], NULL);
478 }
479}
480
8d4e9146
FK
481/**
482 * qemu_tcg_mttcg_enabled:
483 * Check whether we are running MultiThread TCG or not.
484 *
485 * Returns: %true if we are in MTTCG mode %false otherwise.
486 */
487extern bool mttcg_enabled;
488#define qemu_tcg_mttcg_enabled() (mttcg_enabled)
489
444d5590
AF
490/**
491 * cpu_paging_enabled:
492 * @cpu: The CPU whose state is to be inspected.
493 *
494 * Returns: %true if paging is enabled, %false otherwise.
495 */
496bool cpu_paging_enabled(const CPUState *cpu);
497
a23bbfda
AF
498/**
499 * cpu_get_memory_mapping:
500 * @cpu: The CPU whose memory mappings are to be obtained.
501 * @list: Where to write the memory mappings to.
502 * @errp: Pointer for reporting an #Error.
503 */
504void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
505 Error **errp);
506
c72bf468
JF
507/**
508 * cpu_write_elf64_note:
509 * @f: pointer to a function that writes memory to a file
510 * @cpu: The CPU whose memory is to be dumped
511 * @cpuid: ID number of the CPU
512 * @opaque: pointer to the CPUState struct
513 */
514int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
515 int cpuid, void *opaque);
516
517/**
518 * cpu_write_elf64_qemunote:
519 * @f: pointer to a function that writes memory to a file
520 * @cpu: The CPU whose memory is to be dumped
521 * @cpuid: ID number of the CPU
522 * @opaque: pointer to the CPUState struct
523 */
524int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
525 void *opaque);
526
527/**
528 * cpu_write_elf32_note:
529 * @f: pointer to a function that writes memory to a file
530 * @cpu: The CPU whose memory is to be dumped
531 * @cpuid: ID number of the CPU
532 * @opaque: pointer to the CPUState struct
533 */
534int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
535 int cpuid, void *opaque);
536
537/**
538 * cpu_write_elf32_qemunote:
539 * @f: pointer to a function that writes memory to a file
540 * @cpu: The CPU whose memory is to be dumped
541 * @cpuid: ID number of the CPU
542 * @opaque: pointer to the CPUState struct
543 */
544int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
545 void *opaque);
dd83b06a 546
c86f106b
AN
547/**
548 * cpu_get_crash_info:
549 * @cpu: The CPU to get crash information for
550 *
551 * Gets the previously saved crash information.
552 * Caller is responsible for freeing the data.
553 */
554GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
555
878096ee
AF
556/**
557 * CPUDumpFlags:
558 * @CPU_DUMP_CODE:
559 * @CPU_DUMP_FPU: dump FPU register state, not just integer
560 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
561 */
562enum CPUDumpFlags {
563 CPU_DUMP_CODE = 0x00010000,
564 CPU_DUMP_FPU = 0x00020000,
565 CPU_DUMP_CCOP = 0x00040000,
566};
567
568/**
569 * cpu_dump_state:
570 * @cpu: The CPU whose state is to be dumped.
90c84c56 571 * @f: If non-null, dump to this stream, else to current print sink.
878096ee
AF
572 *
573 * Dumps CPU state.
574 */
90c84c56 575void cpu_dump_state(CPUState *cpu, FILE *f, int flags);
878096ee
AF
576
577/**
578 * cpu_dump_statistics:
579 * @cpu: The CPU whose state is to be dumped.
878096ee
AF
580 * @flags: Flags what to dump.
581 *
11cb6c15
MA
582 * Dump CPU statistics to the current monitor if we have one, else to
583 * stdout.
878096ee 584 */
11cb6c15 585void cpu_dump_statistics(CPUState *cpu, int flags);
878096ee 586
00b941e5 587#ifndef CONFIG_USER_ONLY
1dc6fb1f
PM
588/**
589 * cpu_get_phys_page_attrs_debug:
590 * @cpu: The CPU to obtain the physical page address for.
591 * @addr: The virtual address.
592 * @attrs: Updated on return with the memory transaction attributes to use
593 * for this access.
594 *
595 * Obtains the physical page corresponding to a virtual one, together
596 * with the corresponding memory transaction attributes to use for the access.
597 * Use it only for debugging because no protection checks are done.
598 *
599 * Returns: Corresponding physical page address or -1 if no page found.
600 */
601static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
602 MemTxAttrs *attrs)
603{
604 CPUClass *cc = CPU_GET_CLASS(cpu);
605
606 if (cc->get_phys_page_attrs_debug) {
607 return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
608 }
609 /* Fallback for CPUs which don't implement the _attrs_ hook */
610 *attrs = MEMTXATTRS_UNSPECIFIED;
611 return cc->get_phys_page_debug(cpu, addr);
612}
613
00b941e5
AF
614/**
615 * cpu_get_phys_page_debug:
616 * @cpu: The CPU to obtain the physical page address for.
617 * @addr: The virtual address.
618 *
619 * Obtains the physical page corresponding to a virtual one.
620 * Use it only for debugging because no protection checks are done.
621 *
622 * Returns: Corresponding physical page address or -1 if no page found.
623 */
624static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
625{
1dc6fb1f 626 MemTxAttrs attrs = {};
00b941e5 627
1dc6fb1f 628 return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
00b941e5 629}
d7f25a9e
PM
630
631/** cpu_asidx_from_attrs:
632 * @cpu: CPU
633 * @attrs: memory transaction attributes
634 *
635 * Returns the address space index specifying the CPU AddressSpace
636 * to use for a memory access with the given transaction attributes.
637 */
638static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
639{
640 CPUClass *cc = CPU_GET_CLASS(cpu);
9c8c334b 641 int ret = 0;
d7f25a9e
PM
642
643 if (cc->asidx_from_attrs) {
9c8c334b
RH
644 ret = cc->asidx_from_attrs(cpu, attrs);
645 assert(ret < cpu->num_ases && ret >= 0);
d7f25a9e 646 }
9c8c334b 647 return ret;
d7f25a9e 648}
00b941e5
AF
649#endif
650
267f685b
PB
651/**
652 * cpu_list_add:
653 * @cpu: The CPU to be added to the list of CPUs.
654 */
655void cpu_list_add(CPUState *cpu);
656
657/**
658 * cpu_list_remove:
659 * @cpu: The CPU to be removed from the list of CPUs.
660 */
661void cpu_list_remove(CPUState *cpu);
662
dd83b06a
AF
663/**
664 * cpu_reset:
665 * @cpu: The CPU whose state is to be reset.
666 */
667void cpu_reset(CPUState *cpu);
668
2b8c2754
AF
669/**
670 * cpu_class_by_name:
671 * @typename: The CPU base type.
672 * @cpu_model: The model string without any parameters.
673 *
674 * Looks up a CPU #ObjectClass matching name @cpu_model.
675 *
676 * Returns: A #CPUClass or %NULL if not matching class is found.
677 */
678ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
679
3c72234c
IM
680/**
681 * cpu_create:
682 * @typename: The CPU type.
683 *
684 * Instantiates a CPU and realizes the CPU.
685 *
686 * Returns: A #CPUState or %NULL if an error occurred.
687 */
688CPUState *cpu_create(const char *typename);
689
690/**
c1c8cfe5
EH
691 * parse_cpu_option:
692 * @cpu_option: The -cpu option including optional parameters.
3c72234c
IM
693 *
694 * processes optional parameters and registers them as global properties
695 *
4482e05c
IM
696 * Returns: type of CPU to create or prints error and terminates process
697 * if an error occurred.
3c72234c 698 */
c1c8cfe5 699const char *parse_cpu_option(const char *cpu_option);
9262685b 700
3993c6bd 701/**
8c2e1b00 702 * cpu_has_work:
3993c6bd
AF
703 * @cpu: The vCPU to check.
704 *
705 * Checks whether the CPU has work to do.
706 *
707 * Returns: %true if the CPU has work, %false otherwise.
708 */
8c2e1b00
AF
709static inline bool cpu_has_work(CPUState *cpu)
710{
711 CPUClass *cc = CPU_GET_CLASS(cpu);
712
713 g_assert(cc->has_work);
714 return cc->has_work(cpu);
715}
3993c6bd 716
60e82579
AF
717/**
718 * qemu_cpu_is_self:
719 * @cpu: The vCPU to check against.
720 *
721 * Checks whether the caller is executing on the vCPU thread.
722 *
723 * Returns: %true if called from @cpu's thread, %false otherwise.
724 */
725bool qemu_cpu_is_self(CPUState *cpu);
726
c08d7424
AF
727/**
728 * qemu_cpu_kick:
729 * @cpu: The vCPU to kick.
730 *
731 * Kicks @cpu's thread.
732 */
733void qemu_cpu_kick(CPUState *cpu);
734
2fa45344
AF
735/**
736 * cpu_is_stopped:
737 * @cpu: The CPU to check.
738 *
739 * Checks whether the CPU is stopped.
740 *
741 * Returns: %true if run state is not running or if artificially stopped;
742 * %false otherwise.
743 */
744bool cpu_is_stopped(CPUState *cpu);
745
d148d90e
SF
746/**
747 * do_run_on_cpu:
748 * @cpu: The vCPU to run on.
749 * @func: The function to be executed.
750 * @data: Data to pass to the function.
751 * @mutex: Mutex to release while waiting for @func to run.
752 *
753 * Used internally in the implementation of run_on_cpu.
754 */
14e6fe12 755void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
d148d90e
SF
756 QemuMutex *mutex);
757
f100f0b3
AF
758/**
759 * run_on_cpu:
760 * @cpu: The vCPU to run on.
761 * @func: The function to be executed.
762 * @data: Data to pass to the function.
763 *
764 * Schedules the function @func for execution on the vCPU @cpu.
765 */
14e6fe12 766void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
f100f0b3 767
3c02270d
CV
768/**
769 * async_run_on_cpu:
770 * @cpu: The vCPU to run on.
771 * @func: The function to be executed.
772 * @data: Data to pass to the function.
773 *
774 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
775 */
14e6fe12 776void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
3c02270d 777
53f5ed95
PB
778/**
779 * async_safe_run_on_cpu:
780 * @cpu: The vCPU to run on.
781 * @func: The function to be executed.
782 * @data: Data to pass to the function.
783 *
784 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
785 * while all other vCPUs are sleeping.
786 *
787 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
788 * BQL.
789 */
14e6fe12 790void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
53f5ed95 791
38d8f5c8
AF
792/**
793 * qemu_get_cpu:
794 * @index: The CPUState@cpu_index value of the CPU to obtain.
795 *
796 * Gets a CPU matching @index.
797 *
798 * Returns: The CPU or %NULL if there is no matching CPU.
799 */
800CPUState *qemu_get_cpu(int index);
801
69e5ff06
IM
802/**
803 * cpu_exists:
804 * @id: Guest-exposed CPU ID to lookup.
805 *
806 * Search for CPU with specified ID.
807 *
808 * Returns: %true - CPU is found, %false - CPU isn't found.
809 */
810bool cpu_exists(int64_t id);
811
5ce46cb3
EH
812/**
813 * cpu_by_arch_id:
814 * @id: Guest-exposed CPU ID of the CPU to obtain.
815 *
816 * Get a CPU with matching @id.
817 *
818 * Returns: The CPU or %NULL if there is no matching CPU.
819 */
820CPUState *cpu_by_arch_id(int64_t id);
821
2adcc85d
JH
822/**
823 * cpu_throttle_set:
824 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
825 *
826 * Throttles all vcpus by forcing them to sleep for the given percentage of
827 * time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly.
828 * (example: 10ms sleep for every 30ms awake).
829 *
830 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
831 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
832 * is called.
833 */
834void cpu_throttle_set(int new_throttle_pct);
835
836/**
837 * cpu_throttle_stop:
838 *
839 * Stops the vcpu throttling started by cpu_throttle_set.
840 */
841void cpu_throttle_stop(void);
842
843/**
844 * cpu_throttle_active:
845 *
846 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
847 */
848bool cpu_throttle_active(void);
849
850/**
851 * cpu_throttle_get_percentage:
852 *
853 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
854 *
855 * Returns: The throttle percentage in range 1 to 99.
856 */
857int cpu_throttle_get_percentage(void);
858
c3affe56
AF
859#ifndef CONFIG_USER_ONLY
860
861typedef void (*CPUInterruptHandler)(CPUState *, int);
862
863extern CPUInterruptHandler cpu_interrupt_handler;
864
865/**
866 * cpu_interrupt:
867 * @cpu: The CPU to set an interrupt on.
7e63bc38 868 * @mask: The interrupts to set.
c3affe56
AF
869 *
870 * Invokes the interrupt handler.
871 */
872static inline void cpu_interrupt(CPUState *cpu, int mask)
873{
874 cpu_interrupt_handler(cpu, mask);
875}
876
877#else /* USER_ONLY */
878
879void cpu_interrupt(CPUState *cpu, int mask);
880
881#endif /* USER_ONLY */
882
47507383
TH
883#ifdef NEED_CPU_H
884
93e22326 885#ifdef CONFIG_SOFTMMU
c658b94f
AF
886static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
887 bool is_write, bool is_exec,
888 int opaque, unsigned size)
889{
890 CPUClass *cc = CPU_GET_CLASS(cpu);
891
892 if (cc->do_unassigned_access) {
893 cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
894 }
895}
896
93e22326 897static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
b35399bb
SS
898 MMUAccessType access_type,
899 int mmu_idx, uintptr_t retaddr)
93e22326
PB
900{
901 CPUClass *cc = CPU_GET_CLASS(cpu);
902
b35399bb 903 cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
93e22326 904}
0dff0939
PM
905
906static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
907 vaddr addr, unsigned size,
908 MMUAccessType access_type,
909 int mmu_idx, MemTxAttrs attrs,
910 MemTxResult response,
911 uintptr_t retaddr)
912{
913 CPUClass *cc = CPU_GET_CLASS(cpu);
914
ed860129 915 if (!cpu->ignore_memory_transaction_failures && cc->do_transaction_failed) {
0dff0939
PM
916 cc->do_transaction_failed(cpu, physaddr, addr, size, access_type,
917 mmu_idx, attrs, response, retaddr);
918 }
919}
c658b94f
AF
920#endif
921
47507383
TH
922#endif /* NEED_CPU_H */
923
2991b890
PC
924/**
925 * cpu_set_pc:
926 * @cpu: The CPU to set the program counter for.
927 * @addr: Program counter value.
928 *
929 * Sets the program counter for a CPU.
930 */
931static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
932{
933 CPUClass *cc = CPU_GET_CLASS(cpu);
934
935 cc->set_pc(cpu, addr);
936}
937
d8ed887b
AF
938/**
939 * cpu_reset_interrupt:
940 * @cpu: The CPU to clear the interrupt on.
941 * @mask: The interrupt mask to clear.
942 *
943 * Resets interrupts on the vCPU @cpu.
944 */
945void cpu_reset_interrupt(CPUState *cpu, int mask);
946
60a3e17a
AF
947/**
948 * cpu_exit:
949 * @cpu: The CPU to exit.
950 *
951 * Requests the CPU @cpu to exit execution.
952 */
953void cpu_exit(CPUState *cpu);
954
2993683b
IM
955/**
956 * cpu_resume:
957 * @cpu: The CPU to resume.
958 *
959 * Resumes CPU, i.e. puts CPU into runnable state.
960 */
961void cpu_resume(CPUState *cpu);
dd83b06a 962
4c055ab5
GZ
963/**
964 * cpu_remove:
965 * @cpu: The CPU to remove.
966 *
967 * Requests the CPU to be removed.
968 */
969void cpu_remove(CPUState *cpu);
970
2c579042
BR
971 /**
972 * cpu_remove_sync:
973 * @cpu: The CPU to remove.
974 *
975 * Requests the CPU to be removed and waits till it is removed.
976 */
977void cpu_remove_sync(CPUState *cpu);
978
d148d90e
SF
979/**
980 * process_queued_cpu_work() - process all items on CPU work queue
981 * @cpu: The CPU which work queue to process.
982 */
983void process_queued_cpu_work(CPUState *cpu);
984
ab129972
PB
985/**
986 * cpu_exec_start:
987 * @cpu: The CPU for the current thread.
988 *
989 * Record that a CPU has started execution and can be interrupted with
990 * cpu_exit.
991 */
992void cpu_exec_start(CPUState *cpu);
993
994/**
995 * cpu_exec_end:
996 * @cpu: The CPU for the current thread.
997 *
998 * Record that a CPU has stopped execution and exclusive sections
999 * can be executed without interrupting it.
1000 */
1001void cpu_exec_end(CPUState *cpu);
1002
1003/**
1004 * start_exclusive:
1005 *
1006 * Wait for a concurrent exclusive section to end, and then start
1007 * a section of work that is run while other CPUs are not running
1008 * between cpu_exec_start and cpu_exec_end. CPUs that are running
1009 * cpu_exec are exited immediately. CPUs that call cpu_exec_start
1010 * during the exclusive section go to sleep until this CPU calls
1011 * end_exclusive.
ab129972
PB
1012 */
1013void start_exclusive(void);
1014
1015/**
1016 * end_exclusive:
1017 *
1018 * Concludes an exclusive execution section started by start_exclusive.
ab129972
PB
1019 */
1020void end_exclusive(void);
1021
c643bed9
AF
1022/**
1023 * qemu_init_vcpu:
1024 * @cpu: The vCPU to initialize.
1025 *
1026 * Initializes a vCPU.
1027 */
1028void qemu_init_vcpu(CPUState *cpu);
1029
3825b28f
AF
1030#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
1031#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
1032#define SSTEP_NOTIMER 0x4 /* Do not use Timers while single stepping */
1033
1034/**
1035 * cpu_single_step:
1036 * @cpu: CPU to set the flags for.
1037 * @enabled: Flags to enable.
1038 *
1039 * Enables or disables single-stepping for @cpu.
1040 */
1041void cpu_single_step(CPUState *cpu, int enabled);
1042
b3310ab3
AF
1043/* Breakpoint/watchpoint flags */
1044#define BP_MEM_READ 0x01
1045#define BP_MEM_WRITE 0x02
1046#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
1047#define BP_STOP_BEFORE_ACCESS 0x04
08225676 1048/* 0x08 currently unused */
b3310ab3
AF
1049#define BP_GDB 0x10
1050#define BP_CPU 0x20
b933066a 1051#define BP_ANY (BP_GDB | BP_CPU)
08225676
PM
1052#define BP_WATCHPOINT_HIT_READ 0x40
1053#define BP_WATCHPOINT_HIT_WRITE 0x80
1054#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
b3310ab3
AF
1055
1056int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
1057 CPUBreakpoint **breakpoint);
1058int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
1059void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
1060void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
1061
b933066a
RH
1062/* Return true if PC matches an installed breakpoint. */
1063static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
1064{
1065 CPUBreakpoint *bp;
1066
1067 if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
1068 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
1069 if (bp->pc == pc && (bp->flags & mask)) {
1070 return true;
1071 }
1072 }
1073 }
1074 return false;
1075}
1076
75a34036
AF
1077int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
1078 int flags, CPUWatchpoint **watchpoint);
1079int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
1080 vaddr len, int flags);
1081void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
1082void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
1083
63c91552
PB
1084/**
1085 * cpu_get_address_space:
1086 * @cpu: CPU to get address space from
1087 * @asidx: index identifying which address space to get
1088 *
1089 * Return the requested address space of this CPU. @asidx
1090 * specifies which address space to read.
1091 */
1092AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
1093
a47dddd7
AF
1094void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
1095 GCC_FMT_ATTR(2, 3);
c7e002c5 1096extern Property cpu_common_props[];
39e329e3 1097void cpu_exec_initfn(CPUState *cpu);
ce5b1bbf 1098void cpu_exec_realizefn(CPUState *cpu, Error **errp);
7bbc124e 1099void cpu_exec_unrealizefn(CPUState *cpu);
a47dddd7 1100
c95ac103
TH
1101/**
1102 * target_words_bigendian:
1103 * Returns true if the (default) endianness of the target is big endian,
1104 * false otherwise. Note that in target-specific code, you can use
1105 * TARGET_WORDS_BIGENDIAN directly instead. On the other hand, common
1106 * code should normally never need to know about the endianness of the
1107 * target, so please do *not* use this function unless you know very well
1108 * what you are doing!
1109 */
1110bool target_words_bigendian(void);
1111
47507383
TH
1112#ifdef NEED_CPU_H
1113
1a1562f5
AF
1114#ifdef CONFIG_SOFTMMU
1115extern const struct VMStateDescription vmstate_cpu_common;
1116#else
1117#define vmstate_cpu_common vmstate_dummy
1118#endif
1119
1120#define VMSTATE_CPU() { \
1121 .name = "parent_obj", \
1122 .size = sizeof(CPUState), \
1123 .vmsd = &vmstate_cpu_common, \
1124 .flags = VMS_STRUCT, \
1125 .offset = 0, \
1126}
1127
47507383
TH
1128#endif /* NEED_CPU_H */
1129
a07f953e 1130#define UNASSIGNED_CPU_INDEX -1
7ea7b9ad 1131#define UNASSIGNED_CLUSTER_INDEX -1
a07f953e 1132
dd83b06a 1133#endif