]> git.proxmox.com Git - mirror_qemu.git/blame - include/qom/cpu.h
nvic: Change NVIC to support ARMv6-M
[mirror_qemu.git] / include / qom / cpu.h
CommitLineData
dd83b06a
AF
1/*
2 * QEMU CPU model
3 *
4 * Copyright (c) 2012 SUSE LINUX Products GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 */
20#ifndef QEMU_CPU_H
21#define QEMU_CPU_H
22
961f8395 23#include "hw/qdev-core.h"
37b9de46 24#include "disas/bfd.h"
c658b94f 25#include "exec/hwaddr.h"
66b9b43c 26#include "exec/memattrs.h"
9af23989 27#include "qapi/qapi-types-run-state.h"
48151859 28#include "qemu/bitmap.h"
bdc44640 29#include "qemu/queue.h"
1de7afc9 30#include "qemu/thread.h"
dd83b06a 31
b5ba1cc6
QN
32typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
33 void *opaque);
c72bf468 34
577f42c0
AF
35/**
36 * vaddr:
37 * Type wide enough to contain any #target_ulong virtual address.
38 */
39typedef uint64_t vaddr;
40#define VADDR_PRId PRId64
41#define VADDR_PRIu PRIu64
42#define VADDR_PRIo PRIo64
43#define VADDR_PRIx PRIx64
44#define VADDR_PRIX PRIX64
45#define VADDR_MAX UINT64_MAX
46
dd83b06a
AF
47/**
48 * SECTION:cpu
49 * @section_id: QEMU-cpu
50 * @title: CPU Class
51 * @short_description: Base class for all CPUs
52 */
53
54#define TYPE_CPU "cpu"
55
0d6d1ab4
AF
56/* Since this macro is used a lot in hot code paths and in conjunction with
57 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
58 * an unchecked cast.
59 */
60#define CPU(obj) ((CPUState *)(obj))
61
dd83b06a
AF
62#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
63#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)
64
b35399bb
SS
/* Kind of access being performed when consulting or filling the MMU/TLB. */
typedef enum MMUAccessType {
    MMU_DATA_LOAD = 0,  /* data read */
    MMU_DATA_STORE = 1, /* data write */
    MMU_INST_FETCH = 2  /* instruction fetch */
} MMUAccessType;
70
568496c0 71typedef struct CPUWatchpoint CPUWatchpoint;
dd83b06a 72
c658b94f
AF
73typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
74 bool is_write, bool is_exec, int opaque,
75 unsigned size);
76
bdf7ae5b
AF
77struct TranslationBlock;
78
dd83b06a
AF
79/**
80 * CPUClass:
2b8c2754
AF
81 * @class_by_name: Callback to map -cpu command line model name to an
82 * instantiatable CPU type.
94a444b2 83 * @parse_features: Callback to parse command line arguments.
f5df5baf 84 * @reset: Callback to reset the #CPUState to its initial state.
91b1df8c 85 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
8c2e1b00 86 * @has_work: Callback for checking if there is work to do.
97a8ea5a 87 * @do_interrupt: Callback for interrupt handling.
c658b94f 88 * @do_unassigned_access: Callback for unassigned access handling.
0dff0939 89 * (this is deprecated: new targets should use do_transaction_failed instead)
93e22326
PB
90 * @do_unaligned_access: Callback for unaligned access handling, if
91 * the target defines #ALIGNED_ONLY.
0dff0939
PM
92 * @do_transaction_failed: Callback for handling failed memory transactions
93 * (ie bus faults or external aborts; not MMU faults)
c08295d4
PM
94 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
95 * runtime configurable endianness is currently big-endian. Non-configurable
96 * CPUs can use the default implementation of this method. This method should
97 * not be used by any callers other than the pre-1.0 virtio devices.
f3659eee 98 * @memory_rw_debug: Callback for GDB memory access.
878096ee
AF
99 * @dump_state: Callback for dumping state.
100 * @dump_statistics: Callback for dumping statistics.
997395d3 101 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
444d5590 102 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
a23bbfda 103 * @get_memory_mapping: Callback for obtaining the memory mappings.
f45748f1 104 * @set_pc: Callback for setting the Program Counter register.
bdf7ae5b
AF
105 * @synchronize_from_tb: Callback for synchronizing state from a TCG
106 * #TranslationBlock.
7510454e 107 * @handle_mmu_fault: Callback for handling an MMU fault.
00b941e5 108 * @get_phys_page_debug: Callback for obtaining a physical address.
1dc6fb1f
PM
109 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
110 * associated memory transaction attributes to use for the access.
111 * CPUs which use memory transaction attributes should implement this
112 * instead of get_phys_page_debug.
d7f25a9e
PM
113 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
114 * a memory access with the specified memory transaction attributes.
5b50e790
AF
115 * @gdb_read_register: Callback for letting GDB read a register.
116 * @gdb_write_register: Callback for letting GDB write a register.
568496c0
SF
117 * @debug_check_watchpoint: Callback: return true if the architectural
118 * watchpoint whose address has matched should really fire.
86025ee4 119 * @debug_excp_handler: Callback for handling debug exceptions.
c08295d4
PM
120 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
121 * 64-bit VM coredump.
122 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
123 * note to a 64-bit VM coredump.
124 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
125 * 32-bit VM coredump.
126 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
127 * note to a 32-bit VM coredump.
b170fce3 128 * @vmsd: State description for migration.
a0e372f0 129 * @gdb_num_core_regs: Number of core registers accessible to GDB.
5b24c641 130 * @gdb_core_xml_file: File name for core registers GDB XML description.
2472b6c0
PM
131 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
132 * before the insn which triggers a watchpoint rather than after it.
b3820e6c
DH
133 * @gdb_arch_name: Optional callback that returns the architecture name known
134 * to GDB. The caller must free the returned string with g_free.
200bf5b7
AB
135 * @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
136 * gdb stub. Returns a pointer to the XML contents for the specified XML file
137 * or NULL if the CPU doesn't have a dynamically generated content for it.
cffe7b32
RH
138 * @cpu_exec_enter: Callback for cpu_exec preparation.
139 * @cpu_exec_exit: Callback for cpu_exec cleanup.
9585db68 140 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
37b9de46 141 * @disas_set_info: Setup architecture specific components of disassembly info
40612000
JB
142 * @adjust_watchpoint_address: Perform a target-specific adjustment to an
143 * address before attempting to match it against watchpoints.
dd83b06a
AF
144 *
145 * Represents a CPU family or model.
146 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(const char *typename, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr);
    void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
                                  unsigned size, MMUAccessType access_type,
                                  int mmu_idx, MemTxAttrs attrs,
                                  MemTxResult response, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                       int flags);
    /* Callback returning previously saved guest crash (panic) information;
     * see cpu_get_crash_info(). */
    GuestPanicInformation* (*get_crash_info)(CPUState *cpu);
    void (*dump_statistics)(CPUState *cpu, FILE *f,
                            fprintf_function cpu_fprintf, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int size, int rw,
                            int mmu_index);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
                                        MemTxAttrs *attrs);
    int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
    void (*debug_excp_handler)(CPUState *cpu);

    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    const struct VMStateDescription *vmsd;
    const char *gdb_core_xml_file;
    gchar * (*gdb_arch_name)(CPUState *cpu);
    const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
    vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
    /* Takes no CPU argument: one-time initialization hook, not per-vCPU
     * state setup.  NOTE(review): exact call site semantics live outside
     * this header -- confirm against the accel/tcg code. */
    void (*tcg_initialize)(void);

    /* Keep non-pointer data at the end to minimize holes. */
    int gdb_num_core_regs;
    bool gdb_stop_before_watchpoint;
} CPUClass;
217
28ecfd7a
AF
/* 16-bit view of the two halves of CPUState::icount_decr.u32.  The field
 * order is swapped with host endianness so that 'low' always aliases the
 * least-significant half and 'high' the most-significant half of the
 * 32-bit word it is unioned with.
 */
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif
229
f0c3c505
AF
/* A single debug breakpoint attached to a vCPU. */
typedef struct CPUBreakpoint {
    vaddr pc;   /* guest virtual address the breakpoint is set at */
    int flags;  /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;  /* link in CPUState::breakpoints */
} CPUBreakpoint;
235
568496c0 236struct CPUWatchpoint {
ff4700b0 237 vaddr vaddr;
05068c0d 238 vaddr len;
08225676 239 vaddr hitaddr;
66b9b43c 240 MemTxAttrs hitattrs;
ff4700b0
AF
241 int flags; /* BP_* */
242 QTAILQ_ENTRY(CPUWatchpoint) entry;
568496c0 243};
ff4700b0 244
a60f24b5 245struct KVMState;
f7575c96 246struct kvm_run;
a60f24b5 247
b0cb0a66
VP
248struct hax_vcpu_state;
249
8cd70437
AF
250#define TB_JMP_CACHE_BITS 12
251#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
252
4b4629d9 253/* work queue */
14e6fe12
PB
254
255/* The union type allows passing of 64 bit target pointers on 32 bit
256 * hosts in a single parameter
257 */
258typedef union {
259 int host_int;
260 unsigned long host_ulong;
261 void *host_ptr;
262 vaddr target_ptr;
263} run_on_cpu_data;
264
265#define RUN_ON_CPU_HOST_PTR(p) ((run_on_cpu_data){.host_ptr = (p)})
266#define RUN_ON_CPU_HOST_INT(i) ((run_on_cpu_data){.host_int = (i)})
267#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
268#define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)})
269#define RUN_ON_CPU_NULL RUN_ON_CPU_HOST_PTR(NULL)
270
271typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
272
d148d90e 273struct qemu_work_item;
4b4629d9 274
0b8497f0 275#define CPU_UNSET_NUMA_NODE_ID -1
d01c05c9 276#define CPU_TRACE_DSTATE_MAX_EVENTS 32
0b8497f0 277
dd83b06a
AF
278/**
279 * CPUState:
55e5c285 280 * @cpu_index: CPU index (informative).
ce3960eb
AF
281 * @nr_cores: Number of cores within this CPU package.
282 * @nr_threads: Number of threads within this CPU.
c265e976
PB
283 * @running: #true if CPU is currently running (lockless).
284 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
ab129972 285 * valid under cpu_list_lock.
61a46217 286 * @created: Indicates whether the CPU thread has been successfully created.
259186a7
AF
287 * @interrupt_request: Indicates a pending interrupt request.
288 * @halted: Nonzero if the CPU is in suspended state.
4fdeee7c 289 * @stop: Indicates a pending stop request.
f324e766 290 * @stopped: Indicates the CPU has been artificially stopped.
4c055ab5 291 * @unplug: Indicates a pending CPU unplug request.
bac05aa9 292 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
ed2803da 293 * @singlestep_enabled: Flags for single-stepping.
efee7340 294 * @icount_extra: Instructions until next timer event.
1aab16c2
PB
295 * @icount_decr: Low 16 bits: number of cycles left, only used in icount mode.
296 * High 16 bits: Set to -1 to force TCG to stop executing linked TBs for this
297 * CPU and return to its top level loop (even in non-icount mode).
28ecfd7a
AF
298 * This allows a single read-compare-cbranch-write sequence to test
299 * for both decrementer underflow and exceptions.
414b15c9
PB
300 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
301 * requires that IO only be performed on the last instruction of a TB
302 * so that interrupts take effect immediately.
32857f4d
PM
303 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
304 * AddressSpaces this CPU has)
12ebc9a7 305 * @num_ases: number of CPUAddressSpaces in @cpu_ases
32857f4d
PM
306 * @as: Pointer to the first AddressSpace, for the convenience of targets which
307 * only have a single AddressSpace
c05efcb1 308 * @env_ptr: Pointer to subclass-specific CPUArchState field.
eac8b355 309 * @gdb_regs: Additional GDB registers.
a0e372f0 310 * @gdb_num_regs: Number of total registers accessible to GDB.
35143f01 311 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
182735ef 312 * @next_cpu: Next CPU sharing TB cache.
0429a971 313 * @opaque: User data.
93afeade
AF
314 * @mem_io_pc: Host Program Counter at which the memory was accessed.
315 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
8737c51c 316 * @kvm_fd: vCPU file descriptor for KVM.
376692b9
PB
317 * @work_mutex: Lock to prevent multiple access to queued_work_*.
318 * @queued_work_first: First asynchronous work pending.
d4381116
LV
319 * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
320 * to @trace_dstate).
48151859 321 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
ed860129
PM
322 * @ignore_memory_transaction_failures: Cached copy of the MachineState
323 * flag of the same name: allows the board to suppress calling of the
324 * CPU do_transaction_failed hook function.
dd83b06a
AF
325 *
326 * State of one CPU core or thread.
327 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    bool running, has_waiter;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    bool unplug;
    bool crash_occurred;
    bool exit_request;
    uint32_t cflags_next_tb;
    /* updates protected by BQL */
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_budget;
    int64_t icount_extra;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    struct qemu_work_item *queued_work_first, *queued_work_last;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    void *env_ptr; /* CPUArchState */

    /* Accessed in parallel; all accesses must be atomic */
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];

    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;  /* link in the global 'cpus' tail queue */

    /* ice debug support */
    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;

    int kvm_fd;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* Used for events with 'vcpu' and *without* the 'disabled' properties */
    DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS);
    DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS);

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index;
    uint32_t halted;
    uint32_t can_do_io;
    int32_t exception_index;

    /* shared by kvm, hax and hvf */
    bool vcpu_dirty;

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    bool ignore_memory_transaction_failures;

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0.  Leave this field at the end so as to make the
       (absolute value) offset as small as possible.  This reduces code
       size, especially for hosts without large memory offsets.  */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;

    struct hax_vcpu_state *hax_vcpu;

    /* The pending_tlb_flush flag is set and cleared atomically to
     * avoid potential races.  The aim of the flag is to avoid
     * unnecessary flushes.
     */
    uint16_t pending_tlb_flush;

    /* vCPU file descriptor for HVF, by analogy with kvm_fd above --
     * NOTE(review): confirm against target/i386/hvf usage. */
    int hvf_fd;

    /* track IOMMUs whose translations we've cached in the TCG TLB */
    GArray *iommu_notifiers;
};
436
bdc44640
AF
437QTAILQ_HEAD(CPUTailQ, CPUState);
438extern struct CPUTailQ cpus;
439#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
440#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
441#define CPU_FOREACH_SAFE(cpu, next_cpu) \
442 QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
8487d123
BR
443#define CPU_FOREACH_REVERSE(cpu) \
444 QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node)
bdc44640 445#define first_cpu QTAILQ_FIRST(&cpus)
182735ef 446
f240eb6f 447extern __thread CPUState *current_cpu;
4917cf44 448
f3ced3c5
EC
449static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
450{
451 unsigned int i;
452
453 for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
454 atomic_set(&cpu->tb_jmp_cache[i], NULL);
455 }
456}
457
8d4e9146
FK
458/**
459 * qemu_tcg_mttcg_enabled:
460 * Check whether we are running MultiThread TCG or not.
461 *
462 * Returns: %true if we are in MTTCG mode %false otherwise.
463 */
464extern bool mttcg_enabled;
465#define qemu_tcg_mttcg_enabled() (mttcg_enabled)
466
444d5590
AF
467/**
468 * cpu_paging_enabled:
469 * @cpu: The CPU whose state is to be inspected.
470 *
471 * Returns: %true if paging is enabled, %false otherwise.
472 */
473bool cpu_paging_enabled(const CPUState *cpu);
474
a23bbfda
AF
475/**
476 * cpu_get_memory_mapping:
477 * @cpu: The CPU whose memory mappings are to be obtained.
478 * @list: Where to write the memory mappings to.
479 * @errp: Pointer for reporting an #Error.
480 */
481void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
482 Error **errp);
483
c72bf468
JF
484/**
485 * cpu_write_elf64_note:
486 * @f: pointer to a function that writes memory to a file
487 * @cpu: The CPU whose memory is to be dumped
488 * @cpuid: ID number of the CPU
489 * @opaque: pointer to the CPUState struct
490 */
491int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
492 int cpuid, void *opaque);
493
494/**
495 * cpu_write_elf64_qemunote:
496 * @f: pointer to a function that writes memory to a file
497 * @cpu: The CPU whose memory is to be dumped
498 * @cpuid: ID number of the CPU
499 * @opaque: pointer to the CPUState struct
500 */
501int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
502 void *opaque);
503
504/**
505 * cpu_write_elf32_note:
506 * @f: pointer to a function that writes memory to a file
507 * @cpu: The CPU whose memory is to be dumped
508 * @cpuid: ID number of the CPU
509 * @opaque: pointer to the CPUState struct
510 */
511int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
512 int cpuid, void *opaque);
513
514/**
515 * cpu_write_elf32_qemunote:
516 * @f: pointer to a function that writes memory to a file
517 * @cpu: The CPU whose memory is to be dumped
518 * @cpuid: ID number of the CPU
519 * @opaque: pointer to the CPUState struct
520 */
521int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
522 void *opaque);
dd83b06a 523
c86f106b
AN
524/**
525 * cpu_get_crash_info:
526 * @cpu: The CPU to get crash information for
527 *
528 * Gets the previously saved crash information.
529 * Caller is responsible for freeing the data.
530 */
531GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
532
878096ee
AF
533/**
534 * CPUDumpFlags:
535 * @CPU_DUMP_CODE:
536 * @CPU_DUMP_FPU: dump FPU register state, not just integer
537 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
538 */
539enum CPUDumpFlags {
540 CPU_DUMP_CODE = 0x00010000,
541 CPU_DUMP_FPU = 0x00020000,
542 CPU_DUMP_CCOP = 0x00040000,
543};
544
545/**
546 * cpu_dump_state:
547 * @cpu: The CPU whose state is to be dumped.
548 * @f: File to dump to.
549 * @cpu_fprintf: Function to dump with.
550 * @flags: Flags what to dump.
551 *
552 * Dumps CPU state.
553 */
554void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
555 int flags);
556
557/**
558 * cpu_dump_statistics:
559 * @cpu: The CPU whose state is to be dumped.
560 * @f: File to dump to.
561 * @cpu_fprintf: Function to dump with.
562 * @flags: Flags what to dump.
563 *
564 * Dumps CPU statistics.
565 */
566void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
567 int flags);
568
00b941e5 569#ifndef CONFIG_USER_ONLY
1dc6fb1f
PM
570/**
571 * cpu_get_phys_page_attrs_debug:
572 * @cpu: The CPU to obtain the physical page address for.
573 * @addr: The virtual address.
574 * @attrs: Updated on return with the memory transaction attributes to use
575 * for this access.
576 *
577 * Obtains the physical page corresponding to a virtual one, together
578 * with the corresponding memory transaction attributes to use for the access.
579 * Use it only for debugging because no protection checks are done.
580 *
581 * Returns: Corresponding physical page address or -1 if no page found.
582 */
583static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
584 MemTxAttrs *attrs)
585{
586 CPUClass *cc = CPU_GET_CLASS(cpu);
587
588 if (cc->get_phys_page_attrs_debug) {
589 return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
590 }
591 /* Fallback for CPUs which don't implement the _attrs_ hook */
592 *attrs = MEMTXATTRS_UNSPECIFIED;
593 return cc->get_phys_page_debug(cpu, addr);
594}
595
00b941e5
AF
596/**
597 * cpu_get_phys_page_debug:
598 * @cpu: The CPU to obtain the physical page address for.
599 * @addr: The virtual address.
600 *
601 * Obtains the physical page corresponding to a virtual one.
602 * Use it only for debugging because no protection checks are done.
603 *
604 * Returns: Corresponding physical page address or -1 if no page found.
605 */
606static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
607{
1dc6fb1f 608 MemTxAttrs attrs = {};
00b941e5 609
1dc6fb1f 610 return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
00b941e5 611}
d7f25a9e
PM
612
613/** cpu_asidx_from_attrs:
614 * @cpu: CPU
615 * @attrs: memory transaction attributes
616 *
617 * Returns the address space index specifying the CPU AddressSpace
618 * to use for a memory access with the given transaction attributes.
619 */
620static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
621{
622 CPUClass *cc = CPU_GET_CLASS(cpu);
9c8c334b 623 int ret = 0;
d7f25a9e
PM
624
625 if (cc->asidx_from_attrs) {
9c8c334b
RH
626 ret = cc->asidx_from_attrs(cpu, attrs);
627 assert(ret < cpu->num_ases && ret >= 0);
d7f25a9e 628 }
9c8c334b 629 return ret;
d7f25a9e 630}
00b941e5
AF
631#endif
632
267f685b
PB
633/**
634 * cpu_list_add:
635 * @cpu: The CPU to be added to the list of CPUs.
636 */
637void cpu_list_add(CPUState *cpu);
638
639/**
640 * cpu_list_remove:
641 * @cpu: The CPU to be removed from the list of CPUs.
642 */
643void cpu_list_remove(CPUState *cpu);
644
dd83b06a
AF
645/**
646 * cpu_reset:
647 * @cpu: The CPU whose state is to be reset.
648 */
649void cpu_reset(CPUState *cpu);
650
2b8c2754
AF
651/**
652 * cpu_class_by_name:
653 * @typename: The CPU base type.
654 * @cpu_model: The model string without any parameters.
655 *
656 * Looks up a CPU #ObjectClass matching name @cpu_model.
657 *
658 * Returns: A #CPUClass or %NULL if no matching class is found.
659 */
660ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
661
3c72234c
IM
662/**
663 * cpu_create:
664 * @typename: The CPU type.
665 *
666 * Instantiates a CPU and realizes the CPU.
667 *
668 * Returns: A #CPUState or %NULL if an error occurred.
669 */
670CPUState *cpu_create(const char *typename);
671
672/**
2278b939 673 * parse_cpu_model:
3c72234c
IM
674 * @cpu_model: The model string including optional parameters.
675 *
676 * processes optional parameters and registers them as global properties
677 *
4482e05c
IM
678 * Returns: type of CPU to create or prints error and terminates process
679 * if an error occurred.
3c72234c 680 */
2278b939 681const char *parse_cpu_model(const char *cpu_model);
9262685b 682
3993c6bd 683/**
8c2e1b00 684 * cpu_has_work:
3993c6bd
AF
685 * @cpu: The vCPU to check.
686 *
687 * Checks whether the CPU has work to do.
688 *
689 * Returns: %true if the CPU has work, %false otherwise.
690 */
8c2e1b00
AF
691static inline bool cpu_has_work(CPUState *cpu)
692{
693 CPUClass *cc = CPU_GET_CLASS(cpu);
694
695 g_assert(cc->has_work);
696 return cc->has_work(cpu);
697}
3993c6bd 698
60e82579
AF
699/**
700 * qemu_cpu_is_self:
701 * @cpu: The vCPU to check against.
702 *
703 * Checks whether the caller is executing on the vCPU thread.
704 *
705 * Returns: %true if called from @cpu's thread, %false otherwise.
706 */
707bool qemu_cpu_is_self(CPUState *cpu);
708
c08d7424
AF
709/**
710 * qemu_cpu_kick:
711 * @cpu: The vCPU to kick.
712 *
713 * Kicks @cpu's thread.
714 */
715void qemu_cpu_kick(CPUState *cpu);
716
2fa45344
AF
717/**
718 * cpu_is_stopped:
719 * @cpu: The CPU to check.
720 *
721 * Checks whether the CPU is stopped.
722 *
723 * Returns: %true if run state is not running or if artificially stopped;
724 * %false otherwise.
725 */
726bool cpu_is_stopped(CPUState *cpu);
727
d148d90e
SF
728/**
729 * do_run_on_cpu:
730 * @cpu: The vCPU to run on.
731 * @func: The function to be executed.
732 * @data: Data to pass to the function.
733 * @mutex: Mutex to release while waiting for @func to run.
734 *
735 * Used internally in the implementation of run_on_cpu.
736 */
14e6fe12 737void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
d148d90e
SF
738 QemuMutex *mutex);
739
f100f0b3
AF
740/**
741 * run_on_cpu:
742 * @cpu: The vCPU to run on.
743 * @func: The function to be executed.
744 * @data: Data to pass to the function.
745 *
746 * Schedules the function @func for execution on the vCPU @cpu.
747 */
14e6fe12 748void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
f100f0b3 749
3c02270d
CV
750/**
751 * async_run_on_cpu:
752 * @cpu: The vCPU to run on.
753 * @func: The function to be executed.
754 * @data: Data to pass to the function.
755 *
756 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
757 */
14e6fe12 758void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
3c02270d 759
53f5ed95
PB
760/**
761 * async_safe_run_on_cpu:
762 * @cpu: The vCPU to run on.
763 * @func: The function to be executed.
764 * @data: Data to pass to the function.
765 *
766 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
767 * while all other vCPUs are sleeping.
768 *
769 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
770 * BQL.
771 */
14e6fe12 772void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
53f5ed95 773
38d8f5c8
AF
774/**
775 * qemu_get_cpu:
776 * @index: The CPUState@cpu_index value of the CPU to obtain.
777 *
778 * Gets a CPU matching @index.
779 *
780 * Returns: The CPU or %NULL if there is no matching CPU.
781 */
782CPUState *qemu_get_cpu(int index);
783
69e5ff06
IM
784/**
785 * cpu_exists:
786 * @id: Guest-exposed CPU ID to lookup.
787 *
788 * Search for CPU with specified ID.
789 *
790 * Returns: %true - CPU is found, %false - CPU isn't found.
791 */
792bool cpu_exists(int64_t id);
793
5ce46cb3
EH
794/**
795 * cpu_by_arch_id:
796 * @id: Guest-exposed CPU ID of the CPU to obtain.
797 *
798 * Get a CPU with matching @id.
799 *
800 * Returns: The CPU or %NULL if there is no matching CPU.
801 */
802CPUState *cpu_by_arch_id(int64_t id);
803
2adcc85d
JH
804/**
805 * cpu_throttle_set:
806 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
807 *
808 * Throttles all vcpus by forcing them to sleep for the given percentage of
809 * time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly.
810 * (example: 10ms sleep for every 30ms awake).
811 *
812 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
813 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
814 * is called.
815 */
816void cpu_throttle_set(int new_throttle_pct);
817
818/**
819 * cpu_throttle_stop:
820 *
821 * Stops the vcpu throttling started by cpu_throttle_set.
822 */
823void cpu_throttle_stop(void);
824
825/**
826 * cpu_throttle_active:
827 *
828 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
829 */
830bool cpu_throttle_active(void);
831
832/**
833 * cpu_throttle_get_percentage:
834 *
835 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
836 *
837 * Returns: The throttle percentage in range 1 to 99.
838 */
839int cpu_throttle_get_percentage(void);
840
c3affe56
AF
841#ifndef CONFIG_USER_ONLY
842
843typedef void (*CPUInterruptHandler)(CPUState *, int);
844
845extern CPUInterruptHandler cpu_interrupt_handler;
846
847/**
848 * cpu_interrupt:
849 * @cpu: The CPU to set an interrupt on.
850 * @mask: The interrupts to set.
851 *
852 * Invokes the interrupt handler.
853 */
854static inline void cpu_interrupt(CPUState *cpu, int mask)
855{
856 cpu_interrupt_handler(cpu, mask);
857}
858
859#else /* USER_ONLY */
860
861void cpu_interrupt(CPUState *cpu, int mask);
862
863#endif /* USER_ONLY */
864
47507383
TH
865#ifdef NEED_CPU_H
866
93e22326 867#ifdef CONFIG_SOFTMMU
c658b94f
AF
868static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
869 bool is_write, bool is_exec,
870 int opaque, unsigned size)
871{
872 CPUClass *cc = CPU_GET_CLASS(cpu);
873
874 if (cc->do_unassigned_access) {
875 cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
876 }
877}
878
93e22326 879static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
b35399bb
SS
880 MMUAccessType access_type,
881 int mmu_idx, uintptr_t retaddr)
93e22326
PB
882{
883 CPUClass *cc = CPU_GET_CLASS(cpu);
884
b35399bb 885 cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
93e22326 886}
0dff0939
PM
887
888static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
889 vaddr addr, unsigned size,
890 MMUAccessType access_type,
891 int mmu_idx, MemTxAttrs attrs,
892 MemTxResult response,
893 uintptr_t retaddr)
894{
895 CPUClass *cc = CPU_GET_CLASS(cpu);
896
ed860129 897 if (!cpu->ignore_memory_transaction_failures && cc->do_transaction_failed) {
0dff0939
PM
898 cc->do_transaction_failed(cpu, physaddr, addr, size, access_type,
899 mmu_idx, attrs, response, retaddr);
900 }
901}
c658b94f
AF
902#endif
903
47507383
TH
904#endif /* NEED_CPU_H */
905
2991b890
PC
906/**
907 * cpu_set_pc:
908 * @cpu: The CPU to set the program counter for.
909 * @addr: Program counter value.
910 *
911 * Sets the program counter for a CPU.
912 */
913static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
914{
915 CPUClass *cc = CPU_GET_CLASS(cpu);
916
917 cc->set_pc(cpu, addr);
918}
919
d8ed887b
AF
920/**
921 * cpu_reset_interrupt:
922 * @cpu: The CPU to clear the interrupt on.
923 * @mask: The interrupt mask to clear.
924 *
925 * Resets interrupts on the vCPU @cpu.
926 */
927void cpu_reset_interrupt(CPUState *cpu, int mask);
928
60a3e17a
AF
929/**
930 * cpu_exit:
931 * @cpu: The CPU to exit.
932 *
933 * Requests the CPU @cpu to exit execution.
934 */
935void cpu_exit(CPUState *cpu);
936
2993683b
IM
937/**
938 * cpu_resume:
939 * @cpu: The CPU to resume.
940 *
941 * Resumes CPU, i.e. puts CPU into runnable state.
942 */
943void cpu_resume(CPUState *cpu);
dd83b06a 944
4c055ab5
GZ
945/**
946 * cpu_remove:
947 * @cpu: The CPU to remove.
948 *
949 * Requests the CPU to be removed.
950 */
951void cpu_remove(CPUState *cpu);
952
2c579042
BR
953 /**
954 * cpu_remove_sync:
955 * @cpu: The CPU to remove.
956 *
957 * Requests the CPU to be removed and waits till it is removed.
958 */
959void cpu_remove_sync(CPUState *cpu);
960
d148d90e
SF
961/**
962 * process_queued_cpu_work() - process all items on CPU work queue
963 * @cpu: The CPU which work queue to process.
964 */
965void process_queued_cpu_work(CPUState *cpu);
966
ab129972
PB
967/**
968 * cpu_exec_start:
969 * @cpu: The CPU for the current thread.
970 *
971 * Record that a CPU has started execution and can be interrupted with
972 * cpu_exit.
973 */
974void cpu_exec_start(CPUState *cpu);
975
976/**
977 * cpu_exec_end:
978 * @cpu: The CPU for the current thread.
979 *
980 * Record that a CPU has stopped execution and exclusive sections
981 * can be executed without interrupting it.
982 */
983void cpu_exec_end(CPUState *cpu);
984
985/**
986 * start_exclusive:
987 *
988 * Wait for a concurrent exclusive section to end, and then start
989 * a section of work that is run while other CPUs are not running
990 * between cpu_exec_start and cpu_exec_end. CPUs that are running
991 * cpu_exec are exited immediately. CPUs that call cpu_exec_start
992 * during the exclusive section go to sleep until this CPU calls
993 * end_exclusive.
ab129972
PB
994 */
995void start_exclusive(void);
996
997/**
998 * end_exclusive:
999 *
1000 * Concludes an exclusive execution section started by start_exclusive.
ab129972
PB
1001 */
1002void end_exclusive(void);
1003
c643bed9
AF
1004/**
1005 * qemu_init_vcpu:
1006 * @cpu: The vCPU to initialize.
1007 *
1008 * Initializes a vCPU.
1009 */
1010void qemu_init_vcpu(CPUState *cpu);
1011
3825b28f
AF
1012#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
1013#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
1014#define SSTEP_NOTIMER 0x4 /* Do not use timers while single stepping */
1015
1016/**
1017 * cpu_single_step:
1018 * @cpu: CPU to set the flags for.
1019 * @enabled: Flags to enable.
1020 *
1021 * Enables or disables single-stepping for @cpu.
1022 */
1023void cpu_single_step(CPUState *cpu, int enabled);
1024
b3310ab3
AF
1025/* Breakpoint/watchpoint flags */
1026#define BP_MEM_READ 0x01
1027#define BP_MEM_WRITE 0x02
1028#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
1029#define BP_STOP_BEFORE_ACCESS 0x04
08225676 1030/* 0x08 currently unused */
b3310ab3
AF
1031#define BP_GDB 0x10
1032#define BP_CPU 0x20
b933066a 1033#define BP_ANY (BP_GDB | BP_CPU)
08225676
PM
1034#define BP_WATCHPOINT_HIT_READ 0x40
1035#define BP_WATCHPOINT_HIT_WRITE 0x80
1036#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
b3310ab3
AF
1037
1038int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
1039 CPUBreakpoint **breakpoint);
1040int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
1041void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
1042void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
1043
b933066a
RH
1044/* Return true if PC matches an installed breakpoint. */
1045static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
1046{
1047 CPUBreakpoint *bp;
1048
1049 if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
1050 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
1051 if (bp->pc == pc && (bp->flags & mask)) {
1052 return true;
1053 }
1054 }
1055 }
1056 return false;
1057}
1058
75a34036
AF
1059int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
1060 int flags, CPUWatchpoint **watchpoint);
1061int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
1062 vaddr len, int flags);
1063void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
1064void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
1065
63c91552
PB
1066/**
1067 * cpu_get_address_space:
1068 * @cpu: CPU to get address space from
1069 * @asidx: index identifying which address space to get
1070 *
1071 * Return the requested address space of this CPU. @asidx
1072 * specifies which address space to read.
1073 */
1074AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
1075
a47dddd7
AF
1076void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
1077 GCC_FMT_ATTR(2, 3);
c7e002c5 1078extern Property cpu_common_props[];
39e329e3 1079void cpu_exec_initfn(CPUState *cpu);
ce5b1bbf 1080void cpu_exec_realizefn(CPUState *cpu, Error **errp);
7bbc124e 1081void cpu_exec_unrealizefn(CPUState *cpu);
a47dddd7 1082
47507383
TH
1083#ifdef NEED_CPU_H
1084
1a1562f5
AF
1085#ifdef CONFIG_SOFTMMU
1086extern const struct VMStateDescription vmstate_cpu_common;
1087#else
1088#define vmstate_cpu_common vmstate_dummy
1089#endif
1090
1091#define VMSTATE_CPU() { \
1092 .name = "parent_obj", \
1093 .size = sizeof(CPUState), \
1094 .vmsd = &vmstate_cpu_common, \
1095 .flags = VMS_STRUCT, \
1096 .offset = 0, \
1097}
1098
47507383
TH
1099#endif /* NEED_CPU_H */
1100
a07f953e
IM
1101#define UNASSIGNED_CPU_INDEX -1
1102
dd83b06a 1103#endif