/*
 * QEMU CPU model
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#ifndef QEMU_CPU_H
#define QEMU_CPU_H

#include <signal.h>
#include <setjmp.h>
#include "hw/qdev-core.h"
#include "exec/hwaddr.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qemu/tls.h"
#include "qemu/typedefs.h"

typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);

/**
 * vaddr:
 * Type wide enough to contain any #target_ulong virtual address.
 */
typedef uint64_t vaddr;
#define VADDR_PRId PRId64
#define VADDR_PRIu PRIu64
#define VADDR_PRIo PRIo64
#define VADDR_PRIx PRIx64
#define VADDR_PRIX PRIX64
#define VADDR_MAX UINT64_MAX

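/*
 * Illustrative sketch (not part of the API): a vaddr is printed with the
 * matching conversion macro, independent of the target's pointer width.
 *
 *     vaddr addr = 0x1000;
 *     printf("watchpoint hit at 0x%" VADDR_PRIx "\n", addr);
 */
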
/**
 * SECTION:cpu
 * @section_id: QEMU-cpu
 * @title: CPU Class
 * @short_description: Base class for all CPUs
 */

#define TYPE_CPU "cpu"

/* Since this macro is used a lot in hot code paths and in conjunction with
 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
 * an unchecked cast.
 */
#define CPU(obj) ((CPUState *)(obj))

#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)

typedef struct CPUState CPUState;

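/*
 * Illustrative sketch (variable names are hypothetical): the usual QOM cast
 * pattern, downcasting a generic Object/DeviceState pointer and fetching
 * its class.
 *
 *     CPUState *cpu = CPU(dev);
 *     CPUClass *cc = CPU_GET_CLASS(cpu);
 */
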
typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
                                    bool is_write, bool is_exec, int opaque,
                                    unsigned size);

struct TranslationBlock;

/**
 * CPUClass:
 * @class_by_name: Callback to map -cpu command line model name to an
 * instantiatable CPU type.
 * @parse_features: Callback to parse command line arguments.
 * @reset: Callback to reset the #CPUState to its initial state.
 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
 * @has_work: Callback for checking if there is work to do.
 * @do_interrupt: Callback for interrupt handling.
 * @do_unassigned_access: Callback for unassigned access handling.
 * @do_unaligned_access: Callback for unaligned access handling, if
 * the target defines #ALIGNED_ONLY.
 * @memory_rw_debug: Callback for GDB memory access.
 * @dump_state: Callback for dumping state.
 * @dump_statistics: Callback for dumping statistics.
 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
 * @get_memory_mapping: Callback for obtaining the memory mappings.
 * @set_pc: Callback for setting the Program Counter register.
 * @synchronize_from_tb: Callback for synchronizing state from a TCG
 * #TranslationBlock.
 * @handle_mmu_fault: Callback for handling an MMU fault.
 * @get_phys_page_debug: Callback for obtaining a physical address.
 * @gdb_read_register: Callback for letting GDB read a register.
 * @gdb_write_register: Callback for letting GDB write a register.
 * @debug_excp_handler: Callback for handling debug exceptions.
 * @vmsd: State description for migration.
 * @gdb_num_core_regs: Number of core registers accessible to GDB.
 * @gdb_core_xml_file: File name for core registers GDB XML description.
 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 * before the insn which triggers a watchpoint rather than after it.
 * @cpu_exec_enter: Callback for cpu_exec preparation.
 * @cpu_exec_exit: Callback for cpu_exec cleanup.
 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
 *
 * Represents a CPU family or model.
 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(CPUState *cpu, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                int is_write, int is_user, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                       int flags);
    void (*dump_statistics)(CPUState *cpu, FILE *f,
                            fprintf_function cpu_fprintf, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
                            int mmu_index);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    void (*debug_excp_handler)(CPUState *cpu);

    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    const struct VMStateDescription *vmsd;
    int gdb_num_core_regs;
    const char *gdb_core_xml_file;
    bool gdb_stop_before_watchpoint;

    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
} CPUClass;

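/*
 * Illustrative sketch (the FooCPU names are hypothetical): a target's
 * class_init typically overrides a subset of these hooks on the CPUClass,
 * e.g.:
 *
 *     static void foo_cpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *
 *         cc->has_work = foo_cpu_has_work;
 *         cc->do_interrupt = foo_cpu_do_interrupt;
 *         cc->set_pc = foo_cpu_set_pc;
 *         cc->gdb_num_core_regs = 16;
 *     }
 */
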
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif

typedef struct CPUBreakpoint {
    vaddr pc;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;

typedef struct CPUWatchpoint {
    vaddr vaddr;
    vaddr len;
    vaddr hitaddr;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;

struct KVMState;
struct kvm_run;

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/**
 * CPUState:
 * @cpu_index: CPU index (informative).
 * @nr_cores: Number of cores within this CPU package.
 * @nr_threads: Number of threads within this CPU.
 * @numa_node: NUMA node this CPU belongs to.
 * @host_tid: Host thread ID.
 * @running: #true if CPU is currently running (usermode).
 * @created: Indicates whether the CPU thread has been successfully created.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in a suspended state.
 * @stop: Indicates a pending stop request.
 * @stopped: Indicates the CPU has been artificially stopped.
 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
 * CPU and return to its top level loop.
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @icount_decr: Number of cycles left, with interrupt flag in high bit.
 * This allows a single read-compare-cbranch-write sequence to test
 * for both decrementer underflow and exceptions.
 * @can_do_io: Nonzero if memory-mapped IO is safe.
 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 * @current_tb: Currently executing TB.
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 * @next_cpu: Next CPU sharing TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
 * @kvm_fd: vCPU file descriptor for KVM.
 *
 * State of one CPU core or thread.
 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;
    int numa_node;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    uint32_t host_tid;
    bool running;
    struct QemuCond *halt_cond;
    struct qemu_work_item *queued_work_first, *queued_work_last;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    volatile sig_atomic_t exit_request;
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_extra;
    sigjmp_buf jmp_env;

    AddressSpace *as;
    MemoryListener *tcg_as_listener;

    void *env_ptr; /* CPUArchState */
    struct TranslationBlock *current_tb;
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;

    int kvm_fd;
    bool kvm_vcpu_dirty;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index; /* used by alpha TCG */
    uint32_t halted; /* used by alpha, cris, ppc TCG */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;
    uint32_t can_do_io;
    int32_t exception_index; /* used by m68k TCG */

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0.  Leave this field at the end so as to make the
       (absolute value) offset as small as possible.  This reduces code
       size, especially for hosts without large memory offsets. */
    volatile sig_atomic_t tcg_exit_req;
};

QTAILQ_HEAD(CPUTailQ, CPUState);
extern struct CPUTailQ cpus;
#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
#define first_cpu QTAILQ_FIRST(&cpus)

DECLARE_TLS(CPUState *, current_cpu);
#define current_cpu tls_var(current_cpu)

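/*
 * Illustrative sketch: iterating over all vCPUs on the global "cpus" list
 * and kicking each one (qemu_cpu_kick() is declared further down).
 *
 *     CPUState *cpu;
 *
 *     CPU_FOREACH(cpu) {
 *         qemu_cpu_kick(cpu);
 *     }
 */
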
/**
 * cpu_paging_enabled:
 * @cpu: The CPU whose state is to be inspected.
 *
 * Returns: %true if paging is enabled, %false otherwise.
 */
bool cpu_paging_enabled(const CPUState *cpu);

/**
 * cpu_get_memory_mapping:
 * @cpu: The CPU whose memory mappings are to be obtained.
 * @list: Where to write the memory mappings to.
 * @errp: Pointer for reporting an #Error.
 */
void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp);

/**
 * cpu_write_elf64_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf64_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_write_elf32_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf32_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * CPUDumpFlags:
 * @CPU_DUMP_CODE:
 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 */
enum CPUDumpFlags {
    CPU_DUMP_CODE = 0x00010000,
    CPU_DUMP_FPU  = 0x00020000,
    CPU_DUMP_CCOP = 0x00040000,
};

/**
 * cpu_dump_state:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU state.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                    int flags);

/**
 * cpu_dump_statistics:
 * @cpu: The CPU whose statistics are to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU statistics.
 */
void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

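/*
 * Illustrative sketch: dumping a vCPU's register state to stderr, including
 * FPU registers, using plain fprintf as the output callback.
 *
 *     cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
 */
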
#ifndef CONFIG_USER_ONLY
/**
 * cpu_get_phys_page_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 *
 * Obtains the physical page corresponding to a virtual one.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->get_phys_page_debug(cpu, addr);
}
#endif

/**
 * cpu_reset:
 * @cpu: The CPU whose state is to be reset.
 */
void cpu_reset(CPUState *cpu);

/**
 * cpu_class_by_name:
 * @typename: The CPU base type.
 * @cpu_model: The model string without any parameters.
 *
 * Looks up a CPU #ObjectClass matching name @cpu_model.
 *
 * Returns: A #CPUClass or %NULL if no matching class is found.
 */
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);

/**
 * cpu_generic_init:
 * @typename: The CPU base type.
 * @cpu_model: The model string including optional parameters.
 *
 * Instantiates a CPU, processes optional parameters and realizes the CPU.
 *
 * Returns: A #CPUState or %NULL if an error occurred.
 */
CPUState *cpu_generic_init(const char *typename, const char *cpu_model);

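/*
 * Illustrative sketch (TYPE_FOO_CPU and the model string are hypothetical):
 * instantiating and realizing a CPU from a board's init code.
 *
 *     CPUState *cpu = cpu_generic_init(TYPE_FOO_CPU, "foo-model,feature=on");
 *     if (cpu == NULL) {
 *         fprintf(stderr, "Unable to find CPU definition\n");
 *         exit(1);
 *     }
 */
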
/**
 * cpu_has_work:
 * @cpu: The vCPU to check.
 *
 * Checks whether the CPU has work to do.
 *
 * Returns: %true if the CPU has work, %false otherwise.
 */
static inline bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    g_assert(cc->has_work);
    return cc->has_work(cpu);
}

/**
 * qemu_cpu_is_self:
 * @cpu: The vCPU to check against.
 *
 * Checks whether the caller is executing on the vCPU thread.
 *
 * Returns: %true if called from @cpu's thread, %false otherwise.
 */
bool qemu_cpu_is_self(CPUState *cpu);

/**
 * qemu_cpu_kick:
 * @cpu: The vCPU to kick.
 *
 * Kicks @cpu's thread.
 */
void qemu_cpu_kick(CPUState *cpu);

/**
 * cpu_is_stopped:
 * @cpu: The CPU to check.
 *
 * Checks whether the CPU is stopped.
 *
 * Returns: %true if run state is not running or if artificially stopped;
 * %false otherwise.
 */
bool cpu_is_stopped(CPUState *cpu);

/**
 * run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu.
 */
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

/**
 * async_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 */
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

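/*
 * Illustrative sketch (do_sync_work is hypothetical): running a helper in the
 * context of a given vCPU; run_on_cpu() waits for @func to complete, while
 * async_run_on_cpu() queues it and returns immediately.
 *
 *     static void do_sync_work(void *data)
 *     {
 *         CPUState *cpu = data;
 *         ... touch per-CPU state safely here ...
 *     }
 *
 *     run_on_cpu(cpu, do_sync_work, cpu);
 */
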
/**
 * qemu_get_cpu:
 * @index: The #CPUState cpu_index value of the CPU to obtain.
 *
 * Gets a CPU matching @index.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *qemu_get_cpu(int index);

/**
 * cpu_exists:
 * @id: Guest-exposed CPU ID to lookup.
 *
 * Search for CPU with specified ID.
 *
 * Returns: %true if the CPU is found, %false otherwise.
 */
bool cpu_exists(int64_t id);

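/*
 * Illustrative sketch: looking up the first vCPU by index before kicking it.
 *
 *     CPUState *cpu = qemu_get_cpu(0);
 *     if (cpu != NULL) {
 *         qemu_cpu_kick(cpu);
 *     }
 */
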
#ifndef CONFIG_USER_ONLY

typedef void (*CPUInterruptHandler)(CPUState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

/**
 * cpu_interrupt:
 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
 *
 * Invokes the interrupt handler.
 */
static inline void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu_interrupt_handler(cpu, mask);
}

#else /* USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask);

#endif /* USER_ONLY */

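/*
 * Illustrative sketch: asserting a hardware interrupt on a vCPU; the mask is
 * built from CPU_INTERRUPT_* flags (defined elsewhere, partly per target).
 *
 *     cpu_interrupt(cpu, CPU_INTERRUPT_HARD);
 */
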
#ifdef CONFIG_SOFTMMU
static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                         bool is_write, bool is_exec,
                                         int opaque, unsigned size)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->do_unassigned_access) {
        cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
    }
}

static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        int is_write, int is_user,
                                        uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
}
#endif

/**
 * cpu_reset_interrupt:
 * @cpu: The CPU to clear the interrupt on.
 * @mask: The interrupt mask to clear.
 *
 * Resets interrupts on the vCPU @cpu.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask);

/**
 * cpu_exit:
 * @cpu: The CPU to exit.
 *
 * Requests the CPU @cpu to exit execution.
 */
void cpu_exit(CPUState *cpu);

/**
 * cpu_resume:
 * @cpu: The CPU to resume.
 *
 * Resumes CPU, i.e. puts CPU into runnable state.
 */
void cpu_resume(CPUState *cpu);

/**
 * qemu_init_vcpu:
 * @cpu: The vCPU to initialize.
 *
 * Initializes a vCPU.
 */
void qemu_init_vcpu(CPUState *cpu);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not run timers while single stepping */

/**
 * cpu_single_step:
 * @cpu: CPU to set the flags for.
 * @enabled: Flags to enable.
 *
 * Enables or disables single-stepping for @cpu.
 */
void cpu_single_step(CPUState *cpu, int enabled);

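/*
 * Illustrative sketch: enabling single-stepping on a vCPU while suppressing
 * interrupts and timers between steps.
 *
 *     cpu_single_step(cpu, SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER);
 */
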
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
/* 0x08 currently unused */
#define BP_GDB                0x10
#define BP_CPU                0x20
#define BP_WATCHPOINT_HIT_READ 0x40
#define BP_WATCHPOINT_HIT_WRITE 0x80
#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)

int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);

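/*
 * Illustrative sketch: planting a GDB breakpoint at a guest PC and removing
 * it again via the returned reference.
 *
 *     CPUBreakpoint *bp;
 *
 *     if (cpu_breakpoint_insert(cpu, pc, BP_GDB, &bp) == 0) {
 *         ...
 *         cpu_breakpoint_remove_by_ref(cpu, bp);
 *     }
 */
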
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);

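/*
 * Illustrative sketch: setting a 4-byte write watchpoint on behalf of GDB.
 * Whether the CPU stops before or after the access is governed by the
 * CPUClass gdb_stop_before_watchpoint flag and BP_STOP_BEFORE_ACCESS.
 *
 *     CPUWatchpoint *wp;
 *
 *     if (cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0) {
 *         ... report that the watchpoint could not be set ...
 *     }
 */
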
void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);

#ifdef CONFIG_SOFTMMU
extern const struct VMStateDescription vmstate_cpu_common;
#else
#define vmstate_cpu_common vmstate_dummy
#endif

#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}

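/*
 * Illustrative sketch (vmstate_foo_cpu and FooCPU are hypothetical): a target
 * embeds the common CPU state into its own VMState description like this:
 *
 *     static const VMStateDescription vmstate_foo_cpu = {
 *         .name = "cpu",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_CPU(),
 *             VMSTATE_UINT32_ARRAY(env.regs, FooCPU, 32),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */
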
#endif