/*
 * QEMU CPU model
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#ifndef QEMU_CPU_H
#define QEMU_CPU_H

#include <signal.h>
#include <setjmp.h>
#include "hw/qdev-core.h"
#include "disas/bfd.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qemu/typedefs.h"

typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);

/**
 * vaddr:
 * Type wide enough to contain any #target_ulong virtual address.
 */
typedef uint64_t vaddr;
#define VADDR_PRId PRId64
#define VADDR_PRIu PRIu64
#define VADDR_PRIo PRIo64
#define VADDR_PRIx PRIx64
#define VADDR_PRIX PRIX64
#define VADDR_MAX UINT64_MAX
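
/*
 * Illustrative example (editor's sketch, not part of this API): printing a
 * vaddr with its matching format macro. "dump_vaddr" is a hypothetical
 * helper name.
 *
 *     static void dump_vaddr(vaddr addr)
 *     {
 *         printf("guest virtual address: 0x%" VADDR_PRIx "\n", addr);
 *     }
 */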

/**
 * SECTION:cpu
 * @section_id: QEMU-cpu
 * @title: CPU Class
 * @short_description: Base class for all CPUs
 */

#define TYPE_CPU "cpu"

/* Since this macro is used a lot in hot code paths and in conjunction with
 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
 * an unchecked cast.
 */
#define CPU(obj) ((CPUState *)(obj))

#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)
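
/*
 * Illustrative sketch (hypothetical target code, not part of this header):
 * the unchecked CPU() cast converts a target-specific CPU pointer in hot
 * paths, while CPU_GET_CLASS() does a checked QOM lookup of the class so
 * its hooks can be called. "MyCPU", "my_cpu_to_cs" and "my_cpu_reset" are
 * made-up names.
 *
 *     static inline CPUState *my_cpu_to_cs(MyCPU *cpu)
 *     {
 *         return CPU(cpu);                  // unchecked cast, hot path
 *     }
 *
 *     static void my_cpu_reset(CPUState *cs)
 *     {
 *         CPUClass *cc = CPU_GET_CLASS(cs); // checked class lookup
 *
 *         cc->reset(cs);
 *     }
 */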

typedef struct CPUState CPUState;

typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
                                    bool is_write, bool is_exec, int opaque,
                                    unsigned size);

struct TranslationBlock;

/**
 * CPUClass:
 * @class_by_name: Callback to map -cpu command line model name to an
 * instantiatable CPU type.
 * @parse_features: Callback to parse command line arguments.
 * @reset: Callback to reset the #CPUState to its initial state.
 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
 * @has_work: Callback for checking if there is work to do.
 * @do_interrupt: Callback for interrupt handling.
 * @do_unassigned_access: Callback for unassigned access handling.
 * @do_unaligned_access: Callback for unaligned access handling, if
 * the target defines #ALIGNED_ONLY.
 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
 * runtime configurable endianness is currently big-endian. Non-configurable
 * CPUs can use the default implementation of this method. This method should
 * not be used by any callers other than the pre-1.0 virtio devices.
 * @memory_rw_debug: Callback for GDB memory access.
 * @dump_state: Callback for dumping state.
 * @dump_statistics: Callback for dumping statistics.
 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
 * @get_memory_mapping: Callback for obtaining the memory mappings.
 * @set_pc: Callback for setting the Program Counter register.
 * @synchronize_from_tb: Callback for synchronizing state from a TCG
 * #TranslationBlock.
 * @handle_mmu_fault: Callback for handling an MMU fault.
 * @get_phys_page_debug: Callback for obtaining a physical address.
 * @gdb_read_register: Callback for letting GDB read a register.
 * @gdb_write_register: Callback for letting GDB write a register.
 * @debug_excp_handler: Callback for handling debug exceptions.
 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
 * 64-bit VM coredump.
 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 64-bit VM coredump.
 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
 * 32-bit VM coredump.
 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 32-bit VM coredump.
 * @vmsd: State description for migration.
 * @gdb_num_core_regs: Number of core registers accessible to GDB.
 * @gdb_core_xml_file: File name for core registers GDB XML description.
 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 * before the insn which triggers a watchpoint rather than after it.
 * @cpu_exec_enter: Callback for cpu_exec preparation.
 * @cpu_exec_exit: Callback for cpu_exec cleanup.
 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
 * @disas_set_info: Set up architecture-specific components of disassembly info.
 *
 * Represents a CPU family or model.
 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(CPUState *cpu, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                int is_write, int is_user, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                       int flags);
    void (*dump_statistics)(CPUState *cpu, FILE *f,
                            fprintf_function cpu_fprintf, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
                            int mmu_index);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    void (*debug_excp_handler)(CPUState *cpu);

    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    const struct VMStateDescription *vmsd;
    int gdb_num_core_regs;
    const char *gdb_core_xml_file;
    bool gdb_stop_before_watchpoint;

    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
} CPUClass;
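
/*
 * Illustrative sketch (hypothetical target, not part of this header): a
 * target's class_init typically overrides the hooks above. All "my_cpu_*"
 * names are placeholders.
 *
 *     static void my_cpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *
 *         cc->class_by_name = my_cpu_class_by_name;
 *         cc->reset = my_cpu_reset;
 *         cc->has_work = my_cpu_has_work;
 *         cc->do_interrupt = my_cpu_do_interrupt;
 *         cc->set_pc = my_cpu_set_pc;
 *         cc->get_phys_page_debug = my_cpu_get_phys_page_debug;
 *         cc->gdb_num_core_regs = 16; // placeholder register count
 *     }
 */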

#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif

typedef struct CPUBreakpoint {
    vaddr pc;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;

typedef struct CPUWatchpoint {
    vaddr vaddr;
    vaddr len;
    vaddr hitaddr;
    MemTxAttrs hitattrs;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;

struct KVMState;
struct kvm_run;

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/**
 * CPUState:
 * @cpu_index: CPU index (informative).
 * @nr_cores: Number of cores within this CPU package.
 * @nr_threads: Number of threads within this CPU.
 * @numa_node: NUMA node this CPU belongs to.
 * @host_tid: Host thread ID.
 * @running: %true if CPU is currently running (usermode).
 * @created: Indicates whether the CPU thread has been successfully created.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in a suspended state.
 * @stop: Indicates a pending stop request.
 * @stopped: Indicates the CPU has been artificially stopped.
 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU.
 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
 * CPU and return to its top level loop.
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @icount_decr: Number of cycles left, with interrupt flag in high bit.
 * This allows a single read-compare-cbranch-write sequence to test
 * for both decrementer underflow and exceptions.
 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
 * requires that IO only be performed on the last instruction of a TB
 * so that interrupts take effect immediately.
 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 * AddressSpaces this CPU has)
 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 * @as: Pointer to the first AddressSpace, for the convenience of targets which
 * only have a single AddressSpace
 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 * @current_tb: Currently executing TB.
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 * @next_cpu: Next CPU sharing TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
 * @kvm_fd: vCPU file descriptor for KVM.
 * @work_mutex: Lock to prevent multiple access to queued_work_*.
 * @queued_work_first: First asynchronous work pending.
 *
 * State of one CPU core or thread.
 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;
    int numa_node;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    uint32_t host_tid;
    bool running;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    bool crash_occurred;
    bool exit_request;
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_extra;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    struct qemu_work_item *queued_work_first, *queued_work_last;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;

    void *env_ptr; /* CPUArchState */
    struct TranslationBlock *current_tb;
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;

    int kvm_fd;
    bool kvm_vcpu_dirty;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index; /* used by alpha TCG */
    uint32_t halted; /* used by alpha, cris, ppc TCG */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;
    uint32_t can_do_io;
    int32_t exception_index; /* used by m68k TCG */

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0. Leave this field at the end so as to make the
       (absolute value) offset as small as possible. This reduces code
       size, especially for hosts without large memory offsets. */
    uint32_t tcg_exit_req;
};

QTAILQ_HEAD(CPUTailQ, CPUState);
extern struct CPUTailQ cpus;
#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
#define CPU_FOREACH_REVERSE(cpu) \
    QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node)
#define first_cpu QTAILQ_FIRST(&cpus)
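
/*
 * Illustrative example (editor's sketch, not part of this API): iterating
 * over all CPUs and kicking each one. "kick_all_cpus" is a hypothetical
 * helper; qemu_cpu_kick() is declared further down in this header.
 *
 *     static void kick_all_cpus(void)
 *     {
 *         CPUState *cpu;
 *
 *         CPU_FOREACH(cpu) {
 *             qemu_cpu_kick(cpu);
 *         }
 *     }
 */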

extern __thread CPUState *current_cpu;

/**
 * cpu_paging_enabled:
 * @cpu: The CPU whose state is to be inspected.
 *
 * Returns: %true if paging is enabled, %false otherwise.
 */
bool cpu_paging_enabled(const CPUState *cpu);

/**
 * cpu_get_memory_mapping:
 * @cpu: The CPU whose memory mappings are to be obtained.
 * @list: Where to write the memory mappings to.
 * @errp: Pointer for reporting an #Error.
 */
void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp);

/**
 * cpu_write_elf64_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf64_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_write_elf32_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf32_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * CPUDumpFlags:
 * @CPU_DUMP_CODE:
 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 */
enum CPUDumpFlags {
    CPU_DUMP_CODE = 0x00010000,
    CPU_DUMP_FPU = 0x00020000,
    CPU_DUMP_CCOP = 0x00040000,
};

/**
 * cpu_dump_state:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU state.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                    int flags);

/**
 * cpu_dump_statistics:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU statistics.
 */
void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

#ifndef CONFIG_USER_ONLY
/**
 * cpu_get_phys_page_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 *
 * Obtains the physical page corresponding to a virtual one.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    return cc->get_phys_page_debug(cpu, addr);
}
#endif
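
/*
 * Illustrative example (editor's sketch, not part of this API): a
 * debug-only translation of a guest virtual address. HWADDR_PRIx is assumed
 * to come from "exec/hwaddr.h"; "debug_translate" is a hypothetical helper.
 *
 *     static void debug_translate(CPUState *cpu, vaddr addr)
 *     {
 *         hwaddr phys = cpu_get_phys_page_debug(cpu, addr);
 *
 *         if (phys == -1) {
 *             printf("0x%" VADDR_PRIx ": no page mapped\n", addr);
 *         } else {
 *             printf("0x%" VADDR_PRIx " -> 0x%" HWADDR_PRIx "\n",
 *                    addr, phys);
 *         }
 *     }
 */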

/**
 * cpu_reset:
 * @cpu: The CPU whose state is to be reset.
 */
void cpu_reset(CPUState *cpu);

/**
 * cpu_class_by_name:
 * @typename: The CPU base type.
 * @cpu_model: The model string without any parameters.
 *
 * Looks up a CPU #ObjectClass matching name @cpu_model.
 *
 * Returns: A #CPUClass or %NULL if no matching class is found.
 */
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);

/**
 * cpu_generic_init:
 * @typename: The CPU base type.
 * @cpu_model: The model string including optional parameters.
 *
 * Instantiates a CPU, processes optional parameters and realizes the CPU.
 *
 * Returns: A #CPUState or %NULL if an error occurred.
 */
CPUState *cpu_generic_init(const char *typename, const char *cpu_model);

/**
 * cpu_has_work:
 * @cpu: The vCPU to check.
 *
 * Checks whether the CPU has work to do.
 *
 * Returns: %true if the CPU has work, %false otherwise.
 */
static inline bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    g_assert(cc->has_work);
    return cc->has_work(cpu);
}

/**
 * qemu_cpu_is_self:
 * @cpu: The vCPU to check against.
 *
 * Checks whether the caller is executing on the vCPU thread.
 *
 * Returns: %true if called from @cpu's thread, %false otherwise.
 */
bool qemu_cpu_is_self(CPUState *cpu);

/**
 * qemu_cpu_kick:
 * @cpu: The vCPU to kick.
 *
 * Kicks @cpu's thread.
 */
void qemu_cpu_kick(CPUState *cpu);

/**
 * cpu_is_stopped:
 * @cpu: The CPU to check.
 *
 * Checks whether the CPU is stopped.
 *
 * Returns: %true if run state is not running or if artificially stopped;
 * %false otherwise.
 */
bool cpu_is_stopped(CPUState *cpu);

/**
 * run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu.
 */
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

/**
 * async_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 */
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
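
/*
 * Illustrative example (editor's sketch, not part of this API): queuing
 * work on a specific vCPU thread. run_on_cpu() waits for the function to
 * finish; async_run_on_cpu() merely queues it and returns. "say_hello" and
 * "greet_cpu" are hypothetical names.
 *
 *     static void say_hello(void *data)
 *     {
 *         CPUState *cpu = data;
 *
 *         printf("hello from CPU %d\n", cpu->cpu_index);
 *     }
 *
 *     static void greet_cpu(CPUState *cpu)
 *     {
 *         async_run_on_cpu(cpu, say_hello, cpu);
 *     }
 */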

/**
 * qemu_get_cpu:
 * @index: The CPUState@cpu_index value of the CPU to obtain.
 *
 * Gets a CPU matching @index.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *qemu_get_cpu(int index);

/**
 * cpu_exists:
 * @id: Guest-exposed CPU ID to look up.
 *
 * Searches for a CPU with the specified ID.
 *
 * Returns: %true if the CPU is found, %false otherwise.
 */
bool cpu_exists(int64_t id);

/**
 * cpu_throttle_set:
 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
 *
 * Throttles all vcpus by forcing them to sleep for the given percentage of
 * time. A throttle percentage of 25 corresponds roughly to a 75% duty cycle
 * (example: 10ms sleep for every 30ms awake).
 *
 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
 * is called.
 */
void cpu_throttle_set(int new_throttle_pct);

/**
 * cpu_throttle_stop:
 *
 * Stops the vcpu throttling started by cpu_throttle_set.
 */
void cpu_throttle_stop(void);
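
/*
 * Illustrative example (editor's sketch, not part of this API): throttling
 * vCPUs while waiting for something (e.g. migration) to catch up. Passing
 * 25 makes each vCPU sleep roughly 25% of the time.
 *
 *     cpu_throttle_set(25);    // start throttling at 25% sleep time
 *     // ... let the slower consumer catch up ...
 *     cpu_throttle_stop();     // resume full-speed execution
 */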

/**
 * cpu_throttle_active:
 *
 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
 */
bool cpu_throttle_active(void);

/**
 * cpu_throttle_get_percentage:
 *
 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
 *
 * Returns: The throttle percentage in range 1 to 99.
 */
int cpu_throttle_get_percentage(void);

#ifndef CONFIG_USER_ONLY

typedef void (*CPUInterruptHandler)(CPUState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

/**
 * cpu_interrupt:
 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
 *
 * Invokes the interrupt handler.
 */
static inline void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu_interrupt_handler(cpu, mask);
}

#else /* USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask);

#endif /* USER_ONLY */

#ifdef CONFIG_SOFTMMU
static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                         bool is_write, bool is_exec,
                                         int opaque, unsigned size)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->do_unassigned_access) {
        cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
    }
}

static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        int is_write, int is_user,
                                        uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
}
#endif

/**
 * cpu_set_pc:
 * @cpu: The CPU to set the program counter for.
 * @addr: Program counter value.
 *
 * Sets the program counter for a CPU.
 */
static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->set_pc(cpu, addr);
}

/**
 * cpu_reset_interrupt:
 * @cpu: The CPU to clear the interrupt on.
 * @mask: The interrupt mask to clear.
 *
 * Resets interrupts on the vCPU @cpu.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask);

/**
 * cpu_exit:
 * @cpu: The CPU to exit.
 *
 * Requests the CPU @cpu to exit execution.
 */
void cpu_exit(CPUState *cpu);

/**
 * cpu_resume:
 * @cpu: The CPU to resume.
 *
 * Resumes CPU, i.e. puts CPU into runnable state.
 */
void cpu_resume(CPUState *cpu);

/**
 * qemu_init_vcpu:
 * @cpu: The vCPU to initialize.
 *
 * Initializes a vCPU.
 */
void qemu_init_vcpu(CPUState *cpu);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

/**
 * cpu_single_step:
 * @cpu: CPU to set the flags for.
 * @enabled: Flags to enable.
 *
 * Enables or disables single-stepping for @cpu.
 */
void cpu_single_step(CPUState *cpu, int enabled);
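
/*
 * Illustrative example (editor's sketch, not part of this API): how a
 * debugger stub might enable quiet single-stepping (no IRQs, no timers)
 * and later turn it off again.
 *
 *     cpu_single_step(cpu, SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER);
 *     // ... resume the CPU; it stops again after one instruction ...
 *     cpu_single_step(cpu, 0);
 */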

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
/* 0x08 currently unused */
#define BP_GDB                0x10
#define BP_CPU                0x20
#define BP_ANY                (BP_GDB | BP_CPU)
#define BP_WATCHPOINT_HIT_READ 0x40
#define BP_WATCHPOINT_HIT_WRITE 0x80
#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)

int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
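
/*
 * Illustrative example (editor's sketch, not part of this API): inserting a
 * GDB-owned breakpoint at a guest PC and removing it by reference once it
 * is no longer needed. "pc" is a hypothetical guest address.
 *
 *     CPUBreakpoint *bp;
 *
 *     if (cpu_breakpoint_insert(cpu, pc, BP_GDB, &bp) == 0) {
 *         // ... run until the breakpoint is hit ...
 *         cpu_breakpoint_remove_by_ref(cpu, bp);
 *     }
 */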

/* Return true if PC matches an installed breakpoint. */
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
            if (bp->pc == pc && (bp->flags & mask)) {
                return true;
            }
        }
    }
    return false;
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
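
/*
 * Illustrative example (editor's sketch, not part of this API): watching
 * four bytes of guest memory for writes. On a hit, cpu->watchpoint_hit
 * points at the triggered watchpoint. "addr" is a hypothetical address.
 *
 *     CPUWatchpoint *wp;
 *
 *     if (cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) == 0) {
 *         // ... run the guest ...
 *         cpu_watchpoint_remove_by_ref(cpu, wp);
 *     }
 */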

void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
void cpu_exec_exit(CPUState *cpu);

#ifdef CONFIG_SOFTMMU
extern const struct VMStateDescription vmstate_cpu_common;
#else
#define vmstate_cpu_common vmstate_dummy
#endif

#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}
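
/*
 * Illustrative sketch (hypothetical target, not part of this header): a
 * target CPU's VMStateDescription usually places VMSTATE_CPU() first so
 * the common CPUState fields are migrated before the architecture-specific
 * ones. "vmstate_my_cpu" is a made-up name.
 *
 *     static const VMStateDescription vmstate_my_cpu = {
 *         .name = "cpu",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_CPU(),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */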

#endif