]> git.proxmox.com Git - mirror_qemu.git/blob - include/qom/cpu.h
qom/cpu: Add MemoryRegion property
[mirror_qemu.git] / include / qom / cpu.h
1 /*
2 * QEMU CPU model
3 *
4 * Copyright (c) 2012 SUSE LINUX Products GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 */
20 #ifndef QEMU_CPU_H
21 #define QEMU_CPU_H
22
23 #include <signal.h>
24 #include <setjmp.h>
25 #include "hw/qdev-core.h"
26 #include "disas/bfd.h"
27 #include "exec/hwaddr.h"
28 #include "exec/memattrs.h"
29 #include "qemu/queue.h"
30 #include "qemu/thread.h"
31 #include "qemu/typedefs.h"
32
33 typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
34 void *opaque);
35
36 /**
37 * vaddr:
38 * Type wide enough to contain any #target_ulong virtual address.
39 */
40 typedef uint64_t vaddr;
41 #define VADDR_PRId PRId64
42 #define VADDR_PRIu PRIu64
43 #define VADDR_PRIo PRIo64
44 #define VADDR_PRIx PRIx64
45 #define VADDR_PRIX PRIX64
46 #define VADDR_MAX UINT64_MAX
47
48 /**
49 * SECTION:cpu
50 * @section_id: QEMU-cpu
51 * @title: CPU Class
52 * @short_description: Base class for all CPUs
53 */
54
55 #define TYPE_CPU "cpu"
56
57 /* Since this macro is used a lot in hot code paths and in conjunction with
58 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
59 * an unchecked cast.
60 */
61 #define CPU(obj) ((CPUState *)(obj))
62
63 #define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
64 #define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)
65
66 typedef struct CPUState CPUState;
67
68 typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
69 bool is_write, bool is_exec, int opaque,
70 unsigned size);
71
72 struct TranslationBlock;
73
74 /**
75 * CPUClass:
76 * @class_by_name: Callback to map -cpu command line model name to an
77 * instantiatable CPU type.
78 * @parse_features: Callback to parse command line arguments.
79 * @reset: Callback to reset the #CPUState to its initial state.
80 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
81 * @has_work: Callback for checking if there is work to do.
82 * @do_interrupt: Callback for interrupt handling.
83 * @do_unassigned_access: Callback for unassigned access handling.
84 * @do_unaligned_access: Callback for unaligned access handling, if
85 * the target defines #ALIGNED_ONLY.
86 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
87 * runtime configurable endianness is currently big-endian. Non-configurable
88 * CPUs can use the default implementation of this method. This method should
89 * not be used by any callers other than the pre-1.0 virtio devices.
90 * @memory_rw_debug: Callback for GDB memory access.
91 * @dump_state: Callback for dumping state.
92 * @dump_statistics: Callback for dumping statistics.
93 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
94 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
95 * @get_memory_mapping: Callback for obtaining the memory mappings.
96 * @set_pc: Callback for setting the Program Counter register.
97 * @synchronize_from_tb: Callback for synchronizing state from a TCG
98 * #TranslationBlock.
99 * @handle_mmu_fault: Callback for handling an MMU fault.
100 * @get_phys_page_debug: Callback for obtaining a physical address.
101 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
102 * associated memory transaction attributes to use for the access.
103 * CPUs which use memory transaction attributes should implement this
104 * instead of get_phys_page_debug.
105 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
106 * a memory access with the specified memory transaction attributes.
107 * @gdb_read_register: Callback for letting GDB read a register.
108 * @gdb_write_register: Callback for letting GDB write a register.
109 * @debug_excp_handler: Callback for handling debug exceptions.
110 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
111 * 64-bit VM coredump.
112 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
113 * note to a 64-bit VM coredump.
114 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
115 * 32-bit VM coredump.
116 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
117 * note to a 32-bit VM coredump.
118 * @vmsd: State description for migration.
119 * @gdb_num_core_regs: Number of core registers accessible to GDB.
120 * @gdb_core_xml_file: File name for core registers GDB XML description.
121 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
122 * before the insn which triggers a watchpoint rather than after it.
123 * @cpu_exec_enter: Callback for cpu_exec preparation.
124 * @cpu_exec_exit: Callback for cpu_exec cleanup.
125 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
126 * @disas_set_info: Setup architecture specific components of disassembly info
127 *
128 * Represents a CPU family or model.
129 */
130 typedef struct CPUClass {
131     /*< private >*/
132     DeviceClass parent_class;
133     /*< public >*/
134 
    /* Model lookup and command-line feature parsing (see gtk-doc above). */
135     ObjectClass *(*class_by_name)(const char *cpu_model);
136     void (*parse_features)(CPUState *cpu, char *str, Error **errp);
137 
    /* Execution, interrupt and fault-handling hooks. */
138     void (*reset)(CPUState *cpu);
139     int reset_dump_flags;
140     bool (*has_work)(CPUState *cpu);
141     void (*do_interrupt)(CPUState *cpu);
142     CPUUnassignedAccess do_unassigned_access;
143     void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
144                                 int is_write, int is_user, uintptr_t retaddr);
145     bool (*virtio_is_big_endian)(CPUState *cpu);
    /* Debug / introspection hooks (GDB, monitor dumps, page lookups). */
146     int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
147                            uint8_t *buf, int len, bool is_write);
148     void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
149                        int flags);
150     void (*dump_statistics)(CPUState *cpu, FILE *f,
151                             fprintf_function cpu_fprintf, int flags);
152     int64_t (*get_arch_id)(CPUState *cpu);
153     bool (*get_paging_enabled)(const CPUState *cpu);
154     void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
155                                Error **errp);
156     void (*set_pc)(CPUState *cpu, vaddr value);
157     void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
158     int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
159                             int mmu_index);
160     hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
161     hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
162                                         MemTxAttrs *attrs);
163     int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
164     int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
165     int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
166     void (*debug_excp_handler)(CPUState *cpu);
167 
    /* VM coredump note writers: 64-bit pair, then 32-bit pair. */
168     int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
169                             int cpuid, void *opaque);
170     int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
171                                 void *opaque);
172     int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
173                             int cpuid, void *opaque);
174     int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
175                                 void *opaque);
176 
    /* Migration and GDB register-set description. */
177     const struct VMStateDescription *vmsd;
178     int gdb_num_core_regs;
179     const char *gdb_core_xml_file;
180     bool gdb_stop_before_watchpoint;
181 
    /* cpu_exec entry/exit/interrupt-processing hooks. */
182     void (*cpu_exec_enter)(CPUState *cpu);
183     void (*cpu_exec_exit)(CPUState *cpu);
184     bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
185 
186     void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
187 } CPUClass;
188
/* 16-bit halves of the icount decrementer.  Field order depends on host
 * endianness so that "low"/"high" always overlay the low/high halves of
 * the u32 view in CPUState's icount_decr union; per the CPUState docs,
 * the high half carries the interrupt flag bit. */
189 #ifdef HOST_WORDS_BIGENDIAN
190 typedef struct icount_decr_u16 {
191     uint16_t high;
192     uint16_t low;
193 } icount_decr_u16;
194 #else
195 typedef struct icount_decr_u16 {
196     uint16_t low;
197     uint16_t high;
198 } icount_decr_u16;
199 #endif
200
/* A guest breakpoint; linked into CPUState.breakpoints. */
201 typedef struct CPUBreakpoint {
202     vaddr pc;                          /* guest virtual address of the breakpoint */
203     int flags; /* BP_* */
204     QTAILQ_ENTRY(CPUBreakpoint) entry; /* link in CPUState.breakpoints */
205 } CPUBreakpoint;
206
/* A guest watchpoint over [vaddr, vaddr + len); linked into
 * CPUState.watchpoints. */
207 typedef struct CPUWatchpoint {
208     vaddr vaddr;                       /* start of the watched range */
209     vaddr len;                         /* length of the watched range */
210     vaddr hitaddr;                     /* NOTE(review): presumably the address that triggered the hit -- confirm */
211     MemTxAttrs hitattrs;               /* NOTE(review): presumably the attrs of the triggering access -- confirm */
212     int flags; /* BP_* */
213     QTAILQ_ENTRY(CPUWatchpoint) entry; /* link in CPUState.watchpoints */
214 } CPUWatchpoint;
215
216 struct KVMState;
217 struct kvm_run;
218
219 #define TB_JMP_CACHE_BITS 12
220 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
221
222 /**
223 * CPUState:
224 * @cpu_index: CPU index (informative).
225 * @nr_cores: Number of cores within this CPU package.
226 * @nr_threads: Number of threads within this CPU.
227 * @numa_node: NUMA node this CPU is belonging to.
228 * @host_tid: Host thread ID.
229 * @running: #true if CPU is currently running (usermode).
230 * @created: Indicates whether the CPU thread has been successfully created.
231 * @interrupt_request: Indicates a pending interrupt request.
232 * @halted: Nonzero if the CPU is in suspended state.
233 * @stop: Indicates a pending stop request.
234 * @stopped: Indicates the CPU has been artificially stopped.
235 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
236 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
237 * CPU and return to its top level loop.
238 * @singlestep_enabled: Flags for single-stepping.
239 * @icount_extra: Instructions until next timer event.
240 * @icount_decr: Number of cycles left, with interrupt flag in high bit.
241 * This allows a single read-compare-cbranch-write sequence to test
242 * for both decrementer underflow and exceptions.
243 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
244 * requires that IO only be performed on the last instruction of a TB
245 * so that interrupts take effect immediately.
246 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
247 * AddressSpaces this CPU has)
248 * @num_ases: number of CPUAddressSpaces in @cpu_ases
249 * @as: Pointer to the first AddressSpace, for the convenience of targets which
250 * only have a single AddressSpace
251 * @env_ptr: Pointer to subclass-specific CPUArchState field.
252 * @current_tb: Currently executing TB.
253 * @gdb_regs: Additional GDB registers.
254 * @gdb_num_regs: Number of total registers accessible to GDB.
255 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
256  * @node: QTAILQ entry linking CPUs sharing the TB cache.
257 * @opaque: User data.
258 * @mem_io_pc: Host Program Counter at which the memory was accessed.
259 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
260 * @kvm_fd: vCPU file descriptor for KVM.
261 * @work_mutex: Lock to prevent multiple access to queued_work_*.
262 * @queued_work_first: First asynchronous work pending.
263 *
264 * State of one CPU core or thread.
265 */
266 struct CPUState {
267     /*< private >*/
268     DeviceState parent_obj;
269     /*< public >*/
270 
271     int nr_cores;
272     int nr_threads;
273     int numa_node;
274 
    /* Host-thread state backing this vCPU. */
275     struct QemuThread *thread;
276 #ifdef _WIN32
277     HANDLE hThread;
278 #endif
279     int thread_id;
280     uint32_t host_tid;
281     bool running;
282     struct QemuCond *halt_cond;
283     bool thread_kicked;
284     bool created;
285     bool stop;
286     bool stopped;
287     bool crash_occurred;
288     bool exit_request;
289     uint32_t interrupt_request;
290     int singlestep_enabled;
291     int64_t icount_extra;
292     sigjmp_buf jmp_env; /* NOTE(review): presumably the siglongjmp target for leaving the exec loop -- confirm */
293 
    /* Asynchronous work queue; guarded by @work_mutex (see doc block above). */
294     QemuMutex work_mutex;
295     struct qemu_work_item *queued_work_first, *queued_work_last;
296 
    /* Address spaces and memory visible to this CPU (see doc block above). */
297     CPUAddressSpace *cpu_ases;
298     int num_ases;
299     AddressSpace *as;
300     MemoryRegion *memory;
301 
302     void *env_ptr; /* CPUArchState */
303     struct TranslationBlock *current_tb;
304     struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
305     struct GDBRegisterState *gdb_regs;
306     int gdb_num_regs;
307     int gdb_num_g_regs;
308     QTAILQ_ENTRY(CPUState) node; /* link in the global "cpus" tail queue */
309 
310     /* ice debug support */
311     QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;
312 
313     QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
314     CPUWatchpoint *watchpoint_hit;
315 
316     void *opaque;
317 
318     /* In order to avoid passing too many arguments to the MMIO helpers,
319      * we store some rarely used information in the CPU context.
320      */
321     uintptr_t mem_io_pc;
322     vaddr mem_io_vaddr;
323 
    /* KVM accelerator state. */
324     int kvm_fd;
325     bool kvm_vcpu_dirty;
326     struct KVMState *kvm_state;
327     struct kvm_run *kvm_run;
328 
329     /* TODO Move common fields from CPUArchState here. */
330     int cpu_index; /* used by alpha TCG */
331     uint32_t halted; /* used by alpha, cris, ppc TCG */
332     union {
333         uint32_t u32;
334         icount_decr_u16 u16;
335     } icount_decr;
336     uint32_t can_do_io;
337     int32_t exception_index; /* used by m68k TCG */
338 
339     /* Used to keep track of an outstanding cpu throttle thread for migration
340      * autoconverge
341      */
342     bool throttle_thread_scheduled;
343 
344     /* Note that this is accessed at the start of every TB via a negative
345        offset from AREG0.  Leave this field at the end so as to make the
346        (absolute value) offset as small as possible.  This reduces code
347        size, especially for hosts without large memory offsets.  */
348     uint32_t tcg_exit_req;
349 };
350
351 QTAILQ_HEAD(CPUTailQ, CPUState);
352 extern struct CPUTailQ cpus;
353 #define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
354 #define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
355 #define CPU_FOREACH_SAFE(cpu, next_cpu) \
356 QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
357 #define CPU_FOREACH_REVERSE(cpu) \
358 QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node)
359 #define first_cpu QTAILQ_FIRST(&cpus)
360
361 extern __thread CPUState *current_cpu;
362
363 /**
364 * cpu_paging_enabled:
365 * @cpu: The CPU whose state is to be inspected.
366 *
367 * Returns: %true if paging is enabled, %false otherwise.
368 */
369 bool cpu_paging_enabled(const CPUState *cpu);
370
371 /**
372 * cpu_get_memory_mapping:
373 * @cpu: The CPU whose memory mappings are to be obtained.
374 * @list: Where to write the memory mappings to.
375 * @errp: Pointer for reporting an #Error.
376 */
377 void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
378 Error **errp);
379
380 /**
381 * cpu_write_elf64_note:
382 * @f: pointer to a function that writes memory to a file
383 * @cpu: The CPU whose memory is to be dumped
384 * @cpuid: ID number of the CPU
385 * @opaque: pointer to the CPUState struct
386 */
387 int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
388 int cpuid, void *opaque);
389
390 /**
391 * cpu_write_elf64_qemunote:
392 * @f: pointer to a function that writes memory to a file
393 * @cpu: The CPU whose memory is to be dumped
395  * @opaque: pointer to the CPUState struct
396 */
397 int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
398 void *opaque);
399
400 /**
401 * cpu_write_elf32_note:
402 * @f: pointer to a function that writes memory to a file
403 * @cpu: The CPU whose memory is to be dumped
404 * @cpuid: ID number of the CPU
405 * @opaque: pointer to the CPUState struct
406 */
407 int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
408 int cpuid, void *opaque);
409
410 /**
411 * cpu_write_elf32_qemunote:
412 * @f: pointer to a function that writes memory to a file
413 * @cpu: The CPU whose memory is to be dumped
415  * @opaque: pointer to the CPUState struct
416 */
417 int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
418 void *opaque);
419
420 /**
421 * CPUDumpFlags:
422 * @CPU_DUMP_CODE:
423 * @CPU_DUMP_FPU: dump FPU register state, not just integer
424 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
425 */
426 enum CPUDumpFlags {
427 CPU_DUMP_CODE = 0x00010000,
428 CPU_DUMP_FPU = 0x00020000,
429 CPU_DUMP_CCOP = 0x00040000,
430 };
431
432 /**
433 * cpu_dump_state:
434 * @cpu: The CPU whose state is to be dumped.
435 * @f: File to dump to.
436 * @cpu_fprintf: Function to dump with.
437 * @flags: Flags what to dump.
438 *
439 * Dumps CPU state.
440 */
441 void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
442 int flags);
443
444 /**
445 * cpu_dump_statistics:
446 * @cpu: The CPU whose state is to be dumped.
447 * @f: File to dump to.
448 * @cpu_fprintf: Function to dump with.
449 * @flags: Flags what to dump.
450 *
451 * Dumps CPU statistics.
452 */
453 void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
454 int flags);
455
456 #ifndef CONFIG_USER_ONLY
457 /**
458 * cpu_get_phys_page_attrs_debug:
459 * @cpu: The CPU to obtain the physical page address for.
460 * @addr: The virtual address.
461 * @attrs: Updated on return with the memory transaction attributes to use
462 * for this access.
463 *
464 * Obtains the physical page corresponding to a virtual one, together
465 * with the corresponding memory transaction attributes to use for the access.
466 * Use it only for debugging because no protection checks are done.
467 *
468 * Returns: Corresponding physical page address or -1 if no page found.
469 */
470 static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
471 MemTxAttrs *attrs)
472 {
473 CPUClass *cc = CPU_GET_CLASS(cpu);
474
475 if (cc->get_phys_page_attrs_debug) {
476 return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
477 }
478 /* Fallback for CPUs which don't implement the _attrs_ hook */
479 *attrs = MEMTXATTRS_UNSPECIFIED;
480 return cc->get_phys_page_debug(cpu, addr);
481 }
482
483 /**
484 * cpu_get_phys_page_debug:
485 * @cpu: The CPU to obtain the physical page address for.
486 * @addr: The virtual address.
487 *
488 * Obtains the physical page corresponding to a virtual one.
489 * Use it only for debugging because no protection checks are done.
490 *
491 * Returns: Corresponding physical page address or -1 if no page found.
492 */
493 static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
494 {
495 MemTxAttrs attrs = {};
496
497 return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
498 }
499
500 /** cpu_asidx_from_attrs:
501 * @cpu: CPU
502 * @attrs: memory transaction attributes
503 *
504 * Returns the address space index specifying the CPU AddressSpace
505 * to use for a memory access with the given transaction attributes.
506 */
507 static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
508 {
509 CPUClass *cc = CPU_GET_CLASS(cpu);
510
511 if (cc->asidx_from_attrs) {
512 return cc->asidx_from_attrs(cpu, attrs);
513 }
514 return 0;
515 }
516 #endif
517
518 /**
519 * cpu_reset:
520 * @cpu: The CPU whose state is to be reset.
521 */
522 void cpu_reset(CPUState *cpu);
523
524 /**
525 * cpu_class_by_name:
526 * @typename: The CPU base type.
527 * @cpu_model: The model string without any parameters.
528 *
529 * Looks up a CPU #ObjectClass matching name @cpu_model.
530 *
531  * Returns: A #CPUClass or %NULL if no matching class is found.
532 */
533 ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
534
535 /**
536 * cpu_generic_init:
537 * @typename: The CPU base type.
538 * @cpu_model: The model string including optional parameters.
539 *
540 * Instantiates a CPU, processes optional parameters and realizes the CPU.
541 *
542 * Returns: A #CPUState or %NULL if an error occurred.
543 */
544 CPUState *cpu_generic_init(const char *typename, const char *cpu_model);
545
546 /**
547 * cpu_has_work:
548 * @cpu: The vCPU to check.
549 *
550 * Checks whether the CPU has work to do.
551 *
552 * Returns: %true if the CPU has work, %false otherwise.
553 */
554 static inline bool cpu_has_work(CPUState *cpu)
555 {
556 CPUClass *cc = CPU_GET_CLASS(cpu);
557
558 g_assert(cc->has_work);
559 return cc->has_work(cpu);
560 }
561
562 /**
563 * qemu_cpu_is_self:
564 * @cpu: The vCPU to check against.
565 *
566 * Checks whether the caller is executing on the vCPU thread.
567 *
568 * Returns: %true if called from @cpu's thread, %false otherwise.
569 */
570 bool qemu_cpu_is_self(CPUState *cpu);
571
572 /**
573 * qemu_cpu_kick:
574 * @cpu: The vCPU to kick.
575 *
576 * Kicks @cpu's thread.
577 */
578 void qemu_cpu_kick(CPUState *cpu);
579
580 /**
581 * cpu_is_stopped:
582 * @cpu: The CPU to check.
583 *
584 * Checks whether the CPU is stopped.
585 *
586 * Returns: %true if run state is not running or if artificially stopped;
587 * %false otherwise.
588 */
589 bool cpu_is_stopped(CPUState *cpu);
590
591 /**
592 * run_on_cpu:
593 * @cpu: The vCPU to run on.
594 * @func: The function to be executed.
595 * @data: Data to pass to the function.
596 *
597 * Schedules the function @func for execution on the vCPU @cpu.
598 */
599 void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
600
601 /**
602 * async_run_on_cpu:
603 * @cpu: The vCPU to run on.
604 * @func: The function to be executed.
605 * @data: Data to pass to the function.
606 *
607 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
608 */
609 void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
610
611 /**
612 * qemu_get_cpu:
613 * @index: The CPUState@cpu_index value of the CPU to obtain.
614 *
615 * Gets a CPU matching @index.
616 *
617 * Returns: The CPU or %NULL if there is no matching CPU.
618 */
619 CPUState *qemu_get_cpu(int index);
620
621 /**
622 * cpu_exists:
623 * @id: Guest-exposed CPU ID to lookup.
624 *
625 * Search for CPU with specified ID.
626 *
627 * Returns: %true - CPU is found, %false - CPU isn't found.
628 */
629 bool cpu_exists(int64_t id);
630
631 /**
632 * cpu_throttle_set:
633 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
634 *
635 * Throttles all vcpus by forcing them to sleep for the given percentage of
636 * time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly.
637 * (example: 10ms sleep for every 30ms awake).
638 *
639 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
640 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
641 * is called.
642 */
643 void cpu_throttle_set(int new_throttle_pct);
644
645 /**
646 * cpu_throttle_stop:
647 *
648 * Stops the vcpu throttling started by cpu_throttle_set.
649 */
650 void cpu_throttle_stop(void);
651
652 /**
653 * cpu_throttle_active:
654 *
655 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
656 */
657 bool cpu_throttle_active(void);
658
659 /**
660 * cpu_throttle_get_percentage:
661 *
662 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
663 *
664 * Returns: The throttle percentage in range 1 to 99.
665 */
666 int cpu_throttle_get_percentage(void);
667
668 #ifndef CONFIG_USER_ONLY
669
670 typedef void (*CPUInterruptHandler)(CPUState *, int);
671
672 extern CPUInterruptHandler cpu_interrupt_handler;
673
674 /**
675  * cpu_interrupt:
676  * @cpu: The CPU to set an interrupt on.
677  * @mask: The interrupts to set.
678  *
679  * Invokes the interrupt handler via the global cpu_interrupt_handler hook.
680  */
681 static inline void cpu_interrupt(CPUState *cpu, int mask)
682 {
683     cpu_interrupt_handler(cpu, mask);
684 }
685
686 #else /* USER_ONLY */
687
688 void cpu_interrupt(CPUState *cpu, int mask);
689
690 #endif /* USER_ONLY */
691
692 #ifdef CONFIG_SOFTMMU
693 static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
694 bool is_write, bool is_exec,
695 int opaque, unsigned size)
696 {
697 CPUClass *cc = CPU_GET_CLASS(cpu);
698
699 if (cc->do_unassigned_access) {
700 cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
701 }
702 }
703
704 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
705 int is_write, int is_user,
706 uintptr_t retaddr)
707 {
708 CPUClass *cc = CPU_GET_CLASS(cpu);
709
710 cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
711 }
712 #endif
713
714 /**
715 * cpu_set_pc:
716 * @cpu: The CPU to set the program counter for.
717 * @addr: Program counter value.
718 *
719 * Sets the program counter for a CPU.
720 */
721 static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
722 {
723 CPUClass *cc = CPU_GET_CLASS(cpu);
724
725 cc->set_pc(cpu, addr);
726 }
727
728 /**
729 * cpu_reset_interrupt:
730 * @cpu: The CPU to clear the interrupt on.
731 * @mask: The interrupt mask to clear.
732 *
733 * Resets interrupts on the vCPU @cpu.
734 */
735 void cpu_reset_interrupt(CPUState *cpu, int mask);
736
737 /**
738 * cpu_exit:
739 * @cpu: The CPU to exit.
740 *
741 * Requests the CPU @cpu to exit execution.
742 */
743 void cpu_exit(CPUState *cpu);
744
745 /**
746 * cpu_resume:
747 * @cpu: The CPU to resume.
748 *
749 * Resumes CPU, i.e. puts CPU into runnable state.
750 */
751 void cpu_resume(CPUState *cpu);
752
753 /**
754 * qemu_init_vcpu:
755 * @cpu: The vCPU to initialize.
756 *
757 * Initializes a vCPU.
758 */
759 void qemu_init_vcpu(CPUState *cpu);
760
761 #define SSTEP_ENABLE 0x1  /* Enable simulated HW single stepping */
762 #define SSTEP_NOIRQ  0x2  /* Do not use IRQ while single stepping */
763 #define SSTEP_NOTIMER 0x4 /* Do not use timers while single stepping */
764
765 /**
766 * cpu_single_step:
767  * @cpu: CPU to set the single-step flags for.
768 * @enabled: Flags to enable.
769 *
770 * Enables or disables single-stepping for @cpu.
771 */
772 void cpu_single_step(CPUState *cpu, int enabled);
773
774 /* Breakpoint/watchpoint flags */
775 #define BP_MEM_READ 0x01
776 #define BP_MEM_WRITE 0x02
777 #define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
778 #define BP_STOP_BEFORE_ACCESS 0x04
779 /* 0x08 currently unused */
780 #define BP_GDB 0x10
781 #define BP_CPU 0x20
782 #define BP_ANY (BP_GDB | BP_CPU)
783 #define BP_WATCHPOINT_HIT_READ 0x40
784 #define BP_WATCHPOINT_HIT_WRITE 0x80
785 #define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
786
787 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
788 CPUBreakpoint **breakpoint);
789 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
790 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
791 void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
792
793 /* Return true if PC matches an installed breakpoint. */
794 static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
795 {
796 CPUBreakpoint *bp;
797
798 if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
799 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
800 if (bp->pc == pc && (bp->flags & mask)) {
801 return true;
802 }
803 }
804 }
805 return false;
806 }
807
808 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
809 int flags, CPUWatchpoint **watchpoint);
810 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
811 vaddr len, int flags);
812 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
813 void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
814
815 void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
816 GCC_FMT_ATTR(2, 3);
817 void cpu_exec_exit(CPUState *cpu);
818
819 #ifdef CONFIG_SOFTMMU
820 extern const struct VMStateDescription vmstate_cpu_common;
821 #else
822 #define vmstate_cpu_common vmstate_dummy
823 #endif
824
825 #define VMSTATE_CPU() { \
826 .name = "parent_obj", \
827 .size = sizeof(CPUState), \
828 .vmsd = &vmstate_cpu_common, \
829 .flags = VMS_STRUCT, \
830 .offset = 0, \
831 }
832
833 #endif