/*
 * QEMU CPU model
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */
#ifndef QEMU_CPU_H
#define QEMU_CPU_H

#include "hw/qdev-core.h"
#include "disas/bfd.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "qemu/queue.h"
#include "qemu/thread.h"

typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
                                     void *opaque);

/**
 * vaddr:
 * Type wide enough to contain any #target_ulong virtual address.
 */
typedef uint64_t vaddr;
#define VADDR_PRId PRId64
#define VADDR_PRIu PRIu64
#define VADDR_PRIo PRIo64
#define VADDR_PRIx PRIx64
#define VADDR_PRIX PRIX64
#define VADDR_MAX UINT64_MAX

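/* Illustrative sketch (not part of the original header): printing a vaddr in a
 * width-independent way using the VADDR_PRI* macros; the address value below
 * is made up.
 *
 *     vaddr pc = 0x40001000;
 *     printf("pc = 0x%" VADDR_PRIx "\n", pc);
 */
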
/**
 * SECTION:cpu
 * @section_id: QEMU-cpu
 * @title: CPU Class
 * @short_description: Base class for all CPUs
 */

#define TYPE_CPU "cpu"

/* Since this macro is used a lot in hot code paths and in conjunction with
 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
 * an unchecked cast.
 */
#define CPU(obj) ((CPUState *)(obj))

#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)

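/* Illustrative sketch (not part of the original header): typical use of the
 * cast macros, assuming a hypothetical FooCPU type with a checked FOO_CPU()
 * QOM cast.
 *
 *     FooCPU *foo = FOO_CPU(obj);        // checked QOM cast
 *     CPUState *cs = CPU(foo);           // cheap, unchecked cast
 *     CPUClass *cc = CPU_GET_CLASS(cs);  // class lookup for hook dispatch
 */
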
typedef struct CPUWatchpoint CPUWatchpoint;

typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
                                    bool is_write, bool is_exec, int opaque,
                                    unsigned size);

struct TranslationBlock;

/**
 * CPUClass:
 * @class_by_name: Callback to map -cpu command line model name to an
 * instantiatable CPU type.
 * @parse_features: Callback to parse command line arguments.
 * @reset: Callback to reset the #CPUState to its initial state.
 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
 * @has_work: Callback for checking if there is work to do.
 * @do_interrupt: Callback for interrupt handling.
 * @do_unassigned_access: Callback for unassigned access handling.
 * @do_unaligned_access: Callback for unaligned access handling, if
 * the target defines #ALIGNED_ONLY.
 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
 * runtime configurable endianness is currently big-endian. Non-configurable
 * CPUs can use the default implementation of this method. This method should
 * not be used by any callers other than the pre-1.0 virtio devices.
 * @memory_rw_debug: Callback for GDB memory access.
 * @dump_state: Callback for dumping state.
 * @dump_statistics: Callback for dumping statistics.
 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
 * @get_memory_mapping: Callback for obtaining the memory mappings.
 * @set_pc: Callback for setting the Program Counter register.
 * @synchronize_from_tb: Callback for synchronizing state from a TCG
 * #TranslationBlock.
 * @handle_mmu_fault: Callback for handling an MMU fault.
 * @get_phys_page_debug: Callback for obtaining a physical address.
 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
 * associated memory transaction attributes to use for the access.
 * CPUs which use memory transaction attributes should implement this
 * instead of get_phys_page_debug.
 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
 * a memory access with the specified memory transaction attributes.
 * @gdb_read_register: Callback for letting GDB read a register.
 * @gdb_write_register: Callback for letting GDB write a register.
 * @debug_check_watchpoint: Callback: return %true if the architectural
 * watchpoint whose address has matched should really fire.
 * @debug_excp_handler: Callback for handling debug exceptions.
 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
 * 64-bit VM coredump.
 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 64-bit VM coredump.
 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
 * 32-bit VM coredump.
 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 32-bit VM coredump.
 * @vmsd: State description for migration.
 * @gdb_num_core_regs: Number of core registers accessible to GDB.
 * @gdb_core_xml_file: File name for core registers GDB XML description.
 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
 * before the insn which triggers a watchpoint rather than after it.
 * @gdb_arch_name: Optional callback that returns the architecture name known
 * to GDB. The caller must free the returned string with g_free.
 * @cpu_exec_enter: Callback for cpu_exec preparation.
 * @cpu_exec_exit: Callback for cpu_exec cleanup.
 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
 * @disas_set_info: Callback to set up architecture-specific components of the
 * disassembly info.
 *
 * Represents a CPU family or model.
 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(CPUState *cpu, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                int is_write, int is_user, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                       int flags);
    void (*dump_statistics)(CPUState *cpu, FILE *f,
                            fprintf_function cpu_fprintf, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
                            int mmu_index);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
                                        MemTxAttrs *attrs);
    int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
    void (*debug_excp_handler)(CPUState *cpu);

    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    const struct VMStateDescription *vmsd;
    int gdb_num_core_regs;
    const char *gdb_core_xml_file;
    gchar * (*gdb_arch_name)(CPUState *cpu);
    bool gdb_stop_before_watchpoint;

    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
} CPUClass;

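/* Illustrative sketch (not part of the original header): a target's class_init
 * typically fills in a subset of these hooks. FooCPU and the foo_cpu_*
 * functions below are hypothetical.
 *
 *     static void foo_cpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *
 *         cc->class_by_name = foo_cpu_class_by_name;
 *         cc->has_work = foo_cpu_has_work;
 *         cc->do_interrupt = foo_cpu_do_interrupt;
 *         cc->set_pc = foo_cpu_set_pc;
 *         cc->dump_state = foo_cpu_dump_state;
 *         cc->get_phys_page_debug = foo_cpu_get_phys_page_debug;
 *         cc->gdb_num_core_regs = 32;
 *     }
 */
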
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif

typedef struct CPUBreakpoint {
    vaddr pc;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;

struct CPUWatchpoint {
    vaddr vaddr;
    vaddr len;
    vaddr hitaddr;
    MemTxAttrs hitattrs;
    int flags; /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
};

struct KVMState;
struct kvm_run;

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/* work queue */
struct qemu_work_item {
    struct qemu_work_item *next;
    void (*func)(void *data);
    void *data;
    int done;
    bool free;
};

/**
 * CPUState:
 * @cpu_index: CPU index (informative).
 * @nr_cores: Number of cores within this CPU package.
 * @nr_threads: Number of threads within this CPU.
 * @numa_node: NUMA node this CPU belongs to.
 * @host_tid: Host thread ID.
 * @running: %true if CPU is currently running (usermode).
 * @created: Indicates whether the CPU thread has been successfully created.
 * @interrupt_request: Indicates a pending interrupt request.
 * @halted: Nonzero if the CPU is in suspended state.
 * @stop: Indicates a pending stop request.
 * @stopped: Indicates the CPU has been artificially stopped.
 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU.
 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
 * CPU and return to its top level loop.
 * @tb_flushed: Indicates the translation buffer has been flushed.
 * @singlestep_enabled: Flags for single-stepping.
 * @icount_extra: Instructions until next timer event.
 * @icount_decr: Number of cycles left, with interrupt flag in high bit.
 * This allows a single read-compare-cbranch-write sequence to test
 * for both decrementer underflow and exceptions.
 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
 * requires that IO only be performed on the last instruction of a TB
 * so that interrupts take effect immediately.
 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
 * AddressSpaces this CPU has)
 * @num_ases: number of CPUAddressSpaces in @cpu_ases
 * @as: Pointer to the first AddressSpace, for the convenience of targets which
 * only have a single AddressSpace
 * @env_ptr: Pointer to subclass-specific CPUArchState field.
 * @gdb_regs: Additional GDB registers.
 * @gdb_num_regs: Number of total registers accessible to GDB.
 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
 * @next_cpu: Next CPU sharing TB cache.
 * @opaque: User data.
 * @mem_io_pc: Host Program Counter at which the memory was accessed.
 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
 * @kvm_fd: vCPU file descriptor for KVM.
 * @work_mutex: Lock to prevent multiple access to queued_work_*.
 * @queued_work_first: First asynchronous work pending.
 *
 * State of one CPU core or thread.
 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;
    int numa_node;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    uint32_t host_tid;
    bool running;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    bool crash_occurred;
    bool exit_request;
    bool tb_flushed;
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_extra;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    struct qemu_work_item *queued_work_first, *queued_work_last;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    void *env_ptr; /* CPUArchState */
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;

    int kvm_fd;
    bool kvm_vcpu_dirty;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index; /* used by alpha TCG */
    uint32_t halted; /* used by alpha, cris, ppc TCG */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;
    uint32_t can_do_io;
    int32_t exception_index; /* used by m68k TCG */

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0.  Leave this field at the end so as to make the
       (absolute value) offset as small as possible.  This reduces code
       size, especially for hosts without large memory offsets. */
    uint32_t tcg_exit_req;
};

QTAILQ_HEAD(CPUTailQ, CPUState);
extern struct CPUTailQ cpus;
#define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
#define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
#define CPU_FOREACH_REVERSE(cpu) \
    QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node)
#define first_cpu QTAILQ_FIRST(&cpus)

extern __thread CPUState *current_cpu;

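/* Illustrative sketch (not part of the original header): iterating over all
 * CPUs with CPU_FOREACH.
 *
 *     CPUState *cpu;
 *
 *     CPU_FOREACH(cpu) {
 *         printf("cpu %d halted=%u\n", cpu->cpu_index, cpu->halted);
 *     }
 */
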
/**
 * cpu_paging_enabled:
 * @cpu: The CPU whose state is to be inspected.
 *
 * Returns: %true if paging is enabled, %false otherwise.
 */
bool cpu_paging_enabled(const CPUState *cpu);

/**
 * cpu_get_memory_mapping:
 * @cpu: The CPU whose memory mappings are to be obtained.
 * @list: Where to write the memory mappings to.
 * @errp: Pointer for reporting an #Error.
 */
void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                            Error **errp);

/**
 * cpu_write_elf64_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf64_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * cpu_write_elf32_note:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @cpuid: ID number of the CPU
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                         int cpuid, void *opaque);

/**
 * cpu_write_elf32_qemunote:
 * @f: pointer to a function that writes memory to a file
 * @cpu: The CPU whose memory is to be dumped
 * @opaque: pointer to the CPUState struct
 */
int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                             void *opaque);

/**
 * CPUDumpFlags:
 * @CPU_DUMP_CODE: also dump the guest code at the current program counter
 * @CPU_DUMP_FPU: dump FPU register state, not just integer
 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
 */
enum CPUDumpFlags {
    CPU_DUMP_CODE = 0x00010000,
    CPU_DUMP_FPU = 0x00020000,
    CPU_DUMP_CCOP = 0x00040000,
};

/**
 * cpu_dump_state:
 * @cpu: The CPU whose state is to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU state.
 */
void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                    int flags);

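/* Illustrative sketch (not part of the original header): dumping register and
 * FPU state plus the guest code at the PC to stderr, using plain fprintf as
 * the fprintf_function.
 *
 *     cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CODE);
 */
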
/**
 * cpu_dump_statistics:
 * @cpu: The CPU whose statistics are to be dumped.
 * @f: File to dump to.
 * @cpu_fprintf: Function to dump with.
 * @flags: Flags specifying what to dump.
 *
 * Dumps CPU statistics.
 */
void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

#ifndef CONFIG_USER_ONLY
/**
 * cpu_get_phys_page_attrs_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 * @attrs: Updated on return with the memory transaction attributes to use
 * for this access.
 *
 * Obtains the physical page corresponding to a virtual one, together
 * with the corresponding memory transaction attributes to use for the access.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                                   MemTxAttrs *attrs)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->get_phys_page_attrs_debug) {
        return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
    }
    /* Fallback for CPUs which don't implement the _attrs_ hook */
    *attrs = MEMTXATTRS_UNSPECIFIED;
    return cc->get_phys_page_debug(cpu, addr);
}

/**
 * cpu_get_phys_page_debug:
 * @cpu: The CPU to obtain the physical page address for.
 * @addr: The virtual address.
 *
 * Obtains the physical page corresponding to a virtual one.
 * Use it only for debugging because no protection checks are done.
 *
 * Returns: Corresponding physical page address or -1 if no page found.
 */
static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
{
    MemTxAttrs attrs = {};

    return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
}

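/* Illustrative sketch (not part of the original header): a debug-only
 * virtual-to-physical translation; -1 signals that no page is mapped.
 *
 *     hwaddr phys = cpu_get_phys_page_debug(cpu, addr);
 *     if (phys == -1) {
 *         // no mapping for this virtual address
 *     }
 */
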
/**
 * cpu_asidx_from_attrs:
 * @cpu: CPU
 * @attrs: memory transaction attributes
 *
 * Returns the address space index specifying the CPU AddressSpace
 * to use for a memory access with the given transaction attributes.
 */
static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->asidx_from_attrs) {
        return cc->asidx_from_attrs(cpu, attrs);
    }
    return 0;
}
#endif

/**
 * cpu_reset:
 * @cpu: The CPU whose state is to be reset.
 */
void cpu_reset(CPUState *cpu);

/**
 * cpu_class_by_name:
 * @typename: The CPU base type.
 * @cpu_model: The model string without any parameters.
 *
 * Looks up a CPU #ObjectClass matching name @cpu_model.
 *
 * Returns: A #CPUClass or %NULL if no matching class is found.
 */
ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);

/**
 * cpu_generic_init:
 * @typename: The CPU base type.
 * @cpu_model: The model string including optional parameters.
 *
 * Instantiates a CPU, processes optional parameters and realizes the CPU.
 *
 * Returns: A #CPUState or %NULL if an error occurred.
 */
CPUState *cpu_generic_init(const char *typename, const char *cpu_model);

/**
 * cpu_has_work:
 * @cpu: The vCPU to check.
 *
 * Checks whether the CPU has work to do.
 *
 * Returns: %true if the CPU has work, %false otherwise.
 */
static inline bool cpu_has_work(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    g_assert(cc->has_work);
    return cc->has_work(cpu);
}

/**
 * qemu_cpu_is_self:
 * @cpu: The vCPU to check against.
 *
 * Checks whether the caller is executing on the vCPU thread.
 *
 * Returns: %true if called from @cpu's thread, %false otherwise.
 */
bool qemu_cpu_is_self(CPUState *cpu);

/**
 * qemu_cpu_kick:
 * @cpu: The vCPU to kick.
 *
 * Kicks @cpu's thread.
 */
void qemu_cpu_kick(CPUState *cpu);

/**
 * cpu_is_stopped:
 * @cpu: The CPU to check.
 *
 * Checks whether the CPU is stopped.
 *
 * Returns: %true if run state is not running or if artificially stopped;
 * %false otherwise.
 */
bool cpu_is_stopped(CPUState *cpu);

/**
 * run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu.
 */
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

/**
 * async_run_on_cpu:
 * @cpu: The vCPU to run on.
 * @func: The function to be executed.
 * @data: Data to pass to the function.
 *
 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
 */
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);

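/* Illustrative sketch (not part of the original header): scheduling work on a
 * vCPU thread; do_work is a hypothetical callback matching the func signature.
 *
 *     static void do_work(void *data)
 *     {
 *         CPUState *cpu = data;
 *         // runs on cpu's thread
 *     }
 *
 *     CPUState *cpu = qemu_get_cpu(0);
 *     if (cpu) {
 *         run_on_cpu(cpu, do_work, cpu);        // waits for completion
 *         async_run_on_cpu(cpu, do_work, cpu);  // returns immediately
 *     }
 */
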
/**
 * qemu_get_cpu:
 * @index: The CPUState::cpu_index value of the CPU to obtain.
 *
 * Gets a CPU matching @index.
 *
 * Returns: The CPU or %NULL if there is no matching CPU.
 */
CPUState *qemu_get_cpu(int index);

/**
 * cpu_exists:
 * @id: Guest-exposed CPU ID to lookup.
 *
 * Search for CPU with specified ID.
 *
 * Returns: %true if a matching CPU is found, %false otherwise.
 */
bool cpu_exists(int64_t id);

/**
 * cpu_throttle_set:
 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
 *
 * Throttles all vcpus by forcing them to sleep for the given percentage of
 * time. A throttle_percentage of 25 roughly corresponds to a 75% duty cycle
 * (example: 10ms sleep for every 30ms awake).
 *
 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
 * Once throttling starts, it will remain in effect until cpu_throttle_stop
 * is called.
 */
void cpu_throttle_set(int new_throttle_pct);

/**
 * cpu_throttle_stop:
 *
 * Stops the vcpu throttling started by cpu_throttle_set.
 */
void cpu_throttle_stop(void);

/**
 * cpu_throttle_active:
 *
 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
 */
bool cpu_throttle_active(void);

/**
 * cpu_throttle_get_percentage:
 *
 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
 *
 * Returns: The throttle percentage in range 1 to 99.
 */
int cpu_throttle_get_percentage(void);

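/* Illustrative sketch (not part of the original header): throttling vCPUs to
 * 25% sleep time (roughly a 75% duty cycle, e.g. 10ms asleep per 30ms awake),
 * adjusting the percentage later, and then stopping.
 *
 *     cpu_throttle_set(25);
 *     ...
 *     cpu_throttle_set(50);             // adjust while active
 *     if (cpu_throttle_active()) {
 *         cpu_throttle_stop();
 *     }
 */
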
#ifndef CONFIG_USER_ONLY

typedef void (*CPUInterruptHandler)(CPUState *, int);

extern CPUInterruptHandler cpu_interrupt_handler;

/**
 * cpu_interrupt:
 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
 *
 * Invokes the interrupt handler.
 */
static inline void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu_interrupt_handler(cpu, mask);
}

#else /* USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask);

#endif /* USER_ONLY */

#ifdef CONFIG_SOFTMMU
static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                         bool is_write, bool is_exec,
                                         int opaque, unsigned size)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->do_unassigned_access) {
        cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
    }
}

static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                        int is_write, int is_user,
                                        uintptr_t retaddr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
}
#endif

/**
 * cpu_set_pc:
 * @cpu: The CPU to set the program counter for.
 * @addr: Program counter value.
 *
 * Sets the program counter for a CPU.
 */
static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cc->set_pc(cpu, addr);
}

/**
 * cpu_reset_interrupt:
 * @cpu: The CPU to clear the interrupt on.
 * @mask: The interrupt mask to clear.
 *
 * Resets interrupts on the vCPU @cpu.
 */
void cpu_reset_interrupt(CPUState *cpu, int mask);

/**
 * cpu_exit:
 * @cpu: The CPU to exit.
 *
 * Requests the CPU @cpu to exit execution.
 */
void cpu_exit(CPUState *cpu);

/**
 * cpu_resume:
 * @cpu: The CPU to resume.
 *
 * Resumes CPU, i.e. puts CPU into runnable state.
 */
void cpu_resume(CPUState *cpu);

/**
 * qemu_init_vcpu:
 * @cpu: The vCPU to initialize.
 *
 * Initializes a vCPU.
 */
void qemu_init_vcpu(CPUState *cpu);

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not run timers while single stepping */

/**
 * cpu_single_step:
 * @cpu: The CPU to set the single-step flags for.
 * @enabled: Flags to enable.
 *
 * Enables or disables single-stepping for @cpu.
 */
void cpu_single_step(CPUState *cpu, int enabled);

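/* Illustrative sketch (not part of the original header): enabling simulated
 * single-stepping without interrupts or timers, then disabling it again.
 *
 *     cpu_single_step(cpu, SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER);
 *     ...
 *     cpu_single_step(cpu, 0);
 */
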
/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
/* 0x08 currently unused */
#define BP_GDB                0x10
#define BP_CPU                0x20
#define BP_ANY                (BP_GDB | BP_CPU)
#define BP_WATCHPOINT_HIT_READ 0x40
#define BP_WATCHPOINT_HIT_WRITE 0x80
#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)

int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *cpu, int mask);

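/* Illustrative sketch (not part of the original header): inserting a
 * GDB-owned breakpoint at a guest PC and removing it again by reference.
 *
 *     CPUBreakpoint *bp;
 *
 *     if (cpu_breakpoint_insert(cpu, pc, BP_GDB, &bp) == 0) {
 *         ...
 *         cpu_breakpoint_remove_by_ref(cpu, bp);
 *     }
 *     cpu_breakpoint_remove_all(cpu, BP_GDB);   // or drop all GDB breakpoints
 */
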
/* Return true if PC matches an installed breakpoint. */
static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
{
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
            if (bp->pc == pc && (bp->flags & mask)) {
                return true;
            }
        }
    }
    return false;
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                          vaddr len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *cpu, int mask);

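/* Illustrative sketch (not part of the original header): watching a 4-byte
 * region for guest writes on behalf of GDB.
 *
 *     CPUWatchpoint *wp;
 *
 *     if (cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) == 0) {
 *         ...
 *         cpu_watchpoint_remove_by_ref(cpu, wp);
 *     }
 */
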
/**
 * cpu_get_address_space:
 * @cpu: CPU to get address space from
 * @asidx: index identifying which address space to get
 *
 * Return the requested address space of this CPU. @asidx
 * specifies which address space to read.
 */
AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);

void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);
void cpu_exec_exit(CPUState *cpu);

#ifdef CONFIG_SOFTMMU
extern const struct VMStateDescription vmstate_cpu_common;
#else
#define vmstate_cpu_common vmstate_dummy
#endif

#define VMSTATE_CPU() {                                                     \
    .name = "parent_obj",                                                   \
    .size = sizeof(CPUState),                                               \
    .vmsd = &vmstate_cpu_common,                                            \
    .flags = VMS_STRUCT,                                                    \
    .offset = 0,                                                            \
}

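/* Illustrative sketch (not part of the original header): VMSTATE_CPU() is
 * meant to appear as the first field of a target CPU's VMStateDescription.
 * FooCPU and env.some_reg are hypothetical.
 *
 *     static const VMStateDescription vmstate_foo_cpu = {
 *         .name = "cpu",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_CPU(),
 *             VMSTATE_UINT32(env.some_reg, FooCPU),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */
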
#endif