/*
 * Imported from mirror_qemu.git: include/qom/cpu.h
 * (commit: "Plumb the HAXM-based hardware acceleration support")
 */
1 /*
2 * QEMU CPU model
3 *
4 * Copyright (c) 2012 SUSE LINUX Products GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 */
20 #ifndef QEMU_CPU_H
21 #define QEMU_CPU_H
22
23 #include "hw/qdev-core.h"
24 #include "disas/bfd.h"
25 #include "exec/hwaddr.h"
26 #include "exec/memattrs.h"
27 #include "qemu/bitmap.h"
28 #include "qemu/queue.h"
29 #include "qemu/thread.h"
30
31 typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
32 void *opaque);
33
34 /**
35 * vaddr:
36 * Type wide enough to contain any #target_ulong virtual address.
37 */
38 typedef uint64_t vaddr;
39 #define VADDR_PRId PRId64
40 #define VADDR_PRIu PRIu64
41 #define VADDR_PRIo PRIo64
42 #define VADDR_PRIx PRIx64
43 #define VADDR_PRIX PRIX64
44 #define VADDR_MAX UINT64_MAX
45
46 /**
47 * SECTION:cpu
48 * @section_id: QEMU-cpu
49 * @title: CPU Class
50 * @short_description: Base class for all CPUs
51 */
52
53 #define TYPE_CPU "cpu"
54
55 /* Since this macro is used a lot in hot code paths and in conjunction with
56 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
57 * an unchecked cast.
58 */
59 #define CPU(obj) ((CPUState *)(obj))
60
61 #define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
62 #define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)
63
64 typedef enum MMUAccessType {
65 MMU_DATA_LOAD = 0,
66 MMU_DATA_STORE = 1,
67 MMU_INST_FETCH = 2
68 } MMUAccessType;
69
70 typedef struct CPUWatchpoint CPUWatchpoint;
71
72 typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
73 bool is_write, bool is_exec, int opaque,
74 unsigned size);
75
76 struct TranslationBlock;
77
78 /**
79 * CPUClass:
80 * @class_by_name: Callback to map -cpu command line model name to an
81 * instantiatable CPU type.
82 * @parse_features: Callback to parse command line arguments.
83 * @reset: Callback to reset the #CPUState to its initial state.
84 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
85 * @has_work: Callback for checking if there is work to do.
86 * @do_interrupt: Callback for interrupt handling.
87 * @do_unassigned_access: Callback for unassigned access handling.
88 * @do_unaligned_access: Callback for unaligned access handling, if
89 * the target defines #ALIGNED_ONLY.
90 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
91 * runtime configurable endianness is currently big-endian. Non-configurable
92 * CPUs can use the default implementation of this method. This method should
93 * not be used by any callers other than the pre-1.0 virtio devices.
94 * @memory_rw_debug: Callback for GDB memory access.
95 * @dump_state: Callback for dumping state.
96 * @dump_statistics: Callback for dumping statistics.
97 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
98 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
99 * @get_memory_mapping: Callback for obtaining the memory mappings.
100 * @set_pc: Callback for setting the Program Counter register.
101 * @synchronize_from_tb: Callback for synchronizing state from a TCG
102 * #TranslationBlock.
103 * @handle_mmu_fault: Callback for handling an MMU fault.
104 * @get_phys_page_debug: Callback for obtaining a physical address.
105 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
106 * associated memory transaction attributes to use for the access.
107 * CPUs which use memory transaction attributes should implement this
108 * instead of get_phys_page_debug.
109 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
110 * a memory access with the specified memory transaction attributes.
111 * @gdb_read_register: Callback for letting GDB read a register.
112 * @gdb_write_register: Callback for letting GDB write a register.
113 * @debug_check_watchpoint: Callback: return true if the architectural
114 * watchpoint whose address has matched should really fire.
115 * @debug_excp_handler: Callback for handling debug exceptions.
116 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
117 * 64-bit VM coredump.
 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
 * note to a 64-bit VM coredump.
120 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
121 * 32-bit VM coredump.
122 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
123 * note to a 32-bit VM coredump.
124 * @vmsd: State description for migration.
125 * @gdb_num_core_regs: Number of core registers accessible to GDB.
126 * @gdb_core_xml_file: File name for core registers GDB XML description.
127 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
128 * before the insn which triggers a watchpoint rather than after it.
129 * @gdb_arch_name: Optional callback that returns the architecture name known
130 * to GDB. The caller must free the returned string with g_free.
131 * @cpu_exec_enter: Callback for cpu_exec preparation.
132 * @cpu_exec_exit: Callback for cpu_exec cleanup.
133 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
134 * @disas_set_info: Setup architecture specific components of disassembly info
135 *
136 * Represents a CPU family or model.
137 */
138 typedef struct CPUClass {
139 /*< private >*/
140 DeviceClass parent_class;
141 /*< public >*/
142
143 ObjectClass *(*class_by_name)(const char *cpu_model);
144 void (*parse_features)(const char *typename, char *str, Error **errp);
145
146 void (*reset)(CPUState *cpu);
147 int reset_dump_flags;
148 bool (*has_work)(CPUState *cpu);
149 void (*do_interrupt)(CPUState *cpu);
150 CPUUnassignedAccess do_unassigned_access;
151 void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
152 MMUAccessType access_type,
153 int mmu_idx, uintptr_t retaddr);
154 bool (*virtio_is_big_endian)(CPUState *cpu);
155 int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
156 uint8_t *buf, int len, bool is_write);
157 void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
158 int flags);
159 void (*dump_statistics)(CPUState *cpu, FILE *f,
160 fprintf_function cpu_fprintf, int flags);
161 int64_t (*get_arch_id)(CPUState *cpu);
162 bool (*get_paging_enabled)(const CPUState *cpu);
163 void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
164 Error **errp);
165 void (*set_pc)(CPUState *cpu, vaddr value);
166 void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
167 int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
168 int mmu_index);
169 hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
170 hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
171 MemTxAttrs *attrs);
172 int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
173 int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
174 int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
175 bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
176 void (*debug_excp_handler)(CPUState *cpu);
177
178 int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
179 int cpuid, void *opaque);
180 int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
181 void *opaque);
182 int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
183 int cpuid, void *opaque);
184 int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
185 void *opaque);
186
187 const struct VMStateDescription *vmsd;
188 int gdb_num_core_regs;
189 const char *gdb_core_xml_file;
190 gchar * (*gdb_arch_name)(CPUState *cpu);
191 bool gdb_stop_before_watchpoint;
192
193 void (*cpu_exec_enter)(CPUState *cpu);
194 void (*cpu_exec_exit)(CPUState *cpu);
195 bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
196
197 void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
198 } CPUClass;
199
200 #ifdef HOST_WORDS_BIGENDIAN
201 typedef struct icount_decr_u16 {
202 uint16_t high;
203 uint16_t low;
204 } icount_decr_u16;
205 #else
206 typedef struct icount_decr_u16 {
207 uint16_t low;
208 uint16_t high;
209 } icount_decr_u16;
210 #endif
211
212 typedef struct CPUBreakpoint {
213 vaddr pc;
214 int flags; /* BP_* */
215 QTAILQ_ENTRY(CPUBreakpoint) entry;
216 } CPUBreakpoint;
217
218 struct CPUWatchpoint {
219 vaddr vaddr;
220 vaddr len;
221 vaddr hitaddr;
222 MemTxAttrs hitattrs;
223 int flags; /* BP_* */
224 QTAILQ_ENTRY(CPUWatchpoint) entry;
225 };
226
227 struct KVMState;
228 struct kvm_run;
229
230 struct hax_vcpu_state;
231
232 #define TB_JMP_CACHE_BITS 12
233 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
234
235 /* work queue */
236
237 /* The union type allows passing of 64 bit target pointers on 32 bit
238 * hosts in a single parameter
239 */
240 typedef union {
241 int host_int;
242 unsigned long host_ulong;
243 void *host_ptr;
244 vaddr target_ptr;
245 } run_on_cpu_data;
246
247 #define RUN_ON_CPU_HOST_PTR(p) ((run_on_cpu_data){.host_ptr = (p)})
248 #define RUN_ON_CPU_HOST_INT(i) ((run_on_cpu_data){.host_int = (i)})
249 #define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
250 #define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)})
251 #define RUN_ON_CPU_NULL RUN_ON_CPU_HOST_PTR(NULL)
252
253 typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
254
255 struct qemu_work_item;
256
257 /**
258 * CPUState:
259 * @cpu_index: CPU index (informative).
260 * @nr_cores: Number of cores within this CPU package.
261 * @nr_threads: Number of threads within this CPU.
262 * @numa_node: NUMA node this CPU is belonging to.
263 * @host_tid: Host thread ID.
264 * @running: #true if CPU is currently running (lockless).
265 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
266 * valid under cpu_list_lock.
267 * @created: Indicates whether the CPU thread has been successfully created.
268 * @interrupt_request: Indicates a pending interrupt request.
269 * @halted: Nonzero if the CPU is in suspended state.
270 * @stop: Indicates a pending stop request.
271 * @stopped: Indicates the CPU has been artificially stopped.
272 * @unplug: Indicates a pending CPU unplug request.
273 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
274 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
275 * CPU and return to its top level loop.
276 * @singlestep_enabled: Flags for single-stepping.
277 * @icount_extra: Instructions until next timer event.
278 * @icount_decr: Number of cycles left, with interrupt flag in high bit.
279 * This allows a single read-compare-cbranch-write sequence to test
280 * for both decrementer underflow and exceptions.
281 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
282 * requires that IO only be performed on the last instruction of a TB
283 * so that interrupts take effect immediately.
284 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
285 * AddressSpaces this CPU has)
286 * @num_ases: number of CPUAddressSpaces in @cpu_ases
287 * @as: Pointer to the first AddressSpace, for the convenience of targets which
288 * only have a single AddressSpace
289 * @env_ptr: Pointer to subclass-specific CPUArchState field.
290 * @gdb_regs: Additional GDB registers.
291 * @gdb_num_regs: Number of total registers accessible to GDB.
292 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
293 * @next_cpu: Next CPU sharing TB cache.
294 * @opaque: User data.
295 * @mem_io_pc: Host Program Counter at which the memory was accessed.
296 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
297 * @kvm_fd: vCPU file descriptor for KVM.
298 * @work_mutex: Lock to prevent multiple access to queued_work_*.
299 * @queued_work_first: First asynchronous work pending.
300 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
301 *
302 * State of one CPU core or thread.
303 */
304 struct CPUState {
305 /*< private >*/
306 DeviceState parent_obj;
307 /*< public >*/
308
309 int nr_cores;
310 int nr_threads;
311 int numa_node;
312
313 struct QemuThread *thread;
314 #ifdef _WIN32
315 HANDLE hThread;
316 #endif
317 int thread_id;
318 uint32_t host_tid;
319 bool running, has_waiter;
320 struct QemuCond *halt_cond;
321 bool thread_kicked;
322 bool created;
323 bool stop;
324 bool stopped;
325 bool unplug;
326 bool crash_occurred;
327 bool exit_request;
328 uint32_t interrupt_request;
329 int singlestep_enabled;
330 int64_t icount_extra;
331 sigjmp_buf jmp_env;
332
333 QemuMutex work_mutex;
334 struct qemu_work_item *queued_work_first, *queued_work_last;
335
336 CPUAddressSpace *cpu_ases;
337 int num_ases;
338 AddressSpace *as;
339 MemoryRegion *memory;
340
341 void *env_ptr; /* CPUArchState */
342
343 /* Writes protected by tb_lock, reads not thread-safe */
344 struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
345
346 struct GDBRegisterState *gdb_regs;
347 int gdb_num_regs;
348 int gdb_num_g_regs;
349 QTAILQ_ENTRY(CPUState) node;
350
351 /* ice debug support */
352 QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;
353
354 QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
355 CPUWatchpoint *watchpoint_hit;
356
357 void *opaque;
358
359 /* In order to avoid passing too many arguments to the MMIO helpers,
360 * we store some rarely used information in the CPU context.
361 */
362 uintptr_t mem_io_pc;
363 vaddr mem_io_vaddr;
364
365 int kvm_fd;
366 bool kvm_vcpu_dirty;
367 struct KVMState *kvm_state;
368 struct kvm_run *kvm_run;
369
370 /*
371 * Used for events with 'vcpu' and *without* the 'disabled' properties.
372 * Dynamically allocated based on bitmap requried to hold up to
373 * trace_get_vcpu_event_count() entries.
374 */
375 unsigned long *trace_dstate;
376
377 /* TODO Move common fields from CPUArchState here. */
378 int cpu_index; /* used by alpha TCG */
379 uint32_t halted; /* used by alpha, cris, ppc TCG */
380 union {
381 uint32_t u32;
382 icount_decr_u16 u16;
383 } icount_decr;
384 uint32_t can_do_io;
385 int32_t exception_index; /* used by m68k TCG */
386
387 /* Used to keep track of an outstanding cpu throttle thread for migration
388 * autoconverge
389 */
390 bool throttle_thread_scheduled;
391
392 /* Note that this is accessed at the start of every TB via a negative
393 offset from AREG0. Leave this field at the end so as to make the
394 (absolute value) offset as small as possible. This reduces code
395 size, especially for hosts without large memory offsets. */
396 uint32_t tcg_exit_req;
397
398 bool hax_vcpu_dirty;
399 struct hax_vcpu_state *hax_vcpu;
400 };
401
402 QTAILQ_HEAD(CPUTailQ, CPUState);
403 extern struct CPUTailQ cpus;
404 #define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
405 #define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
406 #define CPU_FOREACH_SAFE(cpu, next_cpu) \
407 QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
408 #define CPU_FOREACH_REVERSE(cpu) \
409 QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node)
410 #define first_cpu QTAILQ_FIRST(&cpus)
411
412 extern __thread CPUState *current_cpu;
413
414 /**
415 * cpu_paging_enabled:
416 * @cpu: The CPU whose state is to be inspected.
417 *
418 * Returns: %true if paging is enabled, %false otherwise.
419 */
420 bool cpu_paging_enabled(const CPUState *cpu);
421
422 /**
423 * cpu_get_memory_mapping:
424 * @cpu: The CPU whose memory mappings are to be obtained.
425 * @list: Where to write the memory mappings to.
426 * @errp: Pointer for reporting an #Error.
427 */
428 void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
429 Error **errp);
430
431 /**
432 * cpu_write_elf64_note:
433 * @f: pointer to a function that writes memory to a file
434 * @cpu: The CPU whose memory is to be dumped
435 * @cpuid: ID number of the CPU
436 * @opaque: pointer to the CPUState struct
437 */
438 int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
439 int cpuid, void *opaque);
440
441 /**
442 * cpu_write_elf64_qemunote:
443 * @f: pointer to a function that writes memory to a file
444 * @cpu: The CPU whose memory is to be dumped
445 * @cpuid: ID number of the CPU
446 * @opaque: pointer to the CPUState struct
447 */
448 int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
449 void *opaque);
450
451 /**
452 * cpu_write_elf32_note:
453 * @f: pointer to a function that writes memory to a file
454 * @cpu: The CPU whose memory is to be dumped
455 * @cpuid: ID number of the CPU
456 * @opaque: pointer to the CPUState struct
457 */
458 int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
459 int cpuid, void *opaque);
460
461 /**
462 * cpu_write_elf32_qemunote:
463 * @f: pointer to a function that writes memory to a file
464 * @cpu: The CPU whose memory is to be dumped
465 * @cpuid: ID number of the CPU
466 * @opaque: pointer to the CPUState struct
467 */
468 int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
469 void *opaque);
470
471 /**
472 * CPUDumpFlags:
473 * @CPU_DUMP_CODE:
474 * @CPU_DUMP_FPU: dump FPU register state, not just integer
475 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
476 */
477 enum CPUDumpFlags {
478 CPU_DUMP_CODE = 0x00010000,
479 CPU_DUMP_FPU = 0x00020000,
480 CPU_DUMP_CCOP = 0x00040000,
481 };
482
483 /**
484 * cpu_dump_state:
485 * @cpu: The CPU whose state is to be dumped.
486 * @f: File to dump to.
487 * @cpu_fprintf: Function to dump with.
488 * @flags: Flags what to dump.
489 *
490 * Dumps CPU state.
491 */
492 void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
493 int flags);
494
495 /**
496 * cpu_dump_statistics:
497 * @cpu: The CPU whose state is to be dumped.
498 * @f: File to dump to.
499 * @cpu_fprintf: Function to dump with.
500 * @flags: Flags what to dump.
501 *
502 * Dumps CPU statistics.
503 */
504 void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
505 int flags);
506
507 #ifndef CONFIG_USER_ONLY
508 /**
509 * cpu_get_phys_page_attrs_debug:
510 * @cpu: The CPU to obtain the physical page address for.
511 * @addr: The virtual address.
512 * @attrs: Updated on return with the memory transaction attributes to use
513 * for this access.
514 *
515 * Obtains the physical page corresponding to a virtual one, together
516 * with the corresponding memory transaction attributes to use for the access.
517 * Use it only for debugging because no protection checks are done.
518 *
519 * Returns: Corresponding physical page address or -1 if no page found.
520 */
521 static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
522 MemTxAttrs *attrs)
523 {
524 CPUClass *cc = CPU_GET_CLASS(cpu);
525
526 if (cc->get_phys_page_attrs_debug) {
527 return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
528 }
529 /* Fallback for CPUs which don't implement the _attrs_ hook */
530 *attrs = MEMTXATTRS_UNSPECIFIED;
531 return cc->get_phys_page_debug(cpu, addr);
532 }
533
534 /**
535 * cpu_get_phys_page_debug:
536 * @cpu: The CPU to obtain the physical page address for.
537 * @addr: The virtual address.
538 *
539 * Obtains the physical page corresponding to a virtual one.
540 * Use it only for debugging because no protection checks are done.
541 *
542 * Returns: Corresponding physical page address or -1 if no page found.
543 */
544 static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
545 {
546 MemTxAttrs attrs = {};
547
548 return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
549 }
550
551 /** cpu_asidx_from_attrs:
552 * @cpu: CPU
553 * @attrs: memory transaction attributes
554 *
555 * Returns the address space index specifying the CPU AddressSpace
556 * to use for a memory access with the given transaction attributes.
557 */
558 static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
559 {
560 CPUClass *cc = CPU_GET_CLASS(cpu);
561
562 if (cc->asidx_from_attrs) {
563 return cc->asidx_from_attrs(cpu, attrs);
564 }
565 return 0;
566 }
567 #endif
568
569 /**
570 * cpu_list_add:
571 * @cpu: The CPU to be added to the list of CPUs.
572 */
573 void cpu_list_add(CPUState *cpu);
574
575 /**
576 * cpu_list_remove:
577 * @cpu: The CPU to be removed from the list of CPUs.
578 */
579 void cpu_list_remove(CPUState *cpu);
580
581 /**
582 * cpu_reset:
583 * @cpu: The CPU whose state is to be reset.
584 */
585 void cpu_reset(CPUState *cpu);
586
587 /**
588 * cpu_class_by_name:
589 * @typename: The CPU base type.
590 * @cpu_model: The model string without any parameters.
591 *
592 * Looks up a CPU #ObjectClass matching name @cpu_model.
593 *
 * Returns: A #CPUClass or %NULL if no matching class is found.
595 */
596 ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
597
598 /**
599 * cpu_generic_init:
600 * @typename: The CPU base type.
601 * @cpu_model: The model string including optional parameters.
602 *
603 * Instantiates a CPU, processes optional parameters and realizes the CPU.
604 *
605 * Returns: A #CPUState or %NULL if an error occurred.
606 */
607 CPUState *cpu_generic_init(const char *typename, const char *cpu_model);
608
609 /**
610 * cpu_has_work:
611 * @cpu: The vCPU to check.
612 *
613 * Checks whether the CPU has work to do.
614 *
615 * Returns: %true if the CPU has work, %false otherwise.
616 */
617 static inline bool cpu_has_work(CPUState *cpu)
618 {
619 CPUClass *cc = CPU_GET_CLASS(cpu);
620
621 g_assert(cc->has_work);
622 return cc->has_work(cpu);
623 }
624
625 /**
626 * qemu_cpu_is_self:
627 * @cpu: The vCPU to check against.
628 *
629 * Checks whether the caller is executing on the vCPU thread.
630 *
631 * Returns: %true if called from @cpu's thread, %false otherwise.
632 */
633 bool qemu_cpu_is_self(CPUState *cpu);
634
635 /**
636 * qemu_cpu_kick:
637 * @cpu: The vCPU to kick.
638 *
639 * Kicks @cpu's thread.
640 */
641 void qemu_cpu_kick(CPUState *cpu);
642
643 /**
644 * cpu_is_stopped:
645 * @cpu: The CPU to check.
646 *
647 * Checks whether the CPU is stopped.
648 *
649 * Returns: %true if run state is not running or if artificially stopped;
650 * %false otherwise.
651 */
652 bool cpu_is_stopped(CPUState *cpu);
653
654 /**
655 * do_run_on_cpu:
656 * @cpu: The vCPU to run on.
657 * @func: The function to be executed.
658 * @data: Data to pass to the function.
659 * @mutex: Mutex to release while waiting for @func to run.
660 *
661 * Used internally in the implementation of run_on_cpu.
662 */
663 void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
664 QemuMutex *mutex);
665
666 /**
667 * run_on_cpu:
668 * @cpu: The vCPU to run on.
669 * @func: The function to be executed.
670 * @data: Data to pass to the function.
671 *
672 * Schedules the function @func for execution on the vCPU @cpu.
673 */
674 void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
675
676 /**
677 * async_run_on_cpu:
678 * @cpu: The vCPU to run on.
679 * @func: The function to be executed.
680 * @data: Data to pass to the function.
681 *
682 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
683 */
684 void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
685
686 /**
687 * async_safe_run_on_cpu:
688 * @cpu: The vCPU to run on.
689 * @func: The function to be executed.
690 * @data: Data to pass to the function.
691 *
692 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
693 * while all other vCPUs are sleeping.
694 *
695 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
696 * BQL.
697 */
698 void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
699
700 /**
701 * qemu_get_cpu:
702 * @index: The CPUState@cpu_index value of the CPU to obtain.
703 *
704 * Gets a CPU matching @index.
705 *
706 * Returns: The CPU or %NULL if there is no matching CPU.
707 */
708 CPUState *qemu_get_cpu(int index);
709
710 /**
711 * cpu_exists:
712 * @id: Guest-exposed CPU ID to lookup.
713 *
714 * Search for CPU with specified ID.
715 *
716 * Returns: %true - CPU is found, %false - CPU isn't found.
717 */
718 bool cpu_exists(int64_t id);
719
720 /**
721 * cpu_throttle_set:
722 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
723 *
724 * Throttles all vcpus by forcing them to sleep for the given percentage of
725 * time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly.
726 * (example: 10ms sleep for every 30ms awake).
727 *
728 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
729 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
730 * is called.
731 */
732 void cpu_throttle_set(int new_throttle_pct);
733
734 /**
735 * cpu_throttle_stop:
736 *
737 * Stops the vcpu throttling started by cpu_throttle_set.
738 */
739 void cpu_throttle_stop(void);
740
741 /**
742 * cpu_throttle_active:
743 *
744 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
745 */
746 bool cpu_throttle_active(void);
747
748 /**
749 * cpu_throttle_get_percentage:
750 *
751 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
752 *
753 * Returns: The throttle percentage in range 1 to 99.
754 */
755 int cpu_throttle_get_percentage(void);
756
757 #ifndef CONFIG_USER_ONLY
758
759 typedef void (*CPUInterruptHandler)(CPUState *, int);
760
761 extern CPUInterruptHandler cpu_interrupt_handler;
762
763 /**
764 * cpu_interrupt:
765 * @cpu: The CPU to set an interrupt on.
 * @mask: The interrupts to set.
767 *
768 * Invokes the interrupt handler.
769 */
static inline void cpu_interrupt(CPUState *cpu, int mask)
{
    /* Indirect through the global handler pointer declared above, so a
     * different implementation can be installed at runtime.
     */
    cpu_interrupt_handler(cpu, mask);
}
774
775 #else /* USER_ONLY */
776
777 void cpu_interrupt(CPUState *cpu, int mask);
778
779 #endif /* USER_ONLY */
780
781 #ifdef CONFIG_SOFTMMU
782 static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
783 bool is_write, bool is_exec,
784 int opaque, unsigned size)
785 {
786 CPUClass *cc = CPU_GET_CLASS(cpu);
787
788 if (cc->do_unassigned_access) {
789 cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
790 }
791 }
792
793 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
794 MMUAccessType access_type,
795 int mmu_idx, uintptr_t retaddr)
796 {
797 CPUClass *cc = CPU_GET_CLASS(cpu);
798
799 cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
800 }
801 #endif
802
803 /**
804 * cpu_set_pc:
805 * @cpu: The CPU to set the program counter for.
806 * @addr: Program counter value.
807 *
808 * Sets the program counter for a CPU.
809 */
810 static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
811 {
812 CPUClass *cc = CPU_GET_CLASS(cpu);
813
814 cc->set_pc(cpu, addr);
815 }
816
817 /**
818 * cpu_reset_interrupt:
819 * @cpu: The CPU to clear the interrupt on.
820 * @mask: The interrupt mask to clear.
821 *
822 * Resets interrupts on the vCPU @cpu.
823 */
824 void cpu_reset_interrupt(CPUState *cpu, int mask);
825
826 /**
827 * cpu_exit:
828 * @cpu: The CPU to exit.
829 *
830 * Requests the CPU @cpu to exit execution.
831 */
832 void cpu_exit(CPUState *cpu);
833
834 /**
835 * cpu_resume:
836 * @cpu: The CPU to resume.
837 *
838 * Resumes CPU, i.e. puts CPU into runnable state.
839 */
840 void cpu_resume(CPUState *cpu);
841
842 /**
843 * cpu_remove:
844 * @cpu: The CPU to remove.
845 *
846 * Requests the CPU to be removed.
847 */
848 void cpu_remove(CPUState *cpu);
849
850 /**
851 * cpu_remove_sync:
852 * @cpu: The CPU to remove.
853 *
854 * Requests the CPU to be removed and waits till it is removed.
855 */
856 void cpu_remove_sync(CPUState *cpu);
857
858 /**
859 * process_queued_cpu_work() - process all items on CPU work queue
860 * @cpu: The CPU which work queue to process.
861 */
862 void process_queued_cpu_work(CPUState *cpu);
863
864 /**
865 * cpu_exec_start:
866 * @cpu: The CPU for the current thread.
867 *
868 * Record that a CPU has started execution and can be interrupted with
869 * cpu_exit.
870 */
871 void cpu_exec_start(CPUState *cpu);
872
873 /**
874 * cpu_exec_end:
875 * @cpu: The CPU for the current thread.
876 *
877 * Record that a CPU has stopped execution and exclusive sections
878 * can be executed without interrupting it.
879 */
880 void cpu_exec_end(CPUState *cpu);
881
882 /**
883 * start_exclusive:
884 *
885 * Wait for a concurrent exclusive section to end, and then start
886 * a section of work that is run while other CPUs are not running
887 * between cpu_exec_start and cpu_exec_end. CPUs that are running
888 * cpu_exec are exited immediately. CPUs that call cpu_exec_start
889 * during the exclusive section go to sleep until this CPU calls
890 * end_exclusive.
891 */
892 void start_exclusive(void);
893
894 /**
895 * end_exclusive:
896 *
897 * Concludes an exclusive execution section started by start_exclusive.
898 */
899 void end_exclusive(void);
900
901 /**
902 * qemu_init_vcpu:
903 * @cpu: The vCPU to initialize.
904 *
905 * Initializes a vCPU.
906 */
907 void qemu_init_vcpu(CPUState *cpu);
908
909 #define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
910 #define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */
912
913 /**
914 * cpu_single_step:
 * @cpu: The CPU to set the single-step flags for.
916 * @enabled: Flags to enable.
917 *
918 * Enables or disables single-stepping for @cpu.
919 */
920 void cpu_single_step(CPUState *cpu, int enabled);
921
922 /* Breakpoint/watchpoint flags */
923 #define BP_MEM_READ 0x01
924 #define BP_MEM_WRITE 0x02
925 #define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
926 #define BP_STOP_BEFORE_ACCESS 0x04
927 /* 0x08 currently unused */
928 #define BP_GDB 0x10
929 #define BP_CPU 0x20
930 #define BP_ANY (BP_GDB | BP_CPU)
931 #define BP_WATCHPOINT_HIT_READ 0x40
932 #define BP_WATCHPOINT_HIT_WRITE 0x80
933 #define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
934
935 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
936 CPUBreakpoint **breakpoint);
937 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
938 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
939 void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
940
941 /* Return true if PC matches an installed breakpoint. */
942 static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
943 {
944 CPUBreakpoint *bp;
945
946 if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
947 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
948 if (bp->pc == pc && (bp->flags & mask)) {
949 return true;
950 }
951 }
952 }
953 return false;
954 }
955
956 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
957 int flags, CPUWatchpoint **watchpoint);
958 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
959 vaddr len, int flags);
960 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
961 void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
962
963 /**
964 * cpu_get_address_space:
965 * @cpu: CPU to get address space from
966 * @asidx: index identifying which address space to get
967 *
968 * Return the requested address space of this CPU. @asidx
969 * specifies which address space to read.
970 */
971 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
972
973 void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
974 GCC_FMT_ATTR(2, 3);
975 void cpu_exec_initfn(CPUState *cpu);
976 void cpu_exec_realizefn(CPUState *cpu, Error **errp);
977 void cpu_exec_unrealizefn(CPUState *cpu);
978
979 #ifdef CONFIG_SOFTMMU
980 extern const struct VMStateDescription vmstate_cpu_common;
981 #else
982 #define vmstate_cpu_common vmstate_dummy
983 #endif
984
985 #define VMSTATE_CPU() { \
986 .name = "parent_obj", \
987 .size = sizeof(CPUState), \
988 .vmsd = &vmstate_cpu_common, \
989 .flags = VMS_STRUCT, \
990 .offset = 0, \
991 }
992
993 #define UNASSIGNED_CPU_INDEX -1
994
995 #endif