1 /*
2 * QEMU CPU model
3 *
4 * Copyright (c) 2012 SUSE LINUX Products GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 */
20 #ifndef QEMU_CPU_H
21 #define QEMU_CPU_H
22
23 #include "hw/qdev-core.h"
24 #include "disas/dis-asm.h"
25 #include "exec/cpu-common.h"
26 #include "exec/hwaddr.h"
27 #include "exec/memattrs.h"
28 #include "exec/tlb-common.h"
29 #include "qapi/qapi-types-run-state.h"
30 #include "qemu/bitmap.h"
31 #include "qemu/rcu_queue.h"
32 #include "qemu/queue.h"
33 #include "qemu/thread.h"
34 #include "qemu/plugin-event.h"
35 #include "qom/object.h"
36
37 typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
38 void *opaque);
39
40 /**
41 * SECTION:cpu
42 * @section_id: QEMU-cpu
43 * @title: CPU Class
44 * @short_description: Base class for all CPUs
45 */
46
47 #define TYPE_CPU "cpu"
48
49 /* Since this macro is used a lot in hot code paths and in conjunction with
50 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
51 * an unchecked cast.
52 */
53 #define CPU(obj) ((CPUState *)(obj))
54
55 /*
56 * The class checkers bring in CPU_GET_CLASS() which is potentially
57 * expensive given the eventual call to
58 * object_class_dynamic_cast_assert(). Because of this the CPUState
59 * has a cached value for the class in cs->cc which is set up in
60 * cpu_exec_realizefn() for use in hot code paths.
61 */
62 typedef struct CPUClass CPUClass;
63 DECLARE_CLASS_CHECKERS(CPUClass, CPU,
64 TYPE_CPU)
65
66 /**
67 * OBJECT_DECLARE_CPU_TYPE:
68 * @CpuInstanceType: instance struct name
69 * @CpuClassType: class struct name
70 * @CPU_MODULE_OBJ_NAME: the CPU name in uppercase with underscore separators
71 *
72  * This macro is typically used in a target's "cpu-qom.h" header file, and will:
73 *
74 * - create the typedefs for the CPU object and class structs
75 * - register the type for use with g_autoptr
76 * - provide three standard type cast functions
77 *
78 * The object struct and class struct need to be declared manually.
79 */
80 #define OBJECT_DECLARE_CPU_TYPE(CpuInstanceType, CpuClassType, CPU_MODULE_OBJ_NAME) \
81 typedef struct ArchCPU CpuInstanceType; \
82 OBJECT_DECLARE_TYPE(ArchCPU, CpuClassType, CPU_MODULE_OBJ_NAME);
83
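/*
 * Illustrative sketch (not part of this header): a hypothetical target
 * "foo" would typically invoke the macro in its "cpu-qom.h" like so, and
 * then declare struct ArchCPU and struct FooCPUClass by hand:
 *
 *   #define TYPE_FOO_CPU "foo-cpu"
 *   OBJECT_DECLARE_CPU_TYPE(FooCPU, FooCPUClass, FOO_CPU)
 */
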
84 typedef enum MMUAccessType {
85 MMU_DATA_LOAD = 0,
86 MMU_DATA_STORE = 1,
87 MMU_INST_FETCH = 2
88 #define MMU_ACCESS_COUNT 3
89 } MMUAccessType;
90
91 typedef struct CPUWatchpoint CPUWatchpoint;
92
93 /* see tcg-cpu-ops.h */
94 struct TCGCPUOps;
95
96 /* see accel-cpu.h */
97 struct AccelCPUClass;
98
99 /* see sysemu-cpu-ops.h */
100 struct SysemuCPUOps;
101
102 /**
103 * CPUClass:
104 * @class_by_name: Callback to map -cpu command line model name to an
105 * instantiatable CPU type.
106 * @parse_features: Callback to parse command line arguments.
107 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
108 * @has_work: Callback for checking if there is work to do.
109 * @memory_rw_debug: Callback for GDB memory access.
110 * @dump_state: Callback for dumping state.
111 * @query_cpu_fast:
112 * Fill in target specific information for the "query-cpus-fast"
113 * QAPI call.
114 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
115 * @set_pc: Callback for setting the Program Counter register. This
116 * should have the semantics used by the target architecture when
117 * setting the PC from a source such as an ELF file entry point;
118 * for example on Arm it will also set the Thumb mode bit based
119 * on the least significant bit of the new PC value.
120 * If the target behaviour here is anything other than "set
121 * the PC register to the value passed in" then the target must
122 * also implement the synchronize_from_tb hook.
123 * @get_pc: Callback for getting the Program Counter register.
124 * As above, with the semantics of the target architecture.
125 * @gdb_read_register: Callback for letting GDB read a register.
126 * @gdb_write_register: Callback for letting GDB write a register.
127 * @gdb_adjust_breakpoint: Callback for adjusting the address of a
128 * breakpoint. Used by AVR to handle a gdb mis-feature with
129 * its Harvard architecture split code and data.
130 * @gdb_num_core_regs: Number of core registers accessible to GDB.
131 * @gdb_core_xml_file: File name for core registers GDB XML description.
132 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
133 * before the insn which triggers a watchpoint rather than after it.
134 * @gdb_arch_name: Optional callback that returns the architecture name known
135 * to GDB. The caller must free the returned string with g_free.
136 * @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
137 * gdb stub. Returns a pointer to the XML contents for the specified XML file
138  * gdb stub. Returns a pointer to the XML contents for the specified XML file
139 * @disas_set_info: Setup architecture specific components of disassembly info
140 * @adjust_watchpoint_address: Perform a target-specific adjustment to an
141 * address before attempting to match it against watchpoints.
142 * @deprecation_note: If this CPUClass is deprecated, this field provides
143 * related information.
144 *
145 * Represents a CPU family or model.
146 */
147 struct CPUClass {
148 /*< private >*/
149 DeviceClass parent_class;
150 /*< public >*/
151
152 ObjectClass *(*class_by_name)(const char *cpu_model);
153 void (*parse_features)(const char *typename, char *str, Error **errp);
154
155 bool (*has_work)(CPUState *cpu);
156 int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
157 uint8_t *buf, int len, bool is_write);
158 void (*dump_state)(CPUState *cpu, FILE *, int flags);
159 void (*query_cpu_fast)(CPUState *cpu, CpuInfoFast *value);
160 int64_t (*get_arch_id)(CPUState *cpu);
161 void (*set_pc)(CPUState *cpu, vaddr value);
162 vaddr (*get_pc)(CPUState *cpu);
163 int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
164 int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
165 vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);
166
167 const char *gdb_core_xml_file;
168 const gchar * (*gdb_arch_name)(CPUState *cpu);
169 const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
170
171 void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
172
173 const char *deprecation_note;
174 struct AccelCPUClass *accel_cpu;
175
176 /* when system emulation is not available, this pointer is NULL */
177 const struct SysemuCPUOps *sysemu_ops;
178
179 /* when TCG is not available, this pointer is NULL */
180 const struct TCGCPUOps *tcg_ops;
181
182 /*
183 * if not NULL, this is called in order for the CPUClass to initialize
184 * class data that depends on the accelerator, see accel/accel-common.c.
185 */
186 void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc);
187
188 /*
189 * Keep non-pointer data at the end to minimize holes.
190 */
191 int reset_dump_flags;
192 int gdb_num_core_regs;
193 bool gdb_stop_before_watchpoint;
194 };
195
196 /*
197 * Fix the number of mmu modes to 16, which is also the maximum
198 * supported by the softmmu tlb api.
199 */
200 #define NB_MMU_MODES 16
201
202 /* Use a fully associative victim tlb of 8 entries. */
203 #define CPU_VTLB_SIZE 8
204
205 /*
206 * The full TLB entry, which is not accessed by generated TCG code,
207 * so the layout is not as critical as that of CPUTLBEntry. This is
208 * also why we don't want to combine the two structs.
209 */
210 typedef struct CPUTLBEntryFull {
211 /*
212 * @xlat_section contains:
213 * - in the lower TARGET_PAGE_BITS, a physical section number
214 * - with the lower TARGET_PAGE_BITS masked off, an offset which
215 * must be added to the virtual address to obtain:
216 * + the ram_addr_t of the target RAM (if the physical section
217 * number is PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM)
218 * + the offset within the target MemoryRegion (otherwise)
219 */
220 hwaddr xlat_section;
221
222 /*
223 * @phys_addr contains the physical address in the address space
224 * given by cpu_asidx_from_attrs(cpu, @attrs).
225 */
226 hwaddr phys_addr;
227
228 /* @attrs contains the memory transaction attributes for the page. */
229 MemTxAttrs attrs;
230
231 /* @prot contains the complete protections for the page. */
232 uint8_t prot;
233
234 /* @lg_page_size contains the log2 of the page size. */
235 uint8_t lg_page_size;
236
237 /*
238 * Additional tlb flags for use by the slow path. If non-zero,
239 * the corresponding CPUTLBEntry comparator must have TLB_FORCE_SLOW.
240 */
241 uint8_t slow_flags[MMU_ACCESS_COUNT];
242
243 /*
244 * Allow target-specific additions to this structure.
245 * This may be used to cache items from the guest cpu
246 * page tables for later use by the implementation.
247 */
248 union {
249 /*
250 * Cache the attrs and shareability fields from the page table entry.
251 *
252 * For ARMMMUIdx_Stage2*, pte_attrs is the S2 descriptor bits [5:2].
253 * Otherwise, pte_attrs is the same as the MAIR_EL1 8-bit format.
254 * For shareability and guarded, as in the SH and GP fields respectively
255 * of the VMSAv8-64 PTEs.
256 */
257 struct {
258 uint8_t pte_attrs;
259 uint8_t shareability;
260 bool guarded;
261 } arm;
262 } extra;
263 } CPUTLBEntryFull;
264
265 /*
266 * Data elements that are per MMU mode, minus the bits accessed by
267 * the TCG fast path.
268 */
269 typedef struct CPUTLBDesc {
270 /*
271 * Describe a region covering all of the large pages allocated
272 * into the tlb. When any page within this region is flushed,
273 * we must flush the entire tlb. The region is matched if
274 * (addr & large_page_mask) == large_page_addr.
275 */
276 vaddr large_page_addr;
277 vaddr large_page_mask;
278 /* host time (in ns) at the beginning of the time window */
279 int64_t window_begin_ns;
280 /* maximum number of entries observed in the window */
281 size_t window_max_entries;
282 size_t n_used_entries;
283 /* The next index to use in the tlb victim table. */
284 size_t vindex;
285 /* The tlb victim table, in two parts. */
286 CPUTLBEntry vtable[CPU_VTLB_SIZE];
287 CPUTLBEntryFull vfulltlb[CPU_VTLB_SIZE];
288 CPUTLBEntryFull *fulltlb;
289 } CPUTLBDesc;
290
291 /*
292 * Data elements that are shared between all MMU modes.
293 */
294 typedef struct CPUTLBCommon {
295 /* Serialize updates to f.table and d.vtable, and others as noted. */
296 QemuSpin lock;
297 /*
298 * Within dirty, for each bit N, modifications have been made to
299 * mmu_idx N since the last time that mmu_idx was flushed.
300 * Protected by tlb_c.lock.
301 */
302 uint16_t dirty;
303 /*
304 * Statistics. These are not lock protected, but are read and
305 * written atomically. This allows the monitor to print a snapshot
306 * of the stats without interfering with the cpu.
307 */
308 size_t full_flush_count;
309 size_t part_flush_count;
310 size_t elide_flush_count;
311 } CPUTLBCommon;
312
313 /*
314 * The entire softmmu tlb, for all MMU modes.
315 * The meaning of each of the MMU modes is defined in the target code.
316 * Since this is placed within CPUNegativeOffsetState, the smallest
317 * negative offsets are at the end of the struct.
318 */
319 typedef struct CPUTLB {
320 #ifdef CONFIG_TCG
321 CPUTLBCommon c;
322 CPUTLBDesc d[NB_MMU_MODES];
323 CPUTLBDescFast f[NB_MMU_MODES];
324 #endif
325 } CPUTLB;
326
327 /*
328 * Low 16 bits: number of cycles left, used only in icount mode.
329 * High 16 bits: Set to -1 to force TCG to stop executing linked TBs
330 * for this CPU and return to its top level loop (even in non-icount mode).
331 * This allows a single read-compare-cbranch-write sequence to test
332 * for both decrementer underflow and exceptions.
333 */
334 typedef union IcountDecr {
335 uint32_t u32;
336 struct {
337 #if HOST_BIG_ENDIAN
338 uint16_t high;
339 uint16_t low;
340 #else
341 uint16_t low;
342 uint16_t high;
343 #endif
344 } u16;
345 } IcountDecr;
346
347 /*
348 * Elements of CPUState most efficiently accessed from CPUArchState,
349 * via small negative offsets.
350 */
351 typedef struct CPUNegativeOffsetState {
352 CPUTLB tlb;
353 IcountDecr icount_decr;
354 bool can_do_io;
355 } CPUNegativeOffsetState;
356
357 typedef struct CPUBreakpoint {
358 vaddr pc;
359 int flags; /* BP_* */
360 QTAILQ_ENTRY(CPUBreakpoint) entry;
361 } CPUBreakpoint;
362
363 struct CPUWatchpoint {
364 vaddr vaddr;
365 vaddr len;
366 vaddr hitaddr;
367 MemTxAttrs hitattrs;
368 int flags; /* BP_* */
369 QTAILQ_ENTRY(CPUWatchpoint) entry;
370 };
371
372 struct KVMState;
373 struct kvm_run;
374
375 /* work queue */
376
377 /* The union type allows passing of 64 bit target pointers on 32 bit
378 * hosts in a single parameter
379 */
380 typedef union {
381 int host_int;
382 unsigned long host_ulong;
383 void *host_ptr;
384 vaddr target_ptr;
385 } run_on_cpu_data;
386
387 #define RUN_ON_CPU_HOST_PTR(p) ((run_on_cpu_data){.host_ptr = (p)})
388 #define RUN_ON_CPU_HOST_INT(i) ((run_on_cpu_data){.host_int = (i)})
389 #define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
390 #define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)})
391 #define RUN_ON_CPU_NULL RUN_ON_CPU_HOST_PTR(NULL)
392
393 typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
394
395 struct qemu_work_item;
396
397 #define CPU_UNSET_NUMA_NODE_ID -1
398
399 /**
400 * CPUState:
401 * @cpu_index: CPU index (informative).
402 * @cluster_index: Identifies which cluster this CPU is in.
403 * For boards which don't define clusters or for "loose" CPUs not assigned
404 * to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
405 * be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
406 * QOM parent.
407 * Under TCG this value is propagated to @tcg_cflags.
408 * See TranslationBlock::TCG CF_CLUSTER_MASK.
409 * @tcg_cflags: Pre-computed cflags for this cpu.
410 * @nr_cores: Number of cores within this CPU package.
411 * @nr_threads: Number of threads within this CPU.
412 * @running: #true if CPU is currently running (lockless).
413 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
414 * valid under cpu_list_lock.
415 * @created: Indicates whether the CPU thread has been successfully created.
416 * @interrupt_request: Indicates a pending interrupt request.
417 * @halted: Nonzero if the CPU is in suspended state.
418 * @stop: Indicates a pending stop request.
419 * @stopped: Indicates the CPU has been artificially stopped.
420 * @unplug: Indicates a pending CPU unplug request.
421 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
422 * @singlestep_enabled: Flags for single-stepping.
423 * @icount_extra: Instructions until next timer event.
424 * @neg.can_do_io: True if memory-mapped IO is allowed.
425 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
426 * AddressSpaces this CPU has)
427 * @num_ases: number of CPUAddressSpaces in @cpu_ases
428 * @as: Pointer to the first AddressSpace, for the convenience of targets which
429 * only have a single AddressSpace
430 * @gdb_regs: Additional GDB registers.
431 * @gdb_num_regs: Number of total registers accessible to GDB.
432 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
433 * @next_cpu: Next CPU sharing TB cache.
434 * @opaque: User data.
435 * @mem_io_pc: Host Program Counter at which the memory was accessed.
436 * @accel: Pointer to accelerator specific state.
437 * @kvm_fd: vCPU file descriptor for KVM.
438 * @work_mutex: Lock to prevent multiple access to @work_list.
439 * @work_list: List of pending asynchronous work.
440 * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
441 * to @trace_dstate).
442 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
443 * @plugin_mask: Plugin event bitmap. Modified only via async work.
444 * @ignore_memory_transaction_failures: Cached copy of the MachineState
445 * flag of the same name: allows the board to suppress calling of the
446 * CPU do_transaction_failed hook function.
447 * @kvm_dirty_gfns: Points to the KVM dirty ring for this CPU when KVM dirty
448 * ring is enabled.
449 * @kvm_fetch_index: Keeps the index that we last fetched from the per-vCPU
450 * dirty ring structure.
451 *
452 * State of one CPU core or thread.
453 *
454 * Align, in order to match possible alignment required by CPUArchState,
455 * and eliminate a hole between CPUState and CPUArchState within ArchCPU.
456 */
457 struct CPUState {
458 /*< private >*/
459 DeviceState parent_obj;
460 /* cache to avoid expensive CPU_GET_CLASS */
461 CPUClass *cc;
462 /*< public >*/
463
464 int nr_cores;
465 int nr_threads;
466
467 struct QemuThread *thread;
468 #ifdef _WIN32
469 QemuSemaphore sem;
470 #endif
471 int thread_id;
472 bool running, has_waiter;
473 struct QemuCond *halt_cond;
474 bool thread_kicked;
475 bool created;
476 bool stop;
477 bool stopped;
478
479 /* Should CPU start in powered-off state? */
480 bool start_powered_off;
481
482 bool unplug;
483 bool crash_occurred;
484 bool exit_request;
485 int exclusive_context_count;
486 uint32_t cflags_next_tb;
487 /* updates protected by BQL */
488 uint32_t interrupt_request;
489 int singlestep_enabled;
490 int64_t icount_budget;
491 int64_t icount_extra;
492 uint64_t random_seed;
493 sigjmp_buf jmp_env;
494
495 QemuMutex work_mutex;
496 QSIMPLEQ_HEAD(, qemu_work_item) work_list;
497
498 CPUAddressSpace *cpu_ases;
499 int num_ases;
500 AddressSpace *as;
501 MemoryRegion *memory;
502
503 CPUJumpCache *tb_jmp_cache;
504
505 GArray *gdb_regs;
506 int gdb_num_regs;
507 int gdb_num_g_regs;
508 QTAILQ_ENTRY(CPUState) node;
509
510 /* ice debug support */
511 QTAILQ_HEAD(, CPUBreakpoint) breakpoints;
512
513 QTAILQ_HEAD(, CPUWatchpoint) watchpoints;
514 CPUWatchpoint *watchpoint_hit;
515
516 void *opaque;
517
518 /* In order to avoid passing too many arguments to the MMIO helpers,
519 * we store some rarely used information in the CPU context.
520 */
521 uintptr_t mem_io_pc;
522
523 /* Only used in KVM */
524 int kvm_fd;
525 struct KVMState *kvm_state;
526 struct kvm_run *kvm_run;
527 struct kvm_dirty_gfn *kvm_dirty_gfns;
528 uint32_t kvm_fetch_index;
529 uint64_t dirty_pages;
530 int kvm_vcpu_stats_fd;
531
532     /* Used by accel-block: CPU is executing an ioctl() */
533 QemuLockCnt in_ioctl_lock;
534
535 DECLARE_BITMAP(plugin_mask, QEMU_PLUGIN_EV_MAX);
536
537 #ifdef CONFIG_PLUGIN
538 GArray *plugin_mem_cbs;
539 #endif
540
541 /* TODO Move common fields from CPUArchState here. */
542 int cpu_index;
543 int cluster_index;
544 uint32_t tcg_cflags;
545 uint32_t halted;
546 int32_t exception_index;
547
548 AccelCPUState *accel;
549 /* shared by kvm and hvf */
550 bool vcpu_dirty;
551
552 /* Used to keep track of an outstanding cpu throttle thread for migration
553 * autoconverge
554 */
555 bool throttle_thread_scheduled;
556
557 /*
558 * Sleep throttle_us_per_full microseconds once dirty ring is full
559 * if dirty page rate limit is enabled.
560 */
561 int64_t throttle_us_per_full;
562
563 bool ignore_memory_transaction_failures;
564
565 /* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
566 bool prctl_unalign_sigbus;
567
568 /* track IOMMUs whose translations we've cached in the TCG TLB */
569 GArray *iommu_notifiers;
570
571 /*
572 * MUST BE LAST in order to minimize the displacement to CPUArchState.
573 */
574 char neg_align[-sizeof(CPUNegativeOffsetState) % 16] QEMU_ALIGNED(16);
575 CPUNegativeOffsetState neg;
576 };
577
578 /* Validate placement of CPUNegativeOffsetState. */
579 QEMU_BUILD_BUG_ON(offsetof(CPUState, neg) !=
580 sizeof(CPUState) - sizeof(CPUNegativeOffsetState));
581
582 static inline CPUArchState *cpu_env(CPUState *cpu)
583 {
584 /* We validate that CPUArchState follows CPUState in cpu-all.h. */
585 return (CPUArchState *)(cpu + 1);
586 }
587
588 typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
589 extern CPUTailQ cpus;
590
591 #define first_cpu QTAILQ_FIRST_RCU(&cpus)
592 #define CPU_NEXT(cpu) QTAILQ_NEXT_RCU(cpu, node)
593 #define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus, node)
594 #define CPU_FOREACH_SAFE(cpu, next_cpu) \
595 QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus, node, next_cpu)
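
/*
 * Usage sketch (illustrative): walk every CPU, e.g. to kick all vCPU
 * threads. The CPU list is an RCU queue, so follow the usual RCU/BQL
 * rules when iterating.
 *
 *   CPUState *cpu;
 *
 *   CPU_FOREACH(cpu) {
 *       qemu_cpu_kick(cpu);
 *   }
 */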
596
597 extern __thread CPUState *current_cpu;
598
599 /**
600 * qemu_tcg_mttcg_enabled:
601 * Check whether we are running MultiThread TCG or not.
602 *
603  * Returns: %true if we are in MTTCG mode, %false otherwise.
604 */
605 extern bool mttcg_enabled;
606 #define qemu_tcg_mttcg_enabled() (mttcg_enabled)
607
608 /**
609 * cpu_paging_enabled:
610 * @cpu: The CPU whose state is to be inspected.
611 *
612 * Returns: %true if paging is enabled, %false otherwise.
613 */
614 bool cpu_paging_enabled(const CPUState *cpu);
615
616 /**
617 * cpu_get_memory_mapping:
618 * @cpu: The CPU whose memory mappings are to be obtained.
619 * @list: Where to write the memory mappings to.
620 * @errp: Pointer for reporting an #Error.
621 *
622 * Returns: %true on success, %false otherwise.
623 */
624 bool cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
625 Error **errp);
626
627 #if !defined(CONFIG_USER_ONLY)
628
629 /**
630 * cpu_write_elf64_note:
631 * @f: pointer to a function that writes memory to a file
632 * @cpu: The CPU whose memory is to be dumped
633 * @cpuid: ID number of the CPU
634 * @opaque: pointer to the CPUState struct
635 */
636 int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
637 int cpuid, void *opaque);
638
639 /**
640 * cpu_write_elf64_qemunote:
641 * @f: pointer to a function that writes memory to a file
642 * @cpu: The CPU whose memory is to be dumped
644 * @opaque: pointer to the CPUState struct
645 */
646 int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
647 void *opaque);
648
649 /**
650 * cpu_write_elf32_note:
651 * @f: pointer to a function that writes memory to a file
652 * @cpu: The CPU whose memory is to be dumped
653 * @cpuid: ID number of the CPU
654 * @opaque: pointer to the CPUState struct
655 */
656 int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
657 int cpuid, void *opaque);
658
659 /**
660 * cpu_write_elf32_qemunote:
661 * @f: pointer to a function that writes memory to a file
662 * @cpu: The CPU whose memory is to be dumped
664 * @opaque: pointer to the CPUState struct
665 */
666 int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
667 void *opaque);
668
669 /**
670 * cpu_get_crash_info:
671 * @cpu: The CPU to get crash information for
672 *
673 * Gets the previously saved crash information.
674 * Caller is responsible for freeing the data.
675 */
676 GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
677
678 #endif /* !CONFIG_USER_ONLY */
679
680 /**
681 * CPUDumpFlags:
682 * @CPU_DUMP_CODE:
683 * @CPU_DUMP_FPU: dump FPU register state, not just integer
684 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
685 * @CPU_DUMP_VPU: dump VPU registers
686 */
687 enum CPUDumpFlags {
688 CPU_DUMP_CODE = 0x00010000,
689 CPU_DUMP_FPU = 0x00020000,
690 CPU_DUMP_CCOP = 0x00040000,
691 CPU_DUMP_VPU = 0x00080000,
692 };
693
694 /**
695 * cpu_dump_state:
696 * @cpu: The CPU whose state is to be dumped.
697  * @f: If non-null, dump to this stream, else to current print sink.
 * @flags: #CPUDumpFlags value controlling which parts of the state to dump.
698 *
699 * Dumps CPU state.
700 */
701 void cpu_dump_state(CPUState *cpu, FILE *f, int flags);
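
/*
 * Example (illustrative): dump integer and FPU register state to stderr,
 * together with a disassembly of the code around the current PC:
 *
 *   cpu_dump_state(cpu, stderr, CPU_DUMP_CODE | CPU_DUMP_FPU);
 */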
702
703 #ifndef CONFIG_USER_ONLY
704 /**
705 * cpu_get_phys_page_attrs_debug:
706 * @cpu: The CPU to obtain the physical page address for.
707 * @addr: The virtual address.
708 * @attrs: Updated on return with the memory transaction attributes to use
709 * for this access.
710 *
711 * Obtains the physical page corresponding to a virtual one, together
712 * with the corresponding memory transaction attributes to use for the access.
713 * Use it only for debugging because no protection checks are done.
714 *
715 * Returns: Corresponding physical page address or -1 if no page found.
716 */
717 hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
718 MemTxAttrs *attrs);
719
720 /**
721 * cpu_get_phys_page_debug:
722 * @cpu: The CPU to obtain the physical page address for.
723 * @addr: The virtual address.
724 *
725 * Obtains the physical page corresponding to a virtual one.
726 * Use it only for debugging because no protection checks are done.
727 *
728 * Returns: Corresponding physical page address or -1 if no page found.
729 */
730 hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
731
732 /**
 * cpu_asidx_from_attrs:
733 * @cpu: CPU
734 * @attrs: memory transaction attributes
735 *
736 * Returns the address space index specifying the CPU AddressSpace
737 * to use for a memory access with the given transaction attributes.
738 */
739 int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs);
740
741 /**
742 * cpu_virtio_is_big_endian:
743 * @cpu: CPU
744  *
745 * Returns %true if a CPU which supports runtime configurable endianness
746 * is currently big-endian.
747 */
748 bool cpu_virtio_is_big_endian(CPUState *cpu);
749
750 #endif /* CONFIG_USER_ONLY */
751
752 /**
753 * cpu_list_add:
754 * @cpu: The CPU to be added to the list of CPUs.
755 */
756 void cpu_list_add(CPUState *cpu);
757
758 /**
759 * cpu_list_remove:
760 * @cpu: The CPU to be removed from the list of CPUs.
761 */
762 void cpu_list_remove(CPUState *cpu);
763
764 /**
765 * cpu_reset:
766 * @cpu: The CPU whose state is to be reset.
767 */
768 void cpu_reset(CPUState *cpu);
769
770 /**
771 * cpu_class_by_name:
772 * @typename: The CPU base type.
773 * @cpu_model: The model string without any parameters.
774 *
775 * Looks up a CPU #ObjectClass matching name @cpu_model.
776 *
777  * Returns: A #CPUClass or %NULL if no matching class is found.
778 */
779 ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
780
781 /**
782 * cpu_create:
783 * @typename: The CPU type.
784 *
785 * Instantiates a CPU and realizes the CPU.
786 *
787 * Returns: A #CPUState or %NULL if an error occurred.
788 */
789 CPUState *cpu_create(const char *typename);
790
791 /**
792 * parse_cpu_option:
793 * @cpu_option: The -cpu option including optional parameters.
794 *
795  * Processes optional parameters and registers them as global properties.
796  *
797  * Returns: the type of CPU to create; on error, prints a message and
798  * terminates the process.
799 */
800 const char *parse_cpu_option(const char *cpu_option);
801
802 /**
803 * cpu_has_work:
804 * @cpu: The vCPU to check.
805 *
806 * Checks whether the CPU has work to do.
807 *
808 * Returns: %true if the CPU has work, %false otherwise.
809 */
810 static inline bool cpu_has_work(CPUState *cpu)
811 {
812 CPUClass *cc = CPU_GET_CLASS(cpu);
813
814 g_assert(cc->has_work);
815 return cc->has_work(cpu);
816 }
817
818 /**
819 * qemu_cpu_is_self:
820 * @cpu: The vCPU to check against.
821 *
822 * Checks whether the caller is executing on the vCPU thread.
823 *
824 * Returns: %true if called from @cpu's thread, %false otherwise.
825 */
826 bool qemu_cpu_is_self(CPUState *cpu);
827
828 /**
829 * qemu_cpu_kick:
830 * @cpu: The vCPU to kick.
831 *
832 * Kicks @cpu's thread.
833 */
834 void qemu_cpu_kick(CPUState *cpu);
835
836 /**
837 * cpu_is_stopped:
838 * @cpu: The CPU to check.
839 *
840 * Checks whether the CPU is stopped.
841 *
842 * Returns: %true if run state is not running or if artificially stopped;
843 * %false otherwise.
844 */
845 bool cpu_is_stopped(CPUState *cpu);
846
847 /**
848 * do_run_on_cpu:
849 * @cpu: The vCPU to run on.
850 * @func: The function to be executed.
851 * @data: Data to pass to the function.
852 * @mutex: Mutex to release while waiting for @func to run.
853 *
854 * Used internally in the implementation of run_on_cpu.
855 */
856 void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
857 QemuMutex *mutex);
858
859 /**
860 * run_on_cpu:
861 * @cpu: The vCPU to run on.
862 * @func: The function to be executed.
863 * @data: Data to pass to the function.
864 *
865 * Schedules the function @func for execution on the vCPU @cpu.
866 */
867 void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
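
/*
 * Usage sketch (illustrative; "do_work" and "opaque" are hypothetical):
 *
 *   static void do_work(CPUState *cpu, run_on_cpu_data data)
 *   {
 *       void *opaque = data.host_ptr;
 *       ...
 *   }
 *
 *   run_on_cpu(cpu, do_work, RUN_ON_CPU_HOST_PTR(opaque));
 *
 * run_on_cpu() waits for @func to complete on the target vCPU thread;
 * see async_run_on_cpu() below for the non-blocking variant.
 */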
868
869 /**
870 * async_run_on_cpu:
871 * @cpu: The vCPU to run on.
872 * @func: The function to be executed.
873 * @data: Data to pass to the function.
874 *
875 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
876 */
877 void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
878
879 /**
880 * async_safe_run_on_cpu:
881 * @cpu: The vCPU to run on.
882 * @func: The function to be executed.
883 * @data: Data to pass to the function.
884 *
885 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
886 * while all other vCPUs are sleeping.
887 *
888 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
889 * BQL.
890 */
891 void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
892
893 /**
894 * cpu_in_exclusive_context()
895 * @cpu: The vCPU to check
896 *
897  * Returns true if @cpu is in an exclusive context, for example running
898 * something which has previously been queued via async_safe_run_on_cpu().
899 */
900 static inline bool cpu_in_exclusive_context(const CPUState *cpu)
901 {
902 return cpu->exclusive_context_count;
903 }
904
905 /**
906 * qemu_get_cpu:
907  * @index: The CPUState::cpu_index value of the CPU to obtain.
908 *
909 * Gets a CPU matching @index.
910 *
911 * Returns: The CPU or %NULL if there is no matching CPU.
912 */
913 CPUState *qemu_get_cpu(int index);
914
915 /**
916 * cpu_exists:
917 * @id: Guest-exposed CPU ID to lookup.
918 *
919 * Search for CPU with specified ID.
920 *
921 * Returns: %true - CPU is found, %false - CPU isn't found.
922 */
923 bool cpu_exists(int64_t id);
924
925 /**
926 * cpu_by_arch_id:
927 * @id: Guest-exposed CPU ID of the CPU to obtain.
928 *
929 * Get a CPU with matching @id.
930 *
931 * Returns: The CPU or %NULL if there is no matching CPU.
932 */
933 CPUState *cpu_by_arch_id(int64_t id);
934
935 /**
936 * cpu_interrupt:
937 * @cpu: The CPU to set an interrupt on.
938 * @mask: The interrupts to set.
939 *
940 * Invokes the interrupt handler.
941 */
942
943 void cpu_interrupt(CPUState *cpu, int mask);
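
/*
 * Example (illustrative): interrupt controller models typically raise a
 * hardware interrupt line on a vCPU with one of the CPU_INTERRUPT_*
 * masks (defined elsewhere), e.g.:
 *
 *   cpu_interrupt(cs, CPU_INTERRUPT_HARD);
 */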
944
945 /**
946 * cpu_set_pc:
947 * @cpu: The CPU to set the program counter for.
948 * @addr: Program counter value.
949 *
950 * Sets the program counter for a CPU.
951 */
952 static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
953 {
954 CPUClass *cc = CPU_GET_CLASS(cpu);
955
956 cc->set_pc(cpu, addr);
957 }
958
959 /**
960 * cpu_reset_interrupt:
961 * @cpu: The CPU to clear the interrupt on.
962 * @mask: The interrupt mask to clear.
963 *
964 * Resets interrupts on the vCPU @cpu.
965 */
966 void cpu_reset_interrupt(CPUState *cpu, int mask);
967
968 /**
969 * cpu_exit:
970 * @cpu: The CPU to exit.
971 *
972 * Requests the CPU @cpu to exit execution.
973 */
974 void cpu_exit(CPUState *cpu);
975
976 /**
977 * cpu_resume:
978 * @cpu: The CPU to resume.
979 *
980 * Resumes CPU, i.e. puts CPU into runnable state.
981 */
982 void cpu_resume(CPUState *cpu);
983
984 /**
985 * cpu_remove_sync:
986 * @cpu: The CPU to remove.
987 *
988 * Requests the CPU to be removed and waits till it is removed.
989 */
990 void cpu_remove_sync(CPUState *cpu);
991
992 /**
993 * process_queued_cpu_work() - process all items on CPU work queue
994  * @cpu: The CPU whose work queue is to be processed.
995 */
996 void process_queued_cpu_work(CPUState *cpu);
997
998 /**
999 * cpu_exec_start:
1000 * @cpu: The CPU for the current thread.
1001 *
1002 * Record that a CPU has started execution and can be interrupted with
1003 * cpu_exit.
1004 */
1005 void cpu_exec_start(CPUState *cpu);
1006
1007 /**
1008 * cpu_exec_end:
1009 * @cpu: The CPU for the current thread.
1010 *
1011 * Record that a CPU has stopped execution and exclusive sections
1012 * can be executed without interrupting it.
1013 */
1014 void cpu_exec_end(CPUState *cpu);
1015
1016 /**
1017 * start_exclusive:
1018 *
1019 * Wait for a concurrent exclusive section to end, and then start
1020 * a section of work that is run while other CPUs are not running
1021 * between cpu_exec_start and cpu_exec_end. CPUs that are running
1022 * cpu_exec are exited immediately. CPUs that call cpu_exec_start
1023 * during the exclusive section go to sleep until this CPU calls
1024 * end_exclusive.
1025 */
1026 void start_exclusive(void);
1027
1028 /**
1029 * end_exclusive:
1030 *
1031 * Concludes an exclusive execution section started by start_exclusive.
1032 */
1033 void end_exclusive(void);
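
/*
 * Usage sketch (illustrative): a typical exclusive section, run while no
 * other vCPU is between cpu_exec_start() and cpu_exec_end():
 *
 *   start_exclusive();
 *   ... modify state that all vCPUs observe ...
 *   end_exclusive();
 */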
1034
1035 /**
1036 * qemu_init_vcpu:
1037 * @cpu: The vCPU to initialize.
1038 *
1039 * Initializes a vCPU.
1040 */
1041 void qemu_init_vcpu(CPUState *cpu);
1042
1043 #define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
1044 #define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
1045 #define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */
1046
1047 /**
1048 * cpu_single_step:
1049  * @cpu: CPU to set the flags for.
1050 * @enabled: Flags to enable.
1051 *
1052 * Enables or disables single-stepping for @cpu.
1053 */
1054 void cpu_single_step(CPUState *cpu, int enabled);
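
/*
 * Example (illustrative): the gdbstub enables single-stepping roughly as
 *
 *   cpu_single_step(cpu, SSTEP_ENABLE | SSTEP_NOIRQ | SSTEP_NOTIMER);
 *
 * and turns it off again with cpu_single_step(cpu, 0).
 */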
1055
1056 /* Breakpoint/watchpoint flags */
1057 #define BP_MEM_READ 0x01
1058 #define BP_MEM_WRITE 0x02
1059 #define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
1060 #define BP_STOP_BEFORE_ACCESS 0x04
1061 /* 0x08 currently unused */
1062 #define BP_GDB 0x10
1063 #define BP_CPU 0x20
1064 #define BP_ANY (BP_GDB | BP_CPU)
1065 #define BP_HIT_SHIFT 6
1066 #define BP_WATCHPOINT_HIT_READ (BP_MEM_READ << BP_HIT_SHIFT)
1067 #define BP_WATCHPOINT_HIT_WRITE (BP_MEM_WRITE << BP_HIT_SHIFT)
1068 #define BP_WATCHPOINT_HIT (BP_MEM_ACCESS << BP_HIT_SHIFT)
1069
1070 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
1071 CPUBreakpoint **breakpoint);
1072 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
1073 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
1074 void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
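
/*
 * Usage sketch (illustrative): install a GDB breakpoint at @pc and drop
 * it again later. The last argument may be NULL if the caller does not
 * need the #CPUBreakpoint back.
 *
 *   CPUBreakpoint *bp;
 *
 *   if (cpu_breakpoint_insert(cpu, pc, BP_GDB, &bp) == 0) {
 *       ...
 *       cpu_breakpoint_remove_by_ref(cpu, bp);
 *   }
 */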
1075
1076 /* Return true if PC matches an installed breakpoint. */
1077 static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
1078 {
1079 CPUBreakpoint *bp;
1080
1081 if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
1082 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
1083 if (bp->pc == pc && (bp->flags & mask)) {
1084 return true;
1085 }
1086 }
1087 }
1088 return false;
1089 }
1090
1091 #if defined(CONFIG_USER_ONLY)
1092 static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
1093 int flags, CPUWatchpoint **watchpoint)
1094 {
1095 return -ENOSYS;
1096 }
1097
1098 static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
1099 vaddr len, int flags)
1100 {
1101 return -ENOSYS;
1102 }
1103
1104 static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
1105 CPUWatchpoint *wp)
1106 {
1107 }
1108
1109 static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
1110 {
1111 }
1112 #else
1113 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
1114 int flags, CPUWatchpoint **watchpoint);
1115 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
1116 vaddr len, int flags);
1117 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
1118 void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
1119 #endif
1120
1121 /**
1122 * cpu_plugin_mem_cbs_enabled() - are plugin memory callbacks enabled?
1123  * @cpu: CPUState pointer
1124 *
1125 * The memory callbacks are installed if a plugin has instrumented an
1126 * instruction for memory. This can be useful to know if you want to
1127 * force a slow path for a series of memory accesses.
1128 */
1129 static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu)
1130 {
1131 #ifdef CONFIG_PLUGIN
1132 return !!cpu->plugin_mem_cbs;
1133 #else
1134 return false;
1135 #endif
1136 }
1137
1138 /**
1139 * cpu_get_address_space:
1140 * @cpu: CPU to get address space from
1141 * @asidx: index identifying which address space to get
1142 *
1143 * Return the requested address space of this CPU. @asidx
1144 * specifies which address space to read.
1145 */
1146 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
1147
1148 G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...)
1149 G_GNUC_PRINTF(2, 3);
1150
1151 /* $(top_srcdir)/cpu.c */
1152 void cpu_class_init_props(DeviceClass *dc);
1153 void cpu_exec_initfn(CPUState *cpu);
1154 void cpu_exec_realizefn(CPUState *cpu, Error **errp);
1155 void cpu_exec_unrealizefn(CPUState *cpu);
1156 void cpu_exec_reset_hold(CPUState *cpu);
1157
1158 /**
1159 * target_words_bigendian:
1160 * Returns true if the (default) endianness of the target is big endian,
1161 * false otherwise. Note that in target-specific code, you can use
1162 * TARGET_BIG_ENDIAN directly instead. On the other hand, common
1163 * code should normally never need to know about the endianness of the
1164 * target, so please do *not* use this function unless you know very well
1165 * what you are doing!
1166 */
1167 bool target_words_bigendian(void);
1168
1169 const char *target_name(void);
1170
1171 void page_size_init(void);
1172
1173 #ifdef NEED_CPU_H
1174
1175 #ifndef CONFIG_USER_ONLY
1176
1177 extern const VMStateDescription vmstate_cpu_common;
1178
1179 #define VMSTATE_CPU() { \
1180 .name = "parent_obj", \
1181 .size = sizeof(CPUState), \
1182 .vmsd = &vmstate_cpu_common, \
1183 .flags = VMS_STRUCT, \
1184 .offset = 0, \
1185 }
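
/*
 * Usage sketch (illustrative): a target's CPU VMStateDescription usually
 * lists VMSTATE_CPU() first, so the common CPUState parent object is
 * migrated along with the architecture-specific fields:
 *
 *   .fields = (VMStateField[]) {
 *       VMSTATE_CPU(),
 *       ...
 *       VMSTATE_END_OF_LIST()
 *   }
 */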
1186 #endif /* !CONFIG_USER_ONLY */
1187
1188 #endif /* NEED_CPU_H */
1189
1190 #define UNASSIGNED_CPU_INDEX -1
1191 #define UNASSIGNED_CLUSTER_INDEX -1
1192
1193 #endif