include/qom/cpu.h
1 /*
2 * QEMU CPU model
3 *
4 * Copyright (c) 2012 SUSE LINUX Products GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 */
20 #ifndef QEMU_CPU_H
21 #define QEMU_CPU_H
22
23 #include "hw/qdev-core.h"
24 #include "disas/bfd.h"
25 #include "exec/hwaddr.h"
26 #include "exec/memattrs.h"
27 #include "qemu/bitmap.h"
28 #include "qemu/queue.h"
29 #include "qemu/thread.h"
30
31 typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
32 void *opaque);
33
34 /**
35 * vaddr:
36 * Type wide enough to contain any #target_ulong virtual address.
37 */
38 typedef uint64_t vaddr;
39 #define VADDR_PRId PRId64
40 #define VADDR_PRIu PRIu64
41 #define VADDR_PRIo PRIo64
42 #define VADDR_PRIx PRIx64
43 #define VADDR_PRIX PRIX64
44 #define VADDR_MAX UINT64_MAX
45
46 /**
47 * SECTION:cpu
48 * @section_id: QEMU-cpu
49 * @title: CPU Class
50 * @short_description: Base class for all CPUs
51 */
52
53 #define TYPE_CPU "cpu"
54
55 /* Since this macro is used a lot in hot code paths and in conjunction with
56 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
57 * an unchecked cast.
58 */
59 #define CPU(obj) ((CPUState *)(obj))
60
61 #define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
62 #define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)
63
64 typedef enum MMUAccessType {
65 MMU_DATA_LOAD = 0,
66 MMU_DATA_STORE = 1,
67 MMU_INST_FETCH = 2
68 } MMUAccessType;
69
70 typedef struct CPUWatchpoint CPUWatchpoint;
71
72 typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
73 bool is_write, bool is_exec, int opaque,
74 unsigned size);
75
76 struct TranslationBlock;
77
78 /**
79 * CPUClass:
80 * @class_by_name: Callback to map -cpu command line model name to an
81 * instantiatable CPU type.
82 * @parse_features: Callback to parse command line arguments.
83 * @reset: Callback to reset the #CPUState to its initial state.
84 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
85 * @has_work: Callback for checking if there is work to do.
86 * @do_interrupt: Callback for interrupt handling.
87 * @do_unassigned_access: Callback for unassigned access handling.
88 * @do_unaligned_access: Callback for unaligned access handling, if
89 * the target defines #ALIGNED_ONLY.
90 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
91 * runtime configurable endianness is currently big-endian. Non-configurable
92 * CPUs can use the default implementation of this method. This method should
93 * not be used by any callers other than the pre-1.0 virtio devices.
94 * @memory_rw_debug: Callback for GDB memory access.
95 * @dump_state: Callback for dumping state.
96 * @dump_statistics: Callback for dumping statistics.
97 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
98 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
99 * @get_memory_mapping: Callback for obtaining the memory mappings.
100 * @set_pc: Callback for setting the Program Counter register.
101 * @synchronize_from_tb: Callback for synchronizing state from a TCG
102 * #TranslationBlock.
103 * @handle_mmu_fault: Callback for handling an MMU fault.
104 * @get_phys_page_debug: Callback for obtaining a physical address.
105 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
106 * associated memory transaction attributes to use for the access.
107 * CPUs which use memory transaction attributes should implement this
108 * instead of get_phys_page_debug.
109 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
110 * a memory access with the specified memory transaction attributes.
111 * @gdb_read_register: Callback for letting GDB read a register.
112 * @gdb_write_register: Callback for letting GDB write a register.
113 * @debug_check_watchpoint: Callback: return true if the architectural
114 * watchpoint whose address has matched should really fire.
115 * @debug_excp_handler: Callback for handling debug exceptions.
116 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
117 * 64-bit VM coredump.
118 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
119 * note to a 64-bit VM coredump.
120 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
121 * 32-bit VM coredump.
122 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
123 * note to a 32-bit VM coredump.
124 * @vmsd: State description for migration.
125 * @gdb_num_core_regs: Number of core registers accessible to GDB.
126 * @gdb_core_xml_file: File name for core registers GDB XML description.
127 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
128 * before the insn which triggers a watchpoint rather than after it.
129 * @gdb_arch_name: Optional callback that returns the architecture name known
130 * to GDB. The caller must free the returned string with g_free.
131 * @cpu_exec_enter: Callback for cpu_exec preparation.
132 * @cpu_exec_exit: Callback for cpu_exec cleanup.
133 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
134 * @disas_set_info: Set up architecture-specific components of disassembly info.
135 * @adjust_watchpoint_address: Perform a target-specific adjustment to an
136 * address before attempting to match it against watchpoints.
137 *
138 * Represents a CPU family or model.
139 */
140 typedef struct CPUClass {
141 /*< private >*/
142 DeviceClass parent_class;
143 /*< public >*/
144
145 ObjectClass *(*class_by_name)(const char *cpu_model);
146 void (*parse_features)(const char *typename, char *str, Error **errp);
147
148 void (*reset)(CPUState *cpu);
149 int reset_dump_flags;
150 bool (*has_work)(CPUState *cpu);
151 void (*do_interrupt)(CPUState *cpu);
152 CPUUnassignedAccess do_unassigned_access;
153 void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
154 MMUAccessType access_type,
155 int mmu_idx, uintptr_t retaddr);
156 bool (*virtio_is_big_endian)(CPUState *cpu);
157 int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
158 uint8_t *buf, int len, bool is_write);
159 void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
160 int flags);
161 void (*dump_statistics)(CPUState *cpu, FILE *f,
162 fprintf_function cpu_fprintf, int flags);
163 int64_t (*get_arch_id)(CPUState *cpu);
164 bool (*get_paging_enabled)(const CPUState *cpu);
165 void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
166 Error **errp);
167 void (*set_pc)(CPUState *cpu, vaddr value);
168 void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
169 int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int rw,
170 int mmu_index);
171 hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
172 hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
173 MemTxAttrs *attrs);
174 int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
175 int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
176 int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
177 bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
178 void (*debug_excp_handler)(CPUState *cpu);
179
180 int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
181 int cpuid, void *opaque);
182 int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
183 void *opaque);
184 int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
185 int cpuid, void *opaque);
186 int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
187 void *opaque);
188
189 const struct VMStateDescription *vmsd;
190 int gdb_num_core_regs;
191 const char *gdb_core_xml_file;
192 gchar * (*gdb_arch_name)(CPUState *cpu);
193 bool gdb_stop_before_watchpoint;
194
195 void (*cpu_exec_enter)(CPUState *cpu);
196 void (*cpu_exec_exit)(CPUState *cpu);
197 bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
198
199 void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
200 vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
201 } CPUClass;
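/*
 * Illustrative sketch (hypothetical "foo" target, not taken from the QEMU
 * tree): a target's class_init typically overrides only the hooks it needs.
 *
 *     static void foo_cpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *
 *         cc->has_work = foo_cpu_has_work;
 *         cc->do_interrupt = foo_cpu_do_interrupt;
 *         cc->set_pc = foo_cpu_set_pc;
 *         cc->gdb_num_core_regs = 32;   /- number of core registers exposed to GDB -/
 *     }
 */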
202
203 #ifdef HOST_WORDS_BIGENDIAN
204 typedef struct icount_decr_u16 {
205 uint16_t high;
206 uint16_t low;
207 } icount_decr_u16;
208 #else
209 typedef struct icount_decr_u16 {
210 uint16_t low;
211 uint16_t high;
212 } icount_decr_u16;
213 #endif
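/*
 * The two layouts above track the host byte order so that, in the icount_decr
 * union inside CPUState below, u16.high always aliases the most significant
 * half of u32 (where the interrupt flag lives, per the @icount_decr
 * documentation) and u16.low the low half used as the decrementer, whichever
 * endianness the host has.
 */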
214
215 typedef struct CPUBreakpoint {
216 vaddr pc;
217 int flags; /* BP_* */
218 QTAILQ_ENTRY(CPUBreakpoint) entry;
219 } CPUBreakpoint;
220
221 struct CPUWatchpoint {
222 vaddr vaddr;
223 vaddr len;
224 vaddr hitaddr;
225 MemTxAttrs hitattrs;
226 int flags; /* BP_* */
227 QTAILQ_ENTRY(CPUWatchpoint) entry;
228 };
229
230 struct KVMState;
231 struct kvm_run;
232
233 struct hax_vcpu_state;
234
235 #define TB_JMP_CACHE_BITS 12
236 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
237
238 /* work queue */
239
240 /* The union type allows passing of 64 bit target pointers on 32 bit
241 * hosts in a single parameter
242 */
243 typedef union {
244 int host_int;
245 unsigned long host_ulong;
246 void *host_ptr;
247 vaddr target_ptr;
248 } run_on_cpu_data;
249
250 #define RUN_ON_CPU_HOST_PTR(p) ((run_on_cpu_data){.host_ptr = (p)})
251 #define RUN_ON_CPU_HOST_INT(i) ((run_on_cpu_data){.host_int = (i)})
252 #define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
253 #define RUN_ON_CPU_TARGET_PTR(v) ((run_on_cpu_data){.target_ptr = (v)})
254 #define RUN_ON_CPU_NULL RUN_ON_CPU_HOST_PTR(NULL)
255
256 typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
257
258 struct qemu_work_item;
259
260 /**
261 * CPUState:
262 * @cpu_index: CPU index (informative).
263 * @nr_cores: Number of cores within this CPU package.
264 * @nr_threads: Number of threads within this CPU.
265 * @numa_node: NUMA node this CPU belongs to.
266 * @host_tid: Host thread ID.
267 * @running: %true if CPU is currently running (lockless).
268 * @has_waiter: %true if a thread is currently waiting for this CPU to reach
269 * cpu_exec_end; valid under cpu_list_lock.
270 * @created: Indicates whether the CPU thread has been successfully created.
271 * @interrupt_request: Indicates a pending interrupt request.
272 * @halted: Nonzero if the CPU is in suspended state.
273 * @stop: Indicates a pending stop request.
274 * @stopped: Indicates the CPU has been artificially stopped.
275 * @unplug: Indicates a pending CPU unplug request.
276 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
277 * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
278 * CPU and return to its top level loop.
279 * @singlestep_enabled: Flags for single-stepping.
280 * @icount_extra: Instructions until next timer event.
281 * @icount_decr: Number of cycles left, with interrupt flag in high bit.
282 * This allows a single read-compare-cbranch-write sequence to test
283 * for both decrementer underflow and exceptions.
284 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
285 * requires that IO only be performed on the last instruction of a TB
286 * so that interrupts take effect immediately.
287 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
288 * AddressSpaces this CPU has)
289 * @num_ases: number of CPUAddressSpaces in @cpu_ases
290 * @as: Pointer to the first AddressSpace, for the convenience of targets which
291 * only have a single AddressSpace
292 * @env_ptr: Pointer to subclass-specific CPUArchState field.
293 * @gdb_regs: Additional GDB registers.
294 * @gdb_num_regs: Number of total registers accessible to GDB.
295 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
296 * @node: Next CPU sharing TB cache.
297 * @opaque: User data.
298 * @mem_io_pc: Host Program Counter at which the memory was accessed.
299 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
300 * @kvm_fd: vCPU file descriptor for KVM.
301 * @work_mutex: Lock to prevent multiple access to queued_work_*.
302 * @queued_work_first: First asynchronous work pending.
303 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
304 *
305 * State of one CPU core or thread.
306 */
307 struct CPUState {
308 /*< private >*/
309 DeviceState parent_obj;
310 /*< public >*/
311
312 int nr_cores;
313 int nr_threads;
314 int numa_node;
315
316 struct QemuThread *thread;
317 #ifdef _WIN32
318 HANDLE hThread;
319 #endif
320 int thread_id;
321 uint32_t host_tid;
322 bool running, has_waiter;
323 struct QemuCond *halt_cond;
324 bool thread_kicked;
325 bool created;
326 bool stop;
327 bool stopped;
328 bool unplug;
329 bool crash_occurred;
330 bool exit_request;
331 uint32_t interrupt_request;
332 int singlestep_enabled;
333 int64_t icount_extra;
334 sigjmp_buf jmp_env;
335
336 QemuMutex work_mutex;
337 struct qemu_work_item *queued_work_first, *queued_work_last;
338
339 CPUAddressSpace *cpu_ases;
340 int num_ases;
341 AddressSpace *as;
342 MemoryRegion *memory;
343
344 void *env_ptr; /* CPUArchState */
345
346 /* Writes protected by tb_lock, reads not thread-safe */
347 struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
348
349 struct GDBRegisterState *gdb_regs;
350 int gdb_num_regs;
351 int gdb_num_g_regs;
352 QTAILQ_ENTRY(CPUState) node;
353
354 /* ice debug support */
355 QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;
356
357 QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
358 CPUWatchpoint *watchpoint_hit;
359
360 void *opaque;
361
362 /* In order to avoid passing too many arguments to the MMIO helpers,
363 * we store some rarely used information in the CPU context.
364 */
365 uintptr_t mem_io_pc;
366 vaddr mem_io_vaddr;
367
368 int kvm_fd;
369 bool kvm_vcpu_dirty;
370 struct KVMState *kvm_state;
371 struct kvm_run *kvm_run;
372
373 /*
374 * Used for events with 'vcpu' and *without* the 'disabled' properties.
375 * Dynamically allocated based on the bitmap required to hold up to
376 * trace_get_vcpu_event_count() entries.
377 */
378 unsigned long *trace_dstate;
379
380 /* TODO Move common fields from CPUArchState here. */
381 int cpu_index; /* used by alpha TCG */
382 uint32_t halted; /* used by alpha, cris, ppc TCG */
383 union {
384 uint32_t u32;
385 icount_decr_u16 u16;
386 } icount_decr;
387 uint32_t can_do_io;
388 int32_t exception_index; /* used by m68k TCG */
389
390 /* Used to keep track of an outstanding cpu throttle thread for migration
391 * autoconverge
392 */
393 bool throttle_thread_scheduled;
394
395 /* Note that this is accessed at the start of every TB via a negative
396 offset from AREG0. Leave this field at the end so as to make the
397 (absolute value) offset as small as possible. This reduces code
398 size, especially for hosts without large memory offsets. */
399 uint32_t tcg_exit_req;
400
401 bool hax_vcpu_dirty;
402 struct hax_vcpu_state *hax_vcpu;
403 };
404
405 QTAILQ_HEAD(CPUTailQ, CPUState);
406 extern struct CPUTailQ cpus;
407 #define CPU_NEXT(cpu) QTAILQ_NEXT(cpu, node)
408 #define CPU_FOREACH(cpu) QTAILQ_FOREACH(cpu, &cpus, node)
409 #define CPU_FOREACH_SAFE(cpu, next_cpu) \
410 QTAILQ_FOREACH_SAFE(cpu, &cpus, node, next_cpu)
411 #define CPU_FOREACH_REVERSE(cpu) \
412 QTAILQ_FOREACH_REVERSE(cpu, &cpus, CPUTailQ, node)
413 #define first_cpu QTAILQ_FIRST(&cpus)
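/*
 * Illustrative sketch (helper name is hypothetical): walking the global CPU
 * list, e.g. to kick every vCPU.
 *
 *     static void kick_all_vcpus(void)
 *     {
 *         CPUState *cpu;
 *
 *         CPU_FOREACH(cpu) {
 *             qemu_cpu_kick(cpu);
 *         }
 *     }
 */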
414
415 extern __thread CPUState *current_cpu;
416
417 /**
418 * cpu_paging_enabled:
419 * @cpu: The CPU whose state is to be inspected.
420 *
421 * Returns: %true if paging is enabled, %false otherwise.
422 */
423 bool cpu_paging_enabled(const CPUState *cpu);
424
425 /**
426 * cpu_get_memory_mapping:
427 * @cpu: The CPU whose memory mappings are to be obtained.
428 * @list: Where to write the memory mappings to.
429 * @errp: Pointer for reporting an #Error.
430 */
431 void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
432 Error **errp);
433
434 /**
435 * cpu_write_elf64_note:
436 * @f: pointer to a function that writes memory to a file
437 * @cpu: The CPU whose memory is to be dumped
438 * @cpuid: ID number of the CPU
439 * @opaque: pointer to the CPUState struct
440 */
441 int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
442 int cpuid, void *opaque);
443
444 /**
445 * cpu_write_elf64_qemunote:
446 * @f: pointer to a function that writes memory to a file
447 * @cpu: The CPU whose memory is to be dumped
448 * @cpuid: ID number of the CPU
449 * @opaque: pointer to the CPUState struct
450 */
451 int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
452 void *opaque);
453
454 /**
455 * cpu_write_elf32_note:
456 * @f: pointer to a function that writes memory to a file
457 * @cpu: The CPU whose memory is to be dumped
458 * @cpuid: ID number of the CPU
459 * @opaque: pointer to the CPUState struct
460 */
461 int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
462 int cpuid, void *opaque);
463
464 /**
465 * cpu_write_elf32_qemunote:
466 * @f: pointer to a function that writes memory to a file
467 * @cpu: The CPU whose memory is to be dumped
468 * @cpuid: ID number of the CPU
469 * @opaque: pointer to the CPUState struct
470 */
471 int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
472 void *opaque);
473
474 /**
475 * CPUDumpFlags:
476 * @CPU_DUMP_CODE: dump guest code around the current program counter
477 * @CPU_DUMP_FPU: dump FPU register state, not just integer
478 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
479 */
480 enum CPUDumpFlags {
481 CPU_DUMP_CODE = 0x00010000,
482 CPU_DUMP_FPU = 0x00020000,
483 CPU_DUMP_CCOP = 0x00040000,
484 };
485
486 /**
487 * cpu_dump_state:
488 * @cpu: The CPU whose state is to be dumped.
489 * @f: File to dump to.
490 * @cpu_fprintf: Function to dump with.
491 * @flags: Flags selecting what to dump.
492 *
493 * Dumps CPU state.
494 */
495 void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
496 int flags);
497
498 /**
499 * cpu_dump_statistics:
500 * @cpu: The CPU whose state is to be dumped.
501 * @f: File to dump to.
502 * @cpu_fprintf: Function to dump with.
503 * @flags: Flags selecting what to dump.
504 *
505 * Dumps CPU statistics.
506 */
507 void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
508 int flags);
509
510 #ifndef CONFIG_USER_ONLY
511 /**
512 * cpu_get_phys_page_attrs_debug:
513 * @cpu: The CPU to obtain the physical page address for.
514 * @addr: The virtual address.
515 * @attrs: Updated on return with the memory transaction attributes to use
516 * for this access.
517 *
518 * Obtains the physical page corresponding to a virtual one, together
519 * with the corresponding memory transaction attributes to use for the access.
520 * Use it only for debugging because no protection checks are done.
521 *
522 * Returns: Corresponding physical page address or -1 if no page found.
523 */
524 static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
525 MemTxAttrs *attrs)
526 {
527 CPUClass *cc = CPU_GET_CLASS(cpu);
528
529 if (cc->get_phys_page_attrs_debug) {
530 return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
531 }
532 /* Fallback for CPUs which don't implement the _attrs_ hook */
533 *attrs = MEMTXATTRS_UNSPECIFIED;
534 return cc->get_phys_page_debug(cpu, addr);
535 }
536
537 /**
538 * cpu_get_phys_page_debug:
539 * @cpu: The CPU to obtain the physical page address for.
540 * @addr: The virtual address.
541 *
542 * Obtains the physical page corresponding to a virtual one.
543 * Use it only for debugging because no protection checks are done.
544 *
545 * Returns: Corresponding physical page address or -1 if no page found.
546 */
547 static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
548 {
549 MemTxAttrs attrs = {};
550
551 return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
552 }
553
554 /** cpu_asidx_from_attrs:
555 * @cpu: CPU
556 * @attrs: memory transaction attributes
557 *
558 * Returns the address space index specifying the CPU AddressSpace
559 * to use for a memory access with the given transaction attributes.
560 */
561 static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
562 {
563 CPUClass *cc = CPU_GET_CLASS(cpu);
564
565 if (cc->asidx_from_attrs) {
566 return cc->asidx_from_attrs(cpu, attrs);
567 }
568 return 0;
569 }
570 #endif
571
572 /**
573 * cpu_list_add:
574 * @cpu: The CPU to be added to the list of CPUs.
575 */
576 void cpu_list_add(CPUState *cpu);
577
578 /**
579 * cpu_list_remove:
580 * @cpu: The CPU to be removed from the list of CPUs.
581 */
582 void cpu_list_remove(CPUState *cpu);
583
584 /**
585 * cpu_reset:
586 * @cpu: The CPU whose state is to be reset.
587 */
588 void cpu_reset(CPUState *cpu);
589
590 /**
591 * cpu_class_by_name:
592 * @typename: The CPU base type.
593 * @cpu_model: The model string without any parameters.
594 *
595 * Looks up a CPU #ObjectClass matching name @cpu_model.
596 *
597 * Returns: A #CPUClass or %NULL if no matching class is found.
598 */
599 ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
600
601 /**
602 * cpu_generic_init:
603 * @typename: The CPU base type.
604 * @cpu_model: The model string including optional parameters.
605 *
606 * Instantiates a CPU, processes optional parameters and realizes the CPU.
607 *
608 * Returns: A #CPUState or %NULL if an error occurred.
609 */
610 CPUState *cpu_generic_init(const char *typename, const char *cpu_model);
611
612 /**
613 * cpu_has_work:
614 * @cpu: The vCPU to check.
615 *
616 * Checks whether the CPU has work to do.
617 *
618 * Returns: %true if the CPU has work, %false otherwise.
619 */
620 static inline bool cpu_has_work(CPUState *cpu)
621 {
622 CPUClass *cc = CPU_GET_CLASS(cpu);
623
624 g_assert(cc->has_work);
625 return cc->has_work(cpu);
626 }
627
628 /**
629 * qemu_cpu_is_self:
630 * @cpu: The vCPU to check against.
631 *
632 * Checks whether the caller is executing on the vCPU thread.
633 *
634 * Returns: %true if called from @cpu's thread, %false otherwise.
635 */
636 bool qemu_cpu_is_self(CPUState *cpu);
637
638 /**
639 * qemu_cpu_kick:
640 * @cpu: The vCPU to kick.
641 *
642 * Kicks @cpu's thread.
643 */
644 void qemu_cpu_kick(CPUState *cpu);
645
646 /**
647 * cpu_is_stopped:
648 * @cpu: The CPU to check.
649 *
650 * Checks whether the CPU is stopped.
651 *
652 * Returns: %true if run state is not running or if artificially stopped;
653 * %false otherwise.
654 */
655 bool cpu_is_stopped(CPUState *cpu);
656
657 /**
658 * do_run_on_cpu:
659 * @cpu: The vCPU to run on.
660 * @func: The function to be executed.
661 * @data: Data to pass to the function.
662 * @mutex: Mutex to release while waiting for @func to run.
663 *
664 * Used internally in the implementation of run_on_cpu.
665 */
666 void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
667 QemuMutex *mutex);
668
669 /**
670 * run_on_cpu:
671 * @cpu: The vCPU to run on.
672 * @func: The function to be executed.
673 * @data: Data to pass to the function.
674 *
675 * Schedules the function @func for execution on the vCPU @cpu.
676 */
677 void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
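/*
 * Illustrative sketch (function and argument names are hypothetical):
 * run_on_cpu() queues @func for the target vCPU and waits for it to finish,
 * passing the payload through a run_on_cpu_data wrapper.
 *
 *     static void update_state_work(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         struct UpdateArgs *args = data.host_ptr;
 *         ...  executed on @cpu  ...
 *     }
 *
 *     run_on_cpu(cpu, update_state_work, RUN_ON_CPU_HOST_PTR(&args));
 */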
678
679 /**
680 * async_run_on_cpu:
681 * @cpu: The vCPU to run on.
682 * @func: The function to be executed.
683 * @data: Data to pass to the function.
684 *
685 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
686 */
687 void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
688
689 /**
690 * async_safe_run_on_cpu:
691 * @cpu: The vCPU to run on.
692 * @func: The function to be executed.
693 * @data: Data to pass to the function.
694 *
695 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
696 * while all other vCPUs are sleeping.
697 *
698 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
699 * BQL.
700 */
701 void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
702
703 /**
704 * qemu_get_cpu:
705 * @index: The cpu_index value of the CPU to obtain.
706 *
707 * Gets a CPU matching @index.
708 *
709 * Returns: The CPU or %NULL if there is no matching CPU.
710 */
711 CPUState *qemu_get_cpu(int index);
712
713 /**
714 * cpu_exists:
715 * @id: Guest-exposed CPU ID to lookup.
716 *
717 * Search for CPU with specified ID.
718 *
719 * Returns: %true if a CPU with the specified ID is found, %false otherwise.
720 */
721 bool cpu_exists(int64_t id);
722
723 /**
724 * cpu_throttle_set:
725 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
726 *
727 * Throttles all vcpus by forcing them to sleep for the given percentage of
728 * time. A throttle_percentage of 25 corresponds roughly to a 75% duty cycle
729 * (for example, 10ms of sleep for every 30ms awake).
730 *
731 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
732 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
733 * is called.
734 */
735 void cpu_throttle_set(int new_throttle_pct);
736
737 /**
738 * cpu_throttle_stop:
739 *
740 * Stops the vcpu throttling started by cpu_throttle_set.
741 */
742 void cpu_throttle_stop(void);
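/*
 * Illustrative sketch: throttling vcpus to 25% sleep time and releasing them
 * again once no longer needed.
 *
 *     cpu_throttle_set(25);             ...  vcpus now sleep ~25% of the time
 *     ...  migration convergence work  ...
 *     if (cpu_throttle_active()) {
 *         cpu_throttle_stop();
 *     }
 */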
743
744 /**
745 * cpu_throttle_active:
746 *
747 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
748 */
749 bool cpu_throttle_active(void);
750
751 /**
752 * cpu_throttle_get_percentage:
753 *
754 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
755 *
756 * Returns: The throttle percentage in range 1 to 99.
757 */
758 int cpu_throttle_get_percentage(void);
759
760 #ifndef CONFIG_USER_ONLY
761
762 typedef void (*CPUInterruptHandler)(CPUState *, int);
763
764 extern CPUInterruptHandler cpu_interrupt_handler;
765
766 /**
767 * cpu_interrupt:
768 * @cpu: The CPU to set an interrupt on.
769 * @mask: The interrupts to set.
770 *
771 * Invokes the interrupt handler.
772 */
773 static inline void cpu_interrupt(CPUState *cpu, int mask)
774 {
775 cpu_interrupt_handler(cpu, mask);
776 }
777
778 #else /* USER_ONLY */
779
780 void cpu_interrupt(CPUState *cpu, int mask);
781
782 #endif /* USER_ONLY */
783
784 #ifdef CONFIG_SOFTMMU
785 static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
786 bool is_write, bool is_exec,
787 int opaque, unsigned size)
788 {
789 CPUClass *cc = CPU_GET_CLASS(cpu);
790
791 if (cc->do_unassigned_access) {
792 cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
793 }
794 }
795
796 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
797 MMUAccessType access_type,
798 int mmu_idx, uintptr_t retaddr)
799 {
800 CPUClass *cc = CPU_GET_CLASS(cpu);
801
802 cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
803 }
804 #endif
805
806 /**
807 * cpu_set_pc:
808 * @cpu: The CPU to set the program counter for.
809 * @addr: Program counter value.
810 *
811 * Sets the program counter for a CPU.
812 */
813 static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
814 {
815 CPUClass *cc = CPU_GET_CLASS(cpu);
816
817 cc->set_pc(cpu, addr);
818 }
819
820 /**
821 * cpu_reset_interrupt:
822 * @cpu: The CPU to clear the interrupt on.
823 * @mask: The interrupt mask to clear.
824 *
825 * Resets interrupts on the vCPU @cpu.
826 */
827 void cpu_reset_interrupt(CPUState *cpu, int mask);
828
829 /**
830 * cpu_exit:
831 * @cpu: The CPU to exit.
832 *
833 * Requests the CPU @cpu to exit execution.
834 */
835 void cpu_exit(CPUState *cpu);
836
837 /**
838 * cpu_resume:
839 * @cpu: The CPU to resume.
840 *
841 * Resumes CPU, i.e. puts CPU into runnable state.
842 */
843 void cpu_resume(CPUState *cpu);
844
845 /**
846 * cpu_remove:
847 * @cpu: The CPU to remove.
848 *
849 * Requests the CPU to be removed.
850 */
851 void cpu_remove(CPUState *cpu);
852
853 /**
854 * cpu_remove_sync:
855 * @cpu: The CPU to remove.
856 *
857 * Requests the CPU to be removed and waits until it has been removed.
858 */
859 void cpu_remove_sync(CPUState *cpu);
860
861 /**
862 * process_queued_cpu_work() - process all items on CPU work queue
863 * @cpu: The CPU which work queue to process.
864 */
865 void process_queued_cpu_work(CPUState *cpu);
866
867 /**
868 * cpu_exec_start:
869 * @cpu: The CPU for the current thread.
870 *
871 * Record that a CPU has started execution and can be interrupted with
872 * cpu_exit.
873 */
874 void cpu_exec_start(CPUState *cpu);
875
876 /**
877 * cpu_exec_end:
878 * @cpu: The CPU for the current thread.
879 *
880 * Record that a CPU has stopped execution and exclusive sections
881 * can be executed without interrupting it.
882 */
883 void cpu_exec_end(CPUState *cpu);
884
885 /**
886 * start_exclusive:
887 *
888 * Wait for a concurrent exclusive section to end, and then start
889 * a section of work that is run while other CPUs are not running
890 * between cpu_exec_start and cpu_exec_end. CPUs that are running
891 * cpu_exec are exited immediately. CPUs that call cpu_exec_start
892 * during the exclusive section go to sleep until this CPU calls
893 * end_exclusive.
894 */
895 void start_exclusive(void);
896
897 /**
898 * end_exclusive:
899 *
900 * Concludes an exclusive execution section started by start_exclusive.
901 */
902 void end_exclusive(void);
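/*
 * Illustrative sketch: running work that must not race with any vCPU's
 * execution section.
 *
 *     start_exclusive();
 *     ...  no other CPU is between cpu_exec_start and cpu_exec_end here  ...
 *     end_exclusive();
 */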
903
904 /**
905 * qemu_init_vcpu:
906 * @cpu: The vCPU to initialize.
907 *
908 * Initializes a vCPU.
909 */
910 void qemu_init_vcpu(CPUState *cpu);
911
912 #define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
913 #define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
914 #define SSTEP_NOTIMER 0x4 /* Do not use timers while single stepping */
915
916 /**
917 * cpu_single_step:
918 * @cpu: CPU to set the flags for.
919 * @enabled: Flags to enable.
920 *
921 * Enables or disables single-stepping for @cpu.
922 */
923 void cpu_single_step(CPUState *cpu, int enabled);
924
925 /* Breakpoint/watchpoint flags */
926 #define BP_MEM_READ 0x01
927 #define BP_MEM_WRITE 0x02
928 #define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
929 #define BP_STOP_BEFORE_ACCESS 0x04
930 /* 0x08 currently unused */
931 #define BP_GDB 0x10
932 #define BP_CPU 0x20
933 #define BP_ANY (BP_GDB | BP_CPU)
934 #define BP_WATCHPOINT_HIT_READ 0x40
935 #define BP_WATCHPOINT_HIT_WRITE 0x80
936 #define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
937
938 int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
939 CPUBreakpoint **breakpoint);
940 int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
941 void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
942 void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
943
944 /* Return true if PC matches an installed breakpoint. */
945 static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
946 {
947 CPUBreakpoint *bp;
948
949 if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
950 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
951 if (bp->pc == pc && (bp->flags & mask)) {
952 return true;
953 }
954 }
955 }
956 return false;
957 }
958
959 int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
960 int flags, CPUWatchpoint **watchpoint);
961 int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
962 vaddr len, int flags);
963 void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
964 void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
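/*
 * Illustrative sketch ("addr" is a hypothetical guest virtual address;
 * assumes the usual 0-on-success return convention):
 *
 *     CPUWatchpoint *wp;
 *
 *     if (cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) == 0) {
 *         ...  4-byte write watchpoint armed  ...
 *         cpu_watchpoint_remove_by_ref(cpu, wp);
 *     }
 */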
965
966 /**
967 * cpu_get_address_space:
968 * @cpu: CPU to get address space from
969 * @asidx: index identifying which address space to get
970 *
971 * Return the requested address space of this CPU. @asidx
972 * specifies which address space to read.
973 */
974 AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
975
976 void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
977 GCC_FMT_ATTR(2, 3);
978 void cpu_exec_initfn(CPUState *cpu);
979 void cpu_exec_realizefn(CPUState *cpu, Error **errp);
980 void cpu_exec_unrealizefn(CPUState *cpu);
981
982 #ifdef CONFIG_SOFTMMU
983 extern const struct VMStateDescription vmstate_cpu_common;
984 #else
985 #define vmstate_cpu_common vmstate_dummy
986 #endif
987
988 #define VMSTATE_CPU() { \
989 .name = "parent_obj", \
990 .size = sizeof(CPUState), \
991 .vmsd = &vmstate_cpu_common, \
992 .flags = VMS_STRUCT, \
993 .offset = 0, \
994 }
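/*
 * Illustrative sketch (target name hypothetical): VMSTATE_CPU() is intended
 * as the first entry of a CPU subclass's VMStateDescription field list, so
 * that the common CPUState fields are migrated along with the rest.
 *
 *     static const VMStateDescription vmstate_foo_cpu = {
 *         .name = "cpu",
 *         .version_id = 1,
 *         .minimum_version_id = 1,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_CPU(),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */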
995
996 #define UNASSIGNED_CPU_INDEX -1
997
998 #endif