]> git.proxmox.com Git - mirror_qemu.git/blame - include/qom/cpu.h
vfio: make vfio_address_spaces static
[mirror_qemu.git] / include / qom / cpu.h
CommitLineData
dd83b06a
AF
1/*
2 * QEMU CPU model
3 *
4 * Copyright (c) 2012 SUSE LINUX Products GmbH
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see
18 * <http://www.gnu.org/licenses/gpl-2.0.html>
19 */
20#ifndef QEMU_CPU_H
21#define QEMU_CPU_H
22
961f8395 23#include "hw/qdev-core.h"
37b9de46 24#include "disas/bfd.h"
c658b94f 25#include "exec/hwaddr.h"
66b9b43c 26#include "exec/memattrs.h"
9af23989 27#include "qapi/qapi-types-run-state.h"
48151859 28#include "qemu/bitmap.h"
c0b05ec5 29#include "qemu/fprintf-fn.h"
068a5ea0 30#include "qemu/rcu_queue.h"
bdc44640 31#include "qemu/queue.h"
1de7afc9 32#include "qemu/thread.h"
dd83b06a 33
b5ba1cc6
QN
34typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
35 void *opaque);
c72bf468 36
577f42c0
AF
37/**
38 * vaddr:
39 * Type wide enough to contain any #target_ulong virtual address.
40 */
41typedef uint64_t vaddr;
42#define VADDR_PRId PRId64
43#define VADDR_PRIu PRIu64
44#define VADDR_PRIo PRIo64
45#define VADDR_PRIx PRIx64
46#define VADDR_PRIX PRIX64
47#define VADDR_MAX UINT64_MAX
48
dd83b06a
AF
49/**
50 * SECTION:cpu
51 * @section_id: QEMU-cpu
52 * @title: CPU Class
53 * @short_description: Base class for all CPUs
54 */
55
56#define TYPE_CPU "cpu"
57
0d6d1ab4
AF
58/* Since this macro is used a lot in hot code paths and in conjunction with
59 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
60 * an unchecked cast.
61 */
62#define CPU(obj) ((CPUState *)(obj))
63
dd83b06a
AF
64#define CPU_CLASS(class) OBJECT_CLASS_CHECK(CPUClass, (class), TYPE_CPU)
65#define CPU_GET_CLASS(obj) OBJECT_GET_CLASS(CPUClass, (obj), TYPE_CPU)
66
b35399bb
SS
/* Kind of memory access, as seen by the MMU hooks (do_unaligned_access,
 * do_transaction_failed, mem_io_access_type).
 */
typedef enum MMUAccessType {
    MMU_DATA_LOAD  = 0,   /* data read */
    MMU_DATA_STORE = 1,   /* data write */
    MMU_INST_FETCH = 2    /* instruction fetch */
} MMUAccessType;
72
568496c0 73typedef struct CPUWatchpoint CPUWatchpoint;
dd83b06a 74
c658b94f
AF
75typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
76 bool is_write, bool is_exec, int opaque,
77 unsigned size);
78
bdf7ae5b
AF
79struct TranslationBlock;
80
dd83b06a
AF
81/**
82 * CPUClass:
2b8c2754
AF
83 * @class_by_name: Callback to map -cpu command line model name to an
84 * instantiatable CPU type.
94a444b2 85 * @parse_features: Callback to parse command line arguments.
f5df5baf 86 * @reset: Callback to reset the #CPUState to its initial state.
91b1df8c 87 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
8c2e1b00 88 * @has_work: Callback for checking if there is work to do.
97a8ea5a 89 * @do_interrupt: Callback for interrupt handling.
c658b94f 90 * @do_unassigned_access: Callback for unassigned access handling.
0dff0939 91 * (this is deprecated: new targets should use do_transaction_failed instead)
93e22326
PB
92 * @do_unaligned_access: Callback for unaligned access handling, if
93 * the target defines #ALIGNED_ONLY.
0dff0939
PM
94 * @do_transaction_failed: Callback for handling failed memory transactions
95 * (ie bus faults or external aborts; not MMU faults)
c08295d4
PM
96 * @virtio_is_big_endian: Callback to return %true if a CPU which supports
97 * runtime configurable endianness is currently big-endian. Non-configurable
98 * CPUs can use the default implementation of this method. This method should
99 * not be used by any callers other than the pre-1.0 virtio devices.
f3659eee 100 * @memory_rw_debug: Callback for GDB memory access.
878096ee
AF
101 * @dump_state: Callback for dumping state.
102 * @dump_statistics: Callback for dumping statistics.
997395d3 103 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
444d5590 104 * @get_paging_enabled: Callback for inquiring whether paging is enabled.
a23bbfda 105 * @get_memory_mapping: Callback for obtaining the memory mappings.
f45748f1 106 * @set_pc: Callback for setting the Program Counter register.
bdf7ae5b
AF
107 * @synchronize_from_tb: Callback for synchronizing state from a TCG
108 * #TranslationBlock.
7510454e 109 * @handle_mmu_fault: Callback for handling an MMU fault.
00b941e5 110 * @get_phys_page_debug: Callback for obtaining a physical address.
1dc6fb1f
PM
111 * @get_phys_page_attrs_debug: Callback for obtaining a physical address and the
112 * associated memory transaction attributes to use for the access.
113 * CPUs which use memory transaction attributes should implement this
114 * instead of get_phys_page_debug.
d7f25a9e
PM
115 * @asidx_from_attrs: Callback to return the CPU AddressSpace to use for
116 * a memory access with the specified memory transaction attributes.
5b50e790
AF
117 * @gdb_read_register: Callback for letting GDB read a register.
118 * @gdb_write_register: Callback for letting GDB write a register.
568496c0
SF
119 * @debug_check_watchpoint: Callback: return true if the architectural
120 * watchpoint whose address has matched should really fire.
86025ee4 121 * @debug_excp_handler: Callback for handling debug exceptions.
c08295d4
PM
122 * @write_elf64_note: Callback for writing a CPU-specific ELF note to a
123 * 64-bit VM coredump.
124 * @write_elf64_qemunote: Callback for writing a CPU- and QEMU-specific ELF
125 * note to a 64-bit VM coredump.
126 * @write_elf32_note: Callback for writing a CPU-specific ELF note to a
127 * 32-bit VM coredump.
128 * @write_elf32_qemunote: Callback for writing a CPU- and QEMU-specific ELF
129 * note to a 32-bit VM coredump.
b170fce3 130 * @vmsd: State description for migration.
a0e372f0 131 * @gdb_num_core_regs: Number of core registers accessible to GDB.
5b24c641 132 * @gdb_core_xml_file: File name for core registers GDB XML description.
2472b6c0
PM
133 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
134 * before the insn which triggers a watchpoint rather than after it.
b3820e6c
DH
135 * @gdb_arch_name: Optional callback that returns the architecture name known
136 * to GDB. The caller must free the returned string with g_free.
200bf5b7
AB
137 * @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
138 * gdb stub. Returns a pointer to the XML contents for the specified XML file
139 * or NULL if the CPU doesn't have a dynamically generated content for it.
cffe7b32
RH
140 * @cpu_exec_enter: Callback for cpu_exec preparation.
141 * @cpu_exec_exit: Callback for cpu_exec cleanup.
9585db68 142 * @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec.
37b9de46 143 * @disas_set_info: Setup architecture specific components of disassembly info
40612000
JB
144 * @adjust_watchpoint_address: Perform a target-specific adjustment to an
145 * address before attempting to match it against watchpoints.
dd83b06a
AF
146 *
147 * Represents a CPU family or model.
148 */
typedef struct CPUClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/

    /* Model lookup and -cpu command line parsing. */
    ObjectClass *(*class_by_name)(const char *cpu_model);
    void (*parse_features)(const char *typename, char *str, Error **errp);

    void (*reset)(CPUState *cpu);
    int reset_dump_flags;
    bool (*has_work)(CPUState *cpu);
    void (*do_interrupt)(CPUState *cpu);
    /* Deprecated: new targets should use do_transaction_failed instead. */
    CPUUnassignedAccess do_unassigned_access;
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr);
    void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
                                  unsigned size, MMUAccessType access_type,
                                  int mmu_idx, MemTxAttrs attrs,
                                  MemTxResult response, uintptr_t retaddr);
    bool (*virtio_is_big_endian)(CPUState *cpu);
    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                           uint8_t *buf, int len, bool is_write);
    void (*dump_state)(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
                       int flags);
    GuestPanicInformation* (*get_crash_info)(CPUState *cpu);
    void (*dump_statistics)(CPUState *cpu, FILE *f,
                            fprintf_function cpu_fprintf, int flags);
    int64_t (*get_arch_id)(CPUState *cpu);
    bool (*get_paging_enabled)(const CPUState *cpu);
    void (*get_memory_mapping)(CPUState *cpu, MemoryMappingList *list,
                               Error **errp);
    void (*set_pc)(CPUState *cpu, vaddr value);
    void (*synchronize_from_tb)(CPUState *cpu, struct TranslationBlock *tb);
    int (*handle_mmu_fault)(CPUState *cpu, vaddr address, int size, int rw,
                            int mmu_index);
    hwaddr (*get_phys_page_debug)(CPUState *cpu, vaddr addr);
    hwaddr (*get_phys_page_attrs_debug)(CPUState *cpu, vaddr addr,
                                        MemTxAttrs *attrs);
    int (*asidx_from_attrs)(CPUState *cpu, MemTxAttrs attrs);
    /* GDB stub hooks. */
    int (*gdb_read_register)(CPUState *cpu, uint8_t *buf, int reg);
    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);
    void (*debug_excp_handler)(CPUState *cpu);

    /* ELF-note writers for VM coredumps (64- and 32-bit formats). */
    int (*write_elf64_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf64_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);
    int (*write_elf32_note)(WriteCoreDumpFunction f, CPUState *cpu,
                            int cpuid, void *opaque);
    int (*write_elf32_qemunote)(WriteCoreDumpFunction f, CPUState *cpu,
                                void *opaque);

    const struct VMStateDescription *vmsd;
    const char *gdb_core_xml_file;
    gchar * (*gdb_arch_name)(CPUState *cpu);
    const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
    void (*cpu_exec_enter)(CPUState *cpu);
    void (*cpu_exec_exit)(CPUState *cpu);
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);

    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
    vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);
    void (*tcg_initialize)(void);

    /* Keep non-pointer data at the end to minimize holes. */
    int gdb_num_core_regs;
    bool gdb_stop_before_watchpoint;
} CPUClass;
219
28ecfd7a
AF
/* 16-bit halves of the 32-bit icount decrementer, declared in host
 * endianness order so that .low/.high overlay the corresponding halves
 * of the u32 member of the icount_decr union in CPUState.
 */
#ifdef HOST_WORDS_BIGENDIAN
typedef struct icount_decr_u16 {
    uint16_t high;
    uint16_t low;
} icount_decr_u16;
#else
typedef struct icount_decr_u16 {
    uint16_t low;
    uint16_t high;
} icount_decr_u16;
#endif
231
f0c3c505
AF
/* One guest breakpoint, linked into CPUState.breakpoints. */
typedef struct CPUBreakpoint {
    vaddr pc;             /* guest virtual address of the breakpoint */
    int flags;            /* BP_* */
    QTAILQ_ENTRY(CPUBreakpoint) entry;
} CPUBreakpoint;
237
/* One guest watchpoint, linked into CPUState.watchpoints.
 * hitaddr/hitattrs describe the access that last triggered it.
 */
struct CPUWatchpoint {
    vaddr vaddr;          /* start of the watched range */
    vaddr len;            /* length of the watched range */
    vaddr hitaddr;        /* address of the triggering access */
    MemTxAttrs hitattrs;  /* attributes of the triggering access */
    int flags;            /* BP_* */
    QTAILQ_ENTRY(CPUWatchpoint) entry;
};
ff4700b0 246
a60f24b5 247struct KVMState;
f7575c96 248struct kvm_run;
a60f24b5 249
b0cb0a66
VP
250struct hax_vcpu_state;
251
8cd70437
AF
252#define TB_JMP_CACHE_BITS 12
253#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
254
4b4629d9 255/* work queue */
14e6fe12
PB
256
257/* The union type allows passing of 64 bit target pointers on 32 bit
258 * hosts in a single parameter
259 */
/* The union type allows passing of 64 bit target pointers on 32 bit
 * hosts in a single parameter
 */
typedef union {
    int host_int;
    unsigned long host_ulong;
    void *host_ptr;
    vaddr target_ptr;
} run_on_cpu_data;

/* Convenience constructors selecting the appropriate union member. */
#define RUN_ON_CPU_HOST_PTR(p)    ((run_on_cpu_data){.host_ptr = (p)})
#define RUN_ON_CPU_HOST_INT(i)    ((run_on_cpu_data){.host_int = (i)})
#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
#define RUN_ON_CPU_TARGET_PTR(v)  ((run_on_cpu_data){.target_ptr = (v)})
#define RUN_ON_CPU_NULL           RUN_ON_CPU_HOST_PTR(NULL)

/* Signature of work items scheduled via run_on_cpu() and friends. */
typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
274
d148d90e 275struct qemu_work_item;
4b4629d9 276
0b8497f0 277#define CPU_UNSET_NUMA_NODE_ID -1
d01c05c9 278#define CPU_TRACE_DSTATE_MAX_EVENTS 32
0b8497f0 279
dd83b06a
AF
280/**
281 * CPUState:
55e5c285 282 * @cpu_index: CPU index (informative).
ce3960eb
AF
283 * @nr_cores: Number of cores within this CPU package.
284 * @nr_threads: Number of threads within this CPU.
c265e976
PB
285 * @running: #true if CPU is currently running (lockless).
286 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
ab129972 287 * valid under cpu_list_lock.
61a46217 288 * @created: Indicates whether the CPU thread has been successfully created.
259186a7
AF
289 * @interrupt_request: Indicates a pending interrupt request.
290 * @halted: Nonzero if the CPU is in suspended state.
4fdeee7c 291 * @stop: Indicates a pending stop request.
f324e766 292 * @stopped: Indicates the CPU has been artificially stopped.
4c055ab5 293 * @unplug: Indicates a pending CPU unplug request.
bac05aa9 294 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
ed2803da 295 * @singlestep_enabled: Flags for single-stepping.
efee7340 296 * @icount_extra: Instructions until next timer event.
1aab16c2
PB
297 * @icount_decr: Low 16 bits: number of cycles left, only used in icount mode.
298 * High 16 bits: Set to -1 to force TCG to stop executing linked TBs for this
299 * CPU and return to its top level loop (even in non-icount mode).
28ecfd7a
AF
300 * This allows a single read-compare-cbranch-write sequence to test
301 * for both decrementer underflow and exceptions.
414b15c9
PB
302 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
303 * requires that IO only be performed on the last instruction of a TB
304 * so that interrupts take effect immediately.
32857f4d
PM
305 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
306 * AddressSpaces this CPU has)
12ebc9a7 307 * @num_ases: number of CPUAddressSpaces in @cpu_ases
32857f4d
PM
308 * @as: Pointer to the first AddressSpace, for the convenience of targets which
309 * only have a single AddressSpace
c05efcb1 310 * @env_ptr: Pointer to subclass-specific CPUArchState field.
eac8b355 311 * @gdb_regs: Additional GDB registers.
a0e372f0 312 * @gdb_num_regs: Number of total registers accessible to GDB.
35143f01 313 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
182735ef 314 * @next_cpu: Next CPU sharing TB cache.
0429a971 315 * @opaque: User data.
93afeade
AF
316 * @mem_io_pc: Host Program Counter at which the memory was accessed.
317 * @mem_io_vaddr: Target virtual address at which the memory was accessed.
8737c51c 318 * @kvm_fd: vCPU file descriptor for KVM.
376692b9
PB
319 * @work_mutex: Lock to prevent multiple access to queued_work_*.
320 * @queued_work_first: First asynchronous work pending.
d4381116
LV
321 * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
322 * to @trace_dstate).
48151859 323 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
ed860129
PM
324 * @ignore_memory_transaction_failures: Cached copy of the MachineState
325 * flag of the same name: allows the board to suppress calling of the
326 * CPU do_transaction_failed hook function.
dd83b06a
AF
327 *
328 * State of one CPU core or thread.
329 */
struct CPUState {
    /*< private >*/
    DeviceState parent_obj;
    /*< public >*/

    int nr_cores;
    int nr_threads;

    struct QemuThread *thread;
#ifdef _WIN32
    HANDLE hThread;
#endif
    int thread_id;
    bool running, has_waiter;
    struct QemuCond *halt_cond;
    bool thread_kicked;
    bool created;
    bool stop;
    bool stopped;
    bool unplug;
    bool crash_occurred;
    bool exit_request;
    uint32_t cflags_next_tb;
    /* updates protected by BQL */
    uint32_t interrupt_request;
    int singlestep_enabled;
    int64_t icount_budget;
    int64_t icount_extra;
    sigjmp_buf jmp_env;

    QemuMutex work_mutex;
    struct qemu_work_item *queued_work_first, *queued_work_last;

    CPUAddressSpace *cpu_ases;
    int num_ases;
    AddressSpace *as;
    MemoryRegion *memory;

    void *env_ptr; /* CPUArchState */

    /* Accessed in parallel; all accesses must be atomic */
    struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];

    struct GDBRegisterState *gdb_regs;
    int gdb_num_regs;
    int gdb_num_g_regs;
    QTAILQ_ENTRY(CPUState) node;

    /* ice debug support */
    QTAILQ_HEAD(breakpoints_head, CPUBreakpoint) breakpoints;

    QTAILQ_HEAD(watchpoints_head, CPUWatchpoint) watchpoints;
    CPUWatchpoint *watchpoint_hit;

    void *opaque;

    /* In order to avoid passing too many arguments to the MMIO helpers,
     * we store some rarely used information in the CPU context.
     */
    uintptr_t mem_io_pc;
    vaddr mem_io_vaddr;
    /*
     * This is only needed for the legacy cpu_unassigned_access() hook;
     * when all targets using it have been converted to use
     * cpu_transaction_failed() instead it can be removed.
     */
    MMUAccessType mem_io_access_type;

    int kvm_fd;
    struct KVMState *kvm_state;
    struct kvm_run *kvm_run;

    /* Used for events with 'vcpu' and *without* the 'disabled' properties */
    DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS);
    DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS);

    /* TODO Move common fields from CPUArchState here. */
    int cpu_index;
    uint32_t halted;
    uint32_t can_do_io;
    int32_t exception_index;

    /* shared by kvm, hax and hvf */
    bool vcpu_dirty;

    /* Used to keep track of an outstanding cpu throttle thread for migration
     * autoconverge
     */
    bool throttle_thread_scheduled;

    bool ignore_memory_transaction_failures;

    /* Note that this is accessed at the start of every TB via a negative
       offset from AREG0.  Leave this field at the end so as to make the
       (absolute value) offset as small as possible.  This reduces code
       size, especially for hosts without large memory offsets. */
    union {
        uint32_t u32;
        icount_decr_u16 u16;
    } icount_decr;

    struct hax_vcpu_state *hax_vcpu;

    int hvf_fd;

    /* track IOMMUs whose translations we've cached in the TCG TLB */
    GArray *iommu_notifiers;
};
438
bdc44640
AF
439QTAILQ_HEAD(CPUTailQ, CPUState);
440extern struct CPUTailQ cpus;
068a5ea0
EC
441#define first_cpu QTAILQ_FIRST_RCU(&cpus)
442#define CPU_NEXT(cpu) QTAILQ_NEXT_RCU(cpu, node)
443#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus, node)
bdc44640 444#define CPU_FOREACH_SAFE(cpu, next_cpu) \
068a5ea0 445 QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus, node, next_cpu)
182735ef 446
f240eb6f 447extern __thread CPUState *current_cpu;
4917cf44 448
f3ced3c5
EC
449static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
450{
451 unsigned int i;
452
453 for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
454 atomic_set(&cpu->tb_jmp_cache[i], NULL);
455 }
456}
457
8d4e9146
FK
458/**
459 * qemu_tcg_mttcg_enabled:
460 * Check whether we are running MultiThread TCG or not.
461 *
462 * Returns: %true if we are in MTTCG mode %false otherwise.
463 */
464extern bool mttcg_enabled;
465#define qemu_tcg_mttcg_enabled() (mttcg_enabled)
466
444d5590
AF
467/**
468 * cpu_paging_enabled:
469 * @cpu: The CPU whose state is to be inspected.
470 *
471 * Returns: %true if paging is enabled, %false otherwise.
472 */
473bool cpu_paging_enabled(const CPUState *cpu);
474
a23bbfda
AF
475/**
476 * cpu_get_memory_mapping:
477 * @cpu: The CPU whose memory mappings are to be obtained.
478 * @list: Where to write the memory mappings to.
479 * @errp: Pointer for reporting an #Error.
480 */
481void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
482 Error **errp);
483
c72bf468
JF
484/**
485 * cpu_write_elf64_note:
486 * @f: pointer to a function that writes memory to a file
487 * @cpu: The CPU whose memory is to be dumped
488 * @cpuid: ID number of the CPU
489 * @opaque: pointer to the CPUState struct
490 */
491int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
492 int cpuid, void *opaque);
493
494/**
495 * cpu_write_elf64_qemunote:
496 * @f: pointer to a function that writes memory to a file
497 * @cpu: The CPU whose memory is to be dumped
498 * @cpuid: ID number of the CPU
499 * @opaque: pointer to the CPUState struct
500 */
501int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
502 void *opaque);
503
504/**
505 * cpu_write_elf32_note:
506 * @f: pointer to a function that writes memory to a file
507 * @cpu: The CPU whose memory is to be dumped
508 * @cpuid: ID number of the CPU
509 * @opaque: pointer to the CPUState struct
510 */
511int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
512 int cpuid, void *opaque);
513
514/**
515 * cpu_write_elf32_qemunote:
516 * @f: pointer to a function that writes memory to a file
517 * @cpu: The CPU whose memory is to be dumped
518 * @cpuid: ID number of the CPU
519 * @opaque: pointer to the CPUState struct
520 */
521int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
522 void *opaque);
dd83b06a 523
c86f106b
AN
524/**
525 * cpu_get_crash_info:
526 * @cpu: The CPU to get crash information for
527 *
528 * Gets the previously saved crash information.
529 * Caller is responsible for freeing the data.
530 */
531GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
532
878096ee
AF
533/**
534 * CPUDumpFlags:
535 * @CPU_DUMP_CODE:
536 * @CPU_DUMP_FPU: dump FPU register state, not just integer
537 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
538 */
539enum CPUDumpFlags {
540 CPU_DUMP_CODE = 0x00010000,
541 CPU_DUMP_FPU = 0x00020000,
542 CPU_DUMP_CCOP = 0x00040000,
543};
544
545/**
546 * cpu_dump_state:
547 * @cpu: The CPU whose state is to be dumped.
548 * @f: File to dump to.
549 * @cpu_fprintf: Function to dump with.
550 * @flags: Flags what to dump.
551 *
552 * Dumps CPU state.
553 */
554void cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
555 int flags);
556
557/**
558 * cpu_dump_statistics:
559 * @cpu: The CPU whose state is to be dumped.
560 * @f: File to dump to.
561 * @cpu_fprintf: Function to dump with.
562 * @flags: Flags what to dump.
563 *
564 * Dumps CPU statistics.
565 */
566void cpu_dump_statistics(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
567 int flags);
568
00b941e5 569#ifndef CONFIG_USER_ONLY
1dc6fb1f
PM
570/**
571 * cpu_get_phys_page_attrs_debug:
572 * @cpu: The CPU to obtain the physical page address for.
573 * @addr: The virtual address.
574 * @attrs: Updated on return with the memory transaction attributes to use
575 * for this access.
576 *
577 * Obtains the physical page corresponding to a virtual one, together
578 * with the corresponding memory transaction attributes to use for the access.
579 * Use it only for debugging because no protection checks are done.
580 *
581 * Returns: Corresponding physical page address or -1 if no page found.
582 */
583static inline hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
584 MemTxAttrs *attrs)
585{
586 CPUClass *cc = CPU_GET_CLASS(cpu);
587
588 if (cc->get_phys_page_attrs_debug) {
589 return cc->get_phys_page_attrs_debug(cpu, addr, attrs);
590 }
591 /* Fallback for CPUs which don't implement the _attrs_ hook */
592 *attrs = MEMTXATTRS_UNSPECIFIED;
593 return cc->get_phys_page_debug(cpu, addr);
594}
595
00b941e5
AF
596/**
597 * cpu_get_phys_page_debug:
598 * @cpu: The CPU to obtain the physical page address for.
599 * @addr: The virtual address.
600 *
601 * Obtains the physical page corresponding to a virtual one.
602 * Use it only for debugging because no protection checks are done.
603 *
604 * Returns: Corresponding physical page address or -1 if no page found.
605 */
606static inline hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr)
607{
1dc6fb1f 608 MemTxAttrs attrs = {};
00b941e5 609
1dc6fb1f 610 return cpu_get_phys_page_attrs_debug(cpu, addr, &attrs);
00b941e5 611}
d7f25a9e
PM
612
613/** cpu_asidx_from_attrs:
614 * @cpu: CPU
615 * @attrs: memory transaction attributes
616 *
617 * Returns the address space index specifying the CPU AddressSpace
618 * to use for a memory access with the given transaction attributes.
619 */
620static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs)
621{
622 CPUClass *cc = CPU_GET_CLASS(cpu);
9c8c334b 623 int ret = 0;
d7f25a9e
PM
624
625 if (cc->asidx_from_attrs) {
9c8c334b
RH
626 ret = cc->asidx_from_attrs(cpu, attrs);
627 assert(ret < cpu->num_ases && ret >= 0);
d7f25a9e 628 }
9c8c334b 629 return ret;
d7f25a9e 630}
00b941e5
AF
631#endif
632
267f685b
PB
633/**
634 * cpu_list_add:
635 * @cpu: The CPU to be added to the list of CPUs.
636 */
637void cpu_list_add(CPUState *cpu);
638
639/**
640 * cpu_list_remove:
641 * @cpu: The CPU to be removed from the list of CPUs.
642 */
643void cpu_list_remove(CPUState *cpu);
644
dd83b06a
AF
645/**
646 * cpu_reset:
647 * @cpu: The CPU whose state is to be reset.
648 */
649void cpu_reset(CPUState *cpu);
650
2b8c2754
AF
651/**
652 * cpu_class_by_name:
653 * @typename: The CPU base type.
654 * @cpu_model: The model string without any parameters.
655 *
656 * Looks up a CPU #ObjectClass matching name @cpu_model.
657 *
658 * Returns: A #CPUClass or %NULL if not matching class is found.
659 */
660ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
661
3c72234c
IM
662/**
663 * cpu_create:
664 * @typename: The CPU type.
665 *
666 * Instantiates a CPU and realizes the CPU.
667 *
668 * Returns: A #CPUState or %NULL if an error occurred.
669 */
670CPUState *cpu_create(const char *typename);
671
672/**
2278b939 673 * parse_cpu_model:
3c72234c
IM
674 * @cpu_model: The model string including optional parameters.
675 *
676 * processes optional parameters and registers them as global properties
677 *
4482e05c
IM
678 * Returns: type of CPU to create or prints error and terminates process
679 * if an error occurred.
3c72234c 680 */
2278b939 681const char *parse_cpu_model(const char *cpu_model);
9262685b 682
3993c6bd 683/**
8c2e1b00 684 * cpu_has_work:
3993c6bd
AF
685 * @cpu: The vCPU to check.
686 *
687 * Checks whether the CPU has work to do.
688 *
689 * Returns: %true if the CPU has work, %false otherwise.
690 */
8c2e1b00
AF
691static inline bool cpu_has_work(CPUState *cpu)
692{
693 CPUClass *cc = CPU_GET_CLASS(cpu);
694
695 g_assert(cc->has_work);
696 return cc->has_work(cpu);
697}
3993c6bd 698
60e82579
AF
699/**
700 * qemu_cpu_is_self:
701 * @cpu: The vCPU to check against.
702 *
703 * Checks whether the caller is executing on the vCPU thread.
704 *
705 * Returns: %true if called from @cpu's thread, %false otherwise.
706 */
707bool qemu_cpu_is_self(CPUState *cpu);
708
c08d7424
AF
709/**
710 * qemu_cpu_kick:
711 * @cpu: The vCPU to kick.
712 *
713 * Kicks @cpu's thread.
714 */
715void qemu_cpu_kick(CPUState *cpu);
716
2fa45344
AF
717/**
718 * cpu_is_stopped:
719 * @cpu: The CPU to check.
720 *
721 * Checks whether the CPU is stopped.
722 *
723 * Returns: %true if run state is not running or if artificially stopped;
724 * %false otherwise.
725 */
726bool cpu_is_stopped(CPUState *cpu);
727
d148d90e
SF
728/**
729 * do_run_on_cpu:
730 * @cpu: The vCPU to run on.
731 * @func: The function to be executed.
732 * @data: Data to pass to the function.
733 * @mutex: Mutex to release while waiting for @func to run.
734 *
735 * Used internally in the implementation of run_on_cpu.
736 */
14e6fe12 737void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
d148d90e
SF
738 QemuMutex *mutex);
739
f100f0b3
AF
740/**
741 * run_on_cpu:
742 * @cpu: The vCPU to run on.
743 * @func: The function to be executed.
744 * @data: Data to pass to the function.
745 *
746 * Schedules the function @func for execution on the vCPU @cpu.
747 */
14e6fe12 748void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
f100f0b3 749
3c02270d
CV
750/**
751 * async_run_on_cpu:
752 * @cpu: The vCPU to run on.
753 * @func: The function to be executed.
754 * @data: Data to pass to the function.
755 *
756 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
757 */
14e6fe12 758void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
3c02270d 759
53f5ed95
PB
760/**
761 * async_safe_run_on_cpu:
762 * @cpu: The vCPU to run on.
763 * @func: The function to be executed.
764 * @data: Data to pass to the function.
765 *
766 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
767 * while all other vCPUs are sleeping.
768 *
769 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
770 * BQL.
771 */
14e6fe12 772void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
53f5ed95 773
38d8f5c8
AF
774/**
775 * qemu_get_cpu:
776 * @index: The CPUState@cpu_index value of the CPU to obtain.
777 *
778 * Gets a CPU matching @index.
779 *
780 * Returns: The CPU or %NULL if there is no matching CPU.
781 */
782CPUState *qemu_get_cpu(int index);
783
69e5ff06
IM
784/**
785 * cpu_exists:
786 * @id: Guest-exposed CPU ID to lookup.
787 *
788 * Search for CPU with specified ID.
789 *
790 * Returns: %true - CPU is found, %false - CPU isn't found.
791 */
792bool cpu_exists(int64_t id);
793
5ce46cb3
EH
794/**
795 * cpu_by_arch_id:
796 * @id: Guest-exposed CPU ID of the CPU to obtain.
797 *
798 * Get a CPU with matching @id.
799 *
800 * Returns: The CPU or %NULL if there is no matching CPU.
801 */
802CPUState *cpu_by_arch_id(int64_t id);
803
2adcc85d
JH
804/**
805 * cpu_throttle_set:
806 * @new_throttle_pct: Percent of sleep time. Valid range is 1 to 99.
807 *
808 * Throttles all vcpus by forcing them to sleep for the given percentage of
809 * time. A throttle_percentage of 25 corresponds to a 75% duty cycle roughly.
810 * (example: 10ms sleep for every 30ms awake).
811 *
812 * cpu_throttle_set can be called as needed to adjust new_throttle_pct.
813 * Once the throttling starts, it will remain in effect until cpu_throttle_stop
814 * is called.
815 */
816void cpu_throttle_set(int new_throttle_pct);
817
818/**
819 * cpu_throttle_stop:
820 *
821 * Stops the vcpu throttling started by cpu_throttle_set.
822 */
823void cpu_throttle_stop(void);
824
825/**
826 * cpu_throttle_active:
827 *
828 * Returns: %true if the vcpus are currently being throttled, %false otherwise.
829 */
830bool cpu_throttle_active(void);
831
832/**
833 * cpu_throttle_get_percentage:
834 *
835 * Returns the vcpu throttle percentage. See cpu_throttle_set for details.
836 *
837 * Returns: The throttle percentage in range 1 to 99.
838 */
839int cpu_throttle_get_percentage(void);
840
c3affe56
AF
841#ifndef CONFIG_USER_ONLY
842
843typedef void (*CPUInterruptHandler)(CPUState *, int);
844
845extern CPUInterruptHandler cpu_interrupt_handler;
846
847/**
848 * cpu_interrupt:
849 * @cpu: The CPU to set an interrupt on.
7e63bc38 850 * @mask: The interrupts to set.
c3affe56
AF
851 *
852 * Invokes the interrupt handler.
853 */
854static inline void cpu_interrupt(CPUState *cpu, int mask)
855{
856 cpu_interrupt_handler(cpu, mask);
857}
858
859#else /* USER_ONLY */
860
861void cpu_interrupt(CPUState *cpu, int mask);
862
863#endif /* USER_ONLY */
864
47507383
TH
865#ifdef NEED_CPU_H
866
93e22326 867#ifdef CONFIG_SOFTMMU
c658b94f
AF
868static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
869 bool is_write, bool is_exec,
870 int opaque, unsigned size)
871{
872 CPUClass *cc = CPU_GET_CLASS(cpu);
873
874 if (cc->do_unassigned_access) {
875 cc->do_unassigned_access(cpu, addr, is_write, is_exec, opaque, size);
876 }
877}
878
93e22326 879static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
b35399bb
SS
880 MMUAccessType access_type,
881 int mmu_idx, uintptr_t retaddr)
93e22326
PB
882{
883 CPUClass *cc = CPU_GET_CLASS(cpu);
884
b35399bb 885 cc->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
93e22326 886}
0dff0939
PM
887
888static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
889 vaddr addr, unsigned size,
890 MMUAccessType access_type,
891 int mmu_idx, MemTxAttrs attrs,
892 MemTxResult response,
893 uintptr_t retaddr)
894{
895 CPUClass *cc = CPU_GET_CLASS(cpu);
896
ed860129 897 if (!cpu->ignore_memory_transaction_failures && cc->do_transaction_failed) {
0dff0939
PM
898 cc->do_transaction_failed(cpu, physaddr, addr, size, access_type,
899 mmu_idx, attrs, response, retaddr);
900 }
901}
c658b94f
AF
902#endif
903
47507383
TH
904#endif /* NEED_CPU_H */
905
2991b890
PC
906/**
907 * cpu_set_pc:
908 * @cpu: The CPU to set the program counter for.
909 * @addr: Program counter value.
910 *
911 * Sets the program counter for a CPU.
912 */
913static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
914{
915 CPUClass *cc = CPU_GET_CLASS(cpu);
916
917 cc->set_pc(cpu, addr);
918}
919
d8ed887b
AF
920/**
921 * cpu_reset_interrupt:
922 * @cpu: The CPU to clear the interrupt on.
923 * @mask: The interrupt mask to clear.
924 *
925 * Resets interrupts on the vCPU @cpu.
926 */
927void cpu_reset_interrupt(CPUState *cpu, int mask);
928
60a3e17a
AF
929/**
930 * cpu_exit:
931 * @cpu: The CPU to exit.
932 *
933 * Requests the CPU @cpu to exit execution.
934 */
935void cpu_exit(CPUState *cpu);
936
2993683b
IM
937/**
938 * cpu_resume:
939 * @cpu: The CPU to resume.
940 *
941 * Resumes CPU, i.e. puts CPU into runnable state.
942 */
943void cpu_resume(CPUState *cpu);
dd83b06a 944
4c055ab5
GZ
945/**
946 * cpu_remove:
947 * @cpu: The CPU to remove.
948 *
949 * Requests the CPU to be removed.
950 */
951void cpu_remove(CPUState *cpu);
952
2c579042
BR
953 /**
954 * cpu_remove_sync:
955 * @cpu: The CPU to remove.
956 *
957 * Requests the CPU to be removed and waits till it is removed.
958 */
959void cpu_remove_sync(CPUState *cpu);
960
d148d90e
SF
961/**
962 * process_queued_cpu_work() - process all items on CPU work queue
963 * @cpu: The CPU which work queue to process.
964 */
965void process_queued_cpu_work(CPUState *cpu);
966
ab129972
PB
967/**
968 * cpu_exec_start:
969 * @cpu: The CPU for the current thread.
970 *
971 * Record that a CPU has started execution and can be interrupted with
972 * cpu_exit.
973 */
974void cpu_exec_start(CPUState *cpu);
975
976/**
977 * cpu_exec_end:
978 * @cpu: The CPU for the current thread.
979 *
980 * Record that a CPU has stopped execution and exclusive sections
981 * can be executed without interrupting it.
982 */
983void cpu_exec_end(CPUState *cpu);
984
985/**
986 * start_exclusive:
987 *
988 * Wait for a concurrent exclusive section to end, and then start
989 * a section of work that is run while other CPUs are not running
990 * between cpu_exec_start and cpu_exec_end. CPUs that are running
991 * cpu_exec are exited immediately. CPUs that call cpu_exec_start
992 * during the exclusive section go to sleep until this CPU calls
993 * end_exclusive.
ab129972
PB
994 */
995void start_exclusive(void);
996
997/**
998 * end_exclusive:
999 *
1000 * Concludes an exclusive execution section started by start_exclusive.
ab129972
PB
1001 */
1002void end_exclusive(void);
1003
c643bed9
AF
1004/**
1005 * qemu_init_vcpu:
1006 * @cpu: The vCPU to initialize.
1007 *
1008 * Initializes a vCPU.
1009 */
1010void qemu_init_vcpu(CPUState *cpu);
1011
3825b28f
AF
1012#define SSTEP_ENABLE 0x1 /* Enable simulated HW single stepping */
1013#define SSTEP_NOIRQ 0x2 /* Do not use IRQ while single stepping */
1014#define SSTEP_NOTIMER 0x4 /* Do not Timers while single stepping */
1015
1016/**
1017 * cpu_single_step:
1018 * @cpu: CPU to the flags for.
1019 * @enabled: Flags to enable.
1020 *
1021 * Enables or disables single-stepping for @cpu.
1022 */
1023void cpu_single_step(CPUState *cpu, int enabled);
1024
b3310ab3
AF
1025/* Breakpoint/watchpoint flags */
1026#define BP_MEM_READ 0x01
1027#define BP_MEM_WRITE 0x02
1028#define BP_MEM_ACCESS (BP_MEM_READ | BP_MEM_WRITE)
1029#define BP_STOP_BEFORE_ACCESS 0x04
08225676 1030/* 0x08 currently unused */
b3310ab3
AF
1031#define BP_GDB 0x10
1032#define BP_CPU 0x20
b933066a 1033#define BP_ANY (BP_GDB | BP_CPU)
08225676
PM
1034#define BP_WATCHPOINT_HIT_READ 0x40
1035#define BP_WATCHPOINT_HIT_WRITE 0x80
1036#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
b3310ab3
AF
1037
1038int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
1039 CPUBreakpoint **breakpoint);
1040int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
1041void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
1042void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
1043
b933066a
RH
1044/* Return true if PC matches an installed breakpoint. */
1045static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
1046{
1047 CPUBreakpoint *bp;
1048
1049 if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
1050 QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
1051 if (bp->pc == pc && (bp->flags & mask)) {
1052 return true;
1053 }
1054 }
1055 }
1056 return false;
1057}
1058
75a34036
AF
1059int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
1060 int flags, CPUWatchpoint **watchpoint);
1061int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
1062 vaddr len, int flags);
1063void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
1064void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
1065
63c91552
PB
1066/**
1067 * cpu_get_address_space:
1068 * @cpu: CPU to get address space from
1069 * @asidx: index identifying which address space to get
1070 *
1071 * Return the requested address space of this CPU. @asidx
1072 * specifies which address space to read.
1073 */
1074AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
1075
a47dddd7
AF
1076void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
1077 GCC_FMT_ATTR(2, 3);
c7e002c5 1078extern Property cpu_common_props[];
39e329e3 1079void cpu_exec_initfn(CPUState *cpu);
ce5b1bbf 1080void cpu_exec_realizefn(CPUState *cpu, Error **errp);
7bbc124e 1081void cpu_exec_unrealizefn(CPUState *cpu);
a47dddd7 1082
c95ac103
TH
1083/**
1084 * target_words_bigendian:
1085 * Returns true if the (default) endianness of the target is big endian,
1086 * false otherwise. Note that in target-specific code, you can use
1087 * TARGET_WORDS_BIGENDIAN directly instead. On the other hand, common
1088 * code should normally never need to know about the endianness of the
1089 * target, so please do *not* use this function unless you know very well
1090 * what you are doing!
1091 */
1092bool target_words_bigendian(void);
1093
47507383
TH
1094#ifdef NEED_CPU_H
1095
1a1562f5
AF
1096#ifdef CONFIG_SOFTMMU
1097extern const struct VMStateDescription vmstate_cpu_common;
1098#else
1099#define vmstate_cpu_common vmstate_dummy
1100#endif
1101
1102#define VMSTATE_CPU() { \
1103 .name = "parent_obj", \
1104 .size = sizeof(CPUState), \
1105 .vmsd = &vmstate_cpu_common, \
1106 .flags = VMS_STRUCT, \
1107 .offset = 0, \
1108}
1109
47507383
TH
1110#endif /* NEED_CPU_H */
1111
a07f953e
IM
1112#define UNASSIGNED_CPU_INDEX -1
1113
dd83b06a 1114#endif