X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=cpu-all.h;h=e8391009a36e4de3accd1ac2feb0a2ac28cb18de;hb=6e1db57b2ac9025c2443c665a0d9e78748637b26;hp=9823c24bab430204f652082e64a0e69741f75364;hpb=f374e826e340ea1e2de2e9b006b3ac5dbde1324f;p=qemu.git

diff --git a/cpu-all.h b/cpu-all.h
index 9823c24ba..e8391009a 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -123,8 +123,7 @@ typedef union {
    endian ! */
 typedef union {
     float64 d;
-#if defined(HOST_WORDS_BIGENDIAN) \
-    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
+#if defined(HOST_WORDS_BIGENDIAN)
     struct {
         uint32_t upper;
         uint32_t lower;
@@ -138,11 +137,17 @@ typedef union {
     uint64_t ll;
 } CPU_DoubleU;
 
-#ifdef TARGET_SPARC
+typedef union {
+    floatx80 d;
+    struct {
+        uint64_t lower;
+        uint16_t upper;
+    } l;
+} CPU_LDoubleU;
+
 typedef union {
     float128 q;
-#if defined(HOST_WORDS_BIGENDIAN) \
-    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
+#if defined(HOST_WORDS_BIGENDIAN)
     struct {
         uint32_t upmost;
         uint32_t upper;
@@ -166,7 +171,6 @@ typedef union {
     } ll;
 #endif
 } CPU_QuadU;
-#endif
 
 /* CPU memory access without any memory or io remapping */
 
@@ -627,23 +631,32 @@ static inline void stfq_be_p(void *ptr, float64 v)
 #if defined(CONFIG_USE_GUEST_BASE)
 extern unsigned long guest_base;
 extern int have_guest_base;
+extern unsigned long reserved_va;
 #define GUEST_BASE guest_base
+#define RESERVED_VA reserved_va
 #else
 #define GUEST_BASE 0ul
+#define RESERVED_VA 0ul
 #endif
 
 /* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
 #define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
+
+#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
+#define h2g_valid(x) 1
+#else
+#define h2g_valid(x) ({ \
+    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
+    __guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS); \
+})
+#endif
+
 #define h2g(x) ({ \
     unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
     /* Check if given address fits target address space */ \
-    assert(__ret == (abi_ulong)__ret); \
+    assert(h2g_valid(x)); \
    (abi_ulong)__ret; \
 })
-#define h2g_valid(x) ({ \
-    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
-    (__guest == (abi_ulong)__guest); \
-})
 
 #define saddr(x) g2h(x)
 #define laddr(x) g2h(x)
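
Illustration (editor's sketch, not part of the patch): the hunk above replaces the old h2g() range check with a dedicated h2g_valid() macro that tests the offset against TARGET_VIRT_ADDR_SPACE_BITS instead of round-tripping through abi_ulong. The standalone program below mimics that scheme with made-up stand-ins for GUEST_BASE, TARGET_VIRT_ADDR_SPACE_BITS and abi_ulong; it assumes a 64-bit host, which is the case the #else branch above is written for.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for values QEMU derives from its configuration -- hypothetical. */
#define GUEST_BASE                  0x10000ul /* base of the guest mapping   */
#define TARGET_VIRT_ADDR_SPACE_BITS 32        /* width of the guest VA space */
typedef uint32_t abi_ulong;                   /* 32-bit guest on 64-bit host */

/* guest address -> host pointer: just add the base, as g2h() does. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))

/* A host pointer maps back iff its offset fits the guest VA space. */
#define h2g_valid(x) ({                                          \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE;     \
    __guest < (1ul << TARGET_VIRT_ADDR_SPACE_BITS);              \
})

/* host pointer -> guest address, asserting validity like the new h2g(). */
#define h2g(x) ({                                                \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE;       \
    assert(h2g_valid(x));                                        \
    (abi_ulong)__ret;                                            \
})

int main(void)
{
    abi_ulong guest_addr = 0x1000;
    void *host_ptr = g2h(guest_addr);
    printf("guest 0x%x -> host %p -> guest 0x%x (valid=%d)\n",
           (unsigned)guest_addr, host_ptr, (unsigned)h2g(host_ptr),
           h2g_valid(host_ptr));
    return 0;
}

When the host address space is no wider than the guest's, the patched header short-circuits h2g_valid(x) to 1, so the assert in h2g() can never fire.
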
@@ -736,52 +749,105 @@ extern unsigned long qemu_host_page_mask;
 /* original state of the write flag (used when tracking self-modifying
    code */
 #define PAGE_WRITE_ORG 0x0010
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
+/* FIXME: Code that sets/uses this is broken and needs to go away.  */
 #define PAGE_RESERVED 0x0020
+#endif
+
+#if defined(CONFIG_USER_ONLY)
 void page_dump(FILE *f);
-int walk_memory_regions(void *,
-    int (*fn)(void *, unsigned long, unsigned long, unsigned long));
+
+typedef int (*walk_memory_regions_fn)(void *, abi_ulong,
+                                      abi_ulong, unsigned long);
+int walk_memory_regions(void *, walk_memory_regions_fn);
+
 int page_get_flags(target_ulong address);
 void page_set_flags(target_ulong start, target_ulong end, int flags);
 int page_check_range(target_ulong start, target_ulong len, int flags);
+#endif
 
-void cpu_exec_init_all(unsigned long tb_size);
 CPUState *cpu_copy(CPUState *env);
 CPUState *qemu_get_cpu(int cpu);
 
-void cpu_dump_state(CPUState *env, FILE *f,
-                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
+#define CPU_DUMP_CODE 0x00010000
+
+void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                     int flags);
-void cpu_dump_statistics (CPUState *env, FILE *f,
-                          int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
-                          int flags);
+void cpu_dump_statistics(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+                         int flags);
 
 void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
-    __attribute__ ((__format__ (__printf__, 2, 3)));
+    GCC_FMT_ATTR(2, 3);
 extern CPUState *first_cpu;
 extern CPUState *cpu_single_env;
-extern int64_t qemu_icount;
-extern int use_icount;
-
-#define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */
-#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
-#define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */
-#define CPU_INTERRUPT_FIQ 0x10 /* Fast interrupt pending. */
-#define CPU_INTERRUPT_HALT 0x20 /* CPU halt wanted */
-#define CPU_INTERRUPT_SMI 0x40 /* (x86 only) SMI interrupt pending */
-#define CPU_INTERRUPT_DEBUG 0x80 /* Debug event occured. */
-#define CPU_INTERRUPT_VIRQ 0x100 /* virtual interrupt pending. */
-#define CPU_INTERRUPT_NMI 0x200 /* NMI pending. */
-#define CPU_INTERRUPT_INIT 0x400 /* INIT pending. */
-#define CPU_INTERRUPT_SIPI 0x800 /* SIPI pending. */
-#define CPU_INTERRUPT_MCE 0x1000 /* (x86 only) MCE pending. */
-
-void cpu_interrupt(CPUState *s, int mask);
+
+/* Flags for use in ENV->INTERRUPT_PENDING.
+
+   The numbers assigned here are non-sequential in order to preserve
+   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
+   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
+   the vmstate dump.  */
+
+/* External hardware interrupt pending.  This is typically used for
+   interrupts from devices.  */
+#define CPU_INTERRUPT_HARD        0x0002
+
+/* Exit the current TB.  This is typically used when some system-level device
+   makes some change to the memory mapping.  E.g. the a20 line change.  */
+#define CPU_INTERRUPT_EXITTB      0x0004
+
+/* Halt the CPU.  */
+#define CPU_INTERRUPT_HALT        0x0020
+
+/* Debug event pending.  */
+#define CPU_INTERRUPT_DEBUG       0x0080
+
+/* Several target-specific external hardware interrupts.  Each target/cpu.h
+   should define proper names based on these defines.  */
+#define CPU_INTERRUPT_TGT_EXT_0   0x0008
+#define CPU_INTERRUPT_TGT_EXT_1   0x0010
+#define CPU_INTERRUPT_TGT_EXT_2   0x0040
+#define CPU_INTERRUPT_TGT_EXT_3   0x0200
+#define CPU_INTERRUPT_TGT_EXT_4   0x1000
+
+/* Several target-specific internal interrupts.  These differ from the
+   preceeding target-specific interrupts in that they are intended to
+   originate from within the cpu itself, typically in response to some
+   instruction being executed.  These, therefore, are not masked while
+   single-stepping within the debugger.  */
+#define CPU_INTERRUPT_TGT_INT_0   0x0100
+#define CPU_INTERRUPT_TGT_INT_1   0x0400
+#define CPU_INTERRUPT_TGT_INT_2   0x0800
+
+/* First unused bit: 0x2000.  */
+
+/* The set of all bits that should be masked when single-stepping.  */
+#define CPU_INTERRUPT_SSTEP_MASK \
+    (CPU_INTERRUPT_HARD          \
+     | CPU_INTERRUPT_TGT_EXT_0   \
+     | CPU_INTERRUPT_TGT_EXT_1   \
+     | CPU_INTERRUPT_TGT_EXT_2   \
+     | CPU_INTERRUPT_TGT_EXT_3   \
+     | CPU_INTERRUPT_TGT_EXT_4)
+
+#ifndef CONFIG_USER_ONLY
+typedef void (*CPUInterruptHandler)(CPUState *, int);
+
+extern CPUInterruptHandler cpu_interrupt_handler;
+
+static inline void cpu_interrupt(CPUState *s, int mask)
+{
+    cpu_interrupt_handler(s, mask);
+}
+#else /* USER_ONLY */
+void cpu_interrupt(CPUState *env, int mask);
+#endif /* USER_ONLY */
+
 void cpu_reset_interrupt(CPUState *env, int mask);
 
 void cpu_exit(CPUState *s);
 
-int qemu_cpu_has_work(CPUState *env);
+bool qemu_cpu_has_work(CPUState *env);
 
 /* Breakpoint/watchpoint flags */
 #define BP_MEM_READ 0x01
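
Illustration (editor's sketch, not part of the patch): each target's cpu.h is expected to alias its own interrupt names onto the generic bits above, and because the generic values were chosen to match the x86-specific flags this hunk removes, such a mapping preserves the vmstate bit layout. A hypothetical target header fragment, assuming cpu-all.h is already included -- the real per-target definitions live outside this diff:

/* Hypothetical target-xyz/cpu.h fragment; the values in the comments are the
 * old cpu-all.h flags removed above, which the generic bits line up with. */
#define CPU_INTERRUPT_SMI   CPU_INTERRUPT_TGT_EXT_2   /* was 0x0040 */
#define CPU_INTERRUPT_NMI   CPU_INTERRUPT_TGT_EXT_3   /* was 0x0200 */
#define CPU_INTERRUPT_MCE   CPU_INTERRUPT_TGT_EXT_4   /* was 0x1000 */
#define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_INT_0   /* was 0x0100 */
#define CPU_INTERRUPT_INIT  CPU_INTERRUPT_TGT_INT_1   /* was 0x0400 */
#define CPU_INTERRUPT_SIPI  CPU_INTERRUPT_TGT_INT_2   /* was 0x0800 */

Since the TGT_INT_* bits are left out of CPU_INTERRUPT_SSTEP_MASK, interrupts that originate from guest instructions (VIRQ, INIT, SIPI in this sketch) keep firing while single-stepping, exactly as the comment above describes for internal interrupts.
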
@@ -810,11 +876,8 @@ void cpu_watchpoint_remove_all(CPUState *env, int mask);
 
 void cpu_single_step(CPUState *env, int enabled);
 void cpu_reset(CPUState *s);
-
-/* Return the physical page corresponding to a virtual one. Use it
-   only for debugging because no protection checks are done. Return -1
-   if no page found. */
-target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
+int cpu_is_stopped(CPUState *env);
+void run_on_cpu(CPUState *env, void (*func)(void *data), void *data);
 
 #define CPU_LOG_TB_OUT_ASM (1 << 0)
 #define CPU_LOG_TB_IN_ASM  (1 << 1)
@@ -840,12 +903,38 @@ void cpu_set_log(int log_flags);
 void cpu_set_log_filename(const char *filename);
 int cpu_str_to_log_mask(const char *str);
 
+#if !defined(CONFIG_USER_ONLY)
+
+/* Return the physical page corresponding to a virtual one. Use it
+   only for debugging because no protection checks are done. Return -1
+   if no page found. */
+target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
+
 /* memory API */
 
 extern int phys_ram_fd;
-extern uint8_t *phys_ram_dirty;
 extern ram_addr_t ram_size;
-extern ram_addr_t last_ram_offset;
+
+/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
+#define RAM_PREALLOC_MASK (1 << 0)
+
+typedef struct RAMBlock {
+    uint8_t *host;
+    ram_addr_t offset;
+    ram_addr_t length;
+    uint32_t flags;
+    char idstr[256];
+    QLIST_ENTRY(RAMBlock) next;
+#if defined(__linux__) && !defined(TARGET_S390X)
+    int fd;
+#endif
+} RAMBlock;
+
+typedef struct RAMList {
+    uint8_t *phys_dirty;
+    QLIST_HEAD(ram, RAMBlock) blocks;
+} RAMList;
+extern RAMList ram_list;
 
 extern const char *mem_path;
 extern int mem_prealloc;
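
Illustration (editor's sketch, not part of the patch): RAMBlock/RAMList replace the old flat phys_ram_dirty and last_ram_offset globals -- each RAM region now carries its own host pointer, offset, length and id string, and the dirty bitmap hangs off ram_list.phys_dirty. A sketch of walking the block list with the QLIST macros from qemu-queue.h; total_ram_bytes() is a hypothetical helper, not something this patch adds:

/* Assumes cpu-all.h (for RAMBlock/RAMList/ram_list) and qemu-queue.h are
 * included; only a full QEMU build could actually link this. */
static ram_addr_t total_ram_bytes(void)
{
    RAMBlock *block;
    ram_addr_t total = 0;

    /* ram_list.blocks is a QLIST linked through each block's 'next' field. */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }
    return total;
}

One byte of ram_list.phys_dirty covers one target page, so a block of length len accounts for len >> TARGET_PAGE_BITS entries in the dirty bitmap.
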
@@ -868,9 +957,6 @@ extern int mem_prealloc;
 /* Set if TLB entry is an IO callback.  */
 #define TLB_MMIO (1 << 5)
 
-int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
-                        uint8_t *buf, int len, int is_write);
-
 #define VGA_DIRTY_FLAG 0x01
 #define CODE_DIRTY_FLAG 0x02
 #define MIGRATION_DIRTY_FLAG 0x08
@@ -878,201 +964,67 @@ int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
 /* read dirty bit (return 0 or 1) */
 static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
 {
-    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
-}
-
-static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
-                                                int dirty_flags)
-{
-    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
-}
-
-static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
-{
-    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
-}
-
-void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
-                                     int dirty_flags);
-void cpu_tlb_update_dirty(CPUState *env);
-
-int cpu_physical_memory_set_dirty_tracking(int enable);
-
-int cpu_physical_memory_get_dirty_tracking(void);
-
-int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
-                                   target_phys_addr_t end_addr);
-
-void dump_exec_info(FILE *f,
-                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
-
-/* Coalesced MMIO regions are areas where write operations can be reordered.
- * This usually implies that write operations are side-effect free.  This allows
- * batching which can make a major impact on performance when using
- * virtualization.
- */
-void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
-
-void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
-
-void qemu_flush_coalesced_mmio_buffer(void);
-
-/*******************************************/
-/* host CPU ticks (if available) */
-
-#if defined(_ARCH_PPC)
-
-static inline int64_t cpu_get_real_ticks(void)
-{
-    int64_t retval;
-#ifdef _ARCH_PPC64
-    /* This reads timebase in one 64bit go and includes Cell workaround from:
-       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
-    */
-    __asm__ __volatile__ (
-        "mftb %0\n\t"
-        "cmpwi %0,0\n\t"
-        "beq- $-8"
-        : "=r" (retval));
-#else
-    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
-    unsigned long junk;
-    __asm__ __volatile__ (
-        "mftbu %1\n\t"
-        "mftb %L0\n\t"
-        "mftbu %0\n\t"
-        "cmpw %0,%1\n\t"
-        "bne $-16"
-        : "=r" (retval), "=r" (junk));
-#endif
-    return retval;
+    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
 }
 
-#elif defined(__i386__)
-
-static inline int64_t cpu_get_real_ticks(void)
+static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
 {
-    int64_t val;
-    asm volatile ("rdtsc" : "=A" (val));
-    return val;
+    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
 }
 
-#elif defined(__x86_64__)
-
-static inline int64_t cpu_get_real_ticks(void)
+static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
+                                                int dirty_flags)
 {
-    uint32_t low,high;
-    int64_t val;
-    asm volatile("rdtsc" : "=a" (low), "=d" (high));
-    val = high;
-    val <<= 32;
-    val |= low;
-    return val;
+    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
 }
 
-#elif defined(__hppa__)
-
-static inline int64_t cpu_get_real_ticks(void)
+static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
 {
-    int val;
-    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
-    return val;
+    ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
 }
 
-#elif defined(__ia64)
-
-static inline int64_t cpu_get_real_ticks(void)
+static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
+                                                      int dirty_flags)
 {
-    int64_t val;
-    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
-    return val;
+    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
 }
 
-#elif defined(__s390__)
-
-static inline int64_t cpu_get_real_ticks(void)
+static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
+                                                        int length,
+                                                        int dirty_flags)
 {
-    int64_t val;
-    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
-    return val;
-}
-
-#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)
+    int i, mask, len;
+    uint8_t *p;
 
-static inline int64_t cpu_get_real_ticks (void)
-{
-#if defined(_LP64)
-    uint64_t rval;
-    asm volatile("rd %%tick,%0" : "=r"(rval));
-    return rval;
-#else
-    union {
-        uint64_t i64;
-        struct {
-            uint32_t high;
-            uint32_t low;
-        } i32;
-    } rval;
-    asm volatile("rd %%tick,%1; srlx %1,32,%0"
-                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
-    return rval.i64;
-#endif
+    len = length >> TARGET_PAGE_BITS;
+    mask = ~dirty_flags;
+    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
+    for (i = 0; i < len; i++) {
+        p[i] &= mask;
+    }
 }
 
-#elif defined(__mips__) && \
-    ((defined(__mips_isa_rev) && __mips_isa_rev >= 2) || defined(__linux__))
-/*
- * binutils wants to use rdhwr only on mips32r2
- * but as linux kernel emulate it, it's fine
- * to use it.
- *
- */
-#define MIPS_RDHWR(rd, value) {                 \
-    __asm__ __volatile__ (                      \
-    ".set   push\n\t"                           \
-    ".set mips32r2\n\t"                         \
-    "rdhwr  %0, "rd"\n\t"                       \
-    ".set   pop"                                \
-    : "=r" (value));                            \
-}
+void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
+                                     int dirty_flags);
+void cpu_tlb_update_dirty(CPUState *env);
 
-static inline int64_t cpu_get_real_ticks(void)
-{
-/* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
-    uint32_t count;
-    static uint32_t cyc_per_count = 0;
+int cpu_physical_memory_set_dirty_tracking(int enable);
 
-    if (!cyc_per_count)
-        MIPS_RDHWR("$3", cyc_per_count);
+int cpu_physical_memory_get_dirty_tracking(void);
 
-    MIPS_RDHWR("$2", count);
-    return (int64_t)(count * cyc_per_count);
-}
+int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
+                                   target_phys_addr_t end_addr);
 
-#else
-/* The host CPU doesn't have an easily accessible cycle counter.
-   Just return a monotonically increasing value.  This will be
-   totally wrong, but hopefully better than nothing. */
-static inline int64_t cpu_get_real_ticks (void)
-{
-    static int64_t ticks = 0;
-    return ticks++;
-}
-#endif
+int cpu_physical_log_start(target_phys_addr_t start_addr,
+                           ram_addr_t size);
 
-/* profiling */
-#ifdef CONFIG_PROFILER
-static inline int64_t profile_getclock(void)
-{
-    return cpu_get_real_ticks();
-}
+int cpu_physical_log_stop(target_phys_addr_t start_addr,
+                          ram_addr_t size);
 
-extern int64_t qemu_time, qemu_time_start;
-extern int64_t tlb_flush_time;
-extern int64_t dev_time;
-#endif
+void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
+#endif /* !CONFIG_USER_ONLY */
 
-void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
-                        uint64_t mcg_status, uint64_t addr, uint64_t misc);
+int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+                        uint8_t *buf, int len, int is_write);
 
 #endif /* CPU_ALL_H */