X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=accel%2Ftcg%2Fuser-exec.c;h=3ba7acf7f44ff046b353901784ef2ae8aea907ff;hb=b0702c91c66a9a9d8831ecb3d08f511e7d167489;hp=b09f7a15770228ed47191c6c1c8014d3ce92ca50;hpb=68d8ef4ec540682c3538d4963e836e43a211dd17;p=mirror_qemu.git diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index b09f7a1577..3ba7acf7f4 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -17,15 +17,17 @@ * License along with this library; if not, see . */ #include "qemu/osdep.h" -#include "cpu.h" +#include "hw/core/tcg-cpu-ops.h" #include "disas/disas.h" #include "exec/exec-all.h" -#include "tcg.h" +#include "tcg/tcg.h" #include "qemu/bitops.h" #include "exec/cpu_ldst.h" -#include "translate-all.h" +#include "exec/translate-all.h" #include "exec/helper-proto.h" #include "qemu/atomic128.h" +#include "trace/trace-root.h" +#include "trace/mem.h" #undef EAX #undef ECX @@ -47,7 +49,8 @@ __thread uintptr_t helper_retaddr; /* exit the current TB from a signal handler. The host registers are restored in a state compatible with the CPU emulator */ -static void cpu_exit_tb_from_sighandler(CPUState *cpu, sigset_t *old_set) +static void QEMU_NORETURN cpu_exit_tb_from_sighandler(CPUState *cpu, + sigset_t *old_set) { /* XXX: use siglongjmp ? */ sigprocmask(SIG_SETMASK, old_set, NULL); @@ -86,7 +89,7 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, * use that value directly. Within cpu_restore_state_from_tb, we * assume PC comes from GETPC(), as used by the helper functions, * so we adjust the address by -GETPC_ADJ to form an address that - * is within the call insn, so that the address does not accidentially + * is within the call insn, so that the address does not accidentally * match the beginning of the next guest insn. However, when the * pc comes from the signal frame it points to the actual faulting * host memory insn and not the return from a call insn. 
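For orientation, every per-host cpu_signal_handler() further down follows the same pattern: recover the faulting program counter and a read/write indication from the host signal context, then forward both to handle_cpu_signal(). A condensed sketch of that pattern for the x86_64 Linux case, assembled from the hunks below purely for illustration (the patched handlers also cover the BSDs through the *_sig macros):

/*
 * Sketch only: the shape shared by the host-specific handlers below,
 * shown for x86_64 Linux.  PC, trap number and error code come from the
 * ucontext; bit 1 of the page-fault error code distinguishes writes.
 */
#include <signal.h>
#include <ucontext.h>

#define PAGE_FAULT_TRAP 0xe   /* x86 #PF vector, as in the hunk below */

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    ucontext_t *uc = puc;
    uintptr_t pc = uc->uc_mcontext.gregs[REG_RIP];
    unsigned long trapno = uc->uc_mcontext.gregs[REG_TRAPNO];
    int is_write = trapno == PAGE_FAULT_TRAP
                   ? (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0;

    return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask);
}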
@@ -184,17 +187,17 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, clear_helper_retaddr(); cc = CPU_GET_CLASS(cpu); - cc->tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc); + cc->tcg_ops->tlb_fill(cpu, address, 0, access_type, + MMU_USER_IDX, false, pc); g_assert_not_reached(); } -void *probe_access(CPUArchState *env, target_ulong addr, int size, - MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) +static int probe_access_internal(CPUArchState *env, target_ulong addr, + int fault_size, MMUAccessType access_type, + bool nonfault, uintptr_t ra) { int flags; - g_assert(-(addr | TARGET_PAGE_MASK) >= size); - switch (access_type) { case MMU_DATA_STORE: flags = PAGE_WRITE; @@ -209,43 +212,77 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size, g_assert_not_reached(); } - if (!guest_addr_valid(addr) || page_check_range(addr, size, flags) < 0) { - CPUState *cpu = env_cpu(env); - CPUClass *cc = CPU_GET_CLASS(cpu); - cc->tlb_fill(cpu, addr, size, access_type, MMU_USER_IDX, false, - retaddr); - g_assert_not_reached(); + if (!guest_addr_valid_untagged(addr) || + page_check_range(addr, 1, flags) < 0) { + if (nonfault) { + return TLB_INVALID_MASK; + } else { + CPUState *cpu = env_cpu(env); + CPUClass *cc = CPU_GET_CLASS(cpu); + cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type, + MMU_USER_IDX, false, ra); + g_assert_not_reached(); + } } + return 0; +} + +int probe_access_flags(CPUArchState *env, target_ulong addr, + MMUAccessType access_type, int mmu_idx, + bool nonfault, void **phost, uintptr_t ra) +{ + int flags; + + flags = probe_access_internal(env, addr, 0, access_type, nonfault, ra); + *phost = flags ? NULL : g2h(env_cpu(env), addr); + return flags; +} + +void *probe_access(CPUArchState *env, target_ulong addr, int size, + MMUAccessType access_type, int mmu_idx, uintptr_t ra) +{ + int flags; + + g_assert(-(addr | TARGET_PAGE_MASK) >= size); + flags = probe_access_internal(env, addr, size, access_type, false, ra); + g_assert(flags == 0); - return size ? g2h(addr) : NULL; + return size ? 
g2h(env_cpu(env), addr) : NULL; } #if defined(__i386__) #if defined(__NetBSD__) #include +#include #define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP]) #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) #define MASK_sig(context) ((context)->uc_sigmask) +#define PAGE_FAULT_TRAP T_PAGEFLT #elif defined(__FreeBSD__) || defined(__DragonFly__) #include +#include #define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_eip)) #define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) #define ERROR_sig(context) ((context)->uc_mcontext.mc_err) #define MASK_sig(context) ((context)->uc_sigmask) +#define PAGE_FAULT_TRAP T_PAGEFLT #elif defined(__OpenBSD__) +#include #define EIP_sig(context) ((context)->sc_eip) #define TRAP_sig(context) ((context)->sc_trapno) #define ERROR_sig(context) ((context)->sc_err) #define MASK_sig(context) ((context)->sc_mask) +#define PAGE_FAULT_TRAP T_PAGEFLT #else #define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP]) #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) #define MASK_sig(context) ((context)->uc_sigmask) +#define PAGE_FAULT_TRAP 0xe #endif int cpu_signal_handler(int host_signum, void *pinfo, @@ -271,34 +308,42 @@ int cpu_signal_handler(int host_signum, void *pinfo, pc = EIP_sig(uc); trapno = TRAP_sig(uc); return handle_cpu_signal(pc, info, - trapno == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0, + trapno == PAGE_FAULT_TRAP ? + (ERROR_sig(uc) >> 1) & 1 : 0, &MASK_sig(uc)); } #elif defined(__x86_64__) #ifdef __NetBSD__ +#include #define PC_sig(context) _UC_MACHINE_PC(context) #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) #define MASK_sig(context) ((context)->uc_sigmask) +#define PAGE_FAULT_TRAP T_PAGEFLT #elif defined(__OpenBSD__) +#include #define PC_sig(context) ((context)->sc_rip) #define TRAP_sig(context) ((context)->sc_trapno) #define ERROR_sig(context) ((context)->sc_err) #define MASK_sig(context) ((context)->sc_mask) +#define PAGE_FAULT_TRAP T_PAGEFLT #elif defined(__FreeBSD__) || defined(__DragonFly__) #include +#include #define PC_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_rip)) #define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) #define ERROR_sig(context) ((context)->uc_mcontext.mc_err) #define MASK_sig(context) ((context)->uc_sigmask) +#define PAGE_FAULT_TRAP T_PAGEFLT #else #define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP]) #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) #define MASK_sig(context) ((context)->uc_sigmask) +#define PAGE_FAULT_TRAP 0xe #endif int cpu_signal_handler(int host_signum, void *pinfo, @@ -316,7 +361,8 @@ int cpu_signal_handler(int host_signum, void *pinfo, pc = PC_sig(uc); return handle_cpu_signal(pc, info, - TRAP_sig(uc) == 0xe ? (ERROR_sig(uc) >> 1) & 1 : 0, + TRAP_sig(uc) == PAGE_FAULT_TRAP ? 
+ (ERROR_sig(uc) >> 1) & 1 : 0, &MASK_sig(uc)); } @@ -490,6 +536,7 @@ int cpu_signal_handler(int host_signum, void *pinfo, #if defined(__NetBSD__) #include +#include #endif int cpu_signal_handler(int host_signum, void *pinfo, @@ -498,10 +545,12 @@ int cpu_signal_handler(int host_signum, void *pinfo, siginfo_t *info = pinfo; #if defined(__NetBSD__) ucontext_t *uc = puc; + siginfo_t *si = pinfo; #else ucontext_t *uc = puc; #endif unsigned long pc; + uint32_t fsr; int is_write; #if defined(__NetBSD__) @@ -512,15 +561,48 @@ int cpu_signal_handler(int host_signum, void *pinfo, pc = uc->uc_mcontext.arm_pc; #endif - /* error_code is the FSR value, in which bit 11 is WnR (assuming a v6 or - * later processor; on v5 we will always report this as a read). +#ifdef __NetBSD__ + fsr = si->si_trap; +#else + fsr = uc->uc_mcontext.error_code; +#endif + /* + * In the FSR, bit 11 is WnR, assuming a v6 or + * later processor. On v5 we will always report + * this as a read, which will fail later. */ - is_write = extract32(uc->uc_mcontext.error_code, 11, 1); + is_write = extract32(fsr, 11, 1); return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); } #elif defined(__aarch64__) +#if defined(__NetBSD__) + +#include +#include + +int cpu_signal_handler(int host_signum, void *pinfo, void *puc) +{ + ucontext_t *uc = puc; + siginfo_t *si = pinfo; + unsigned long pc; + int is_write; + uint32_t esr; + + pc = uc->uc_mcontext.__gregs[_REG_PC]; + esr = si->si_trap; + + /* + * siginfo_t::si_trap is the ESR value, for data aborts ESR.EC + * is 0b10010x: then bit 6 is the WnR bit + */ + is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1; + return handle_cpu_signal(pc, si, is_write, &uc->uc_sigmask); +} + +#else + #ifndef ESR_MAGIC /* Pre-3.16 kernel headers don't have these, so provide fallback definitions */ #define ESR_MAGIC 0x45535201 @@ -583,6 +665,7 @@ int cpu_signal_handler(int host_signum, void *pinfo, void *puc) } return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); } +#endif #elif defined(__s390__) @@ -597,18 +680,26 @@ int cpu_signal_handler(int host_signum, void *pinfo, pc = uc->uc_mcontext.psw.addr; - /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead - of the normal 2 arguments. The 3rd argument contains the "int_code" - from the hardware which does in fact contain the is_write value. - The rt signal handler, as far as I can tell, does not give this value - at all. Not that we could get to it from here even if it were. */ - /* ??? This is not even close to complete, since it ignores all - of the read-modify-write instructions. */ + /* + * ??? On linux, the non-rt signal handler has 4 (!) arguments instead + * of the normal 2 arguments. The 4th argument contains the "Translation- + * Exception Identification for DAT Exceptions" from the hardware (aka + * "int_parm_long"), which does in fact contain the is_write value. + * The rt signal handler, as far as I can tell, does not give this value + * at all. Not that we could get to it from here even if it were. + * So fall back to parsing instructions. Treat read-modify-write ones as + * writes, which is not fully correct, but for tracking self-modifying code + * this is better than treating them as reads. Checking si_addr page flags + * might be a viable improvement, albeit a racy one. + */ + /* ??? This is not even close to complete. 
*/ pinsn = (uint16_t *)pc; switch (pinsn[0] >> 8) { case 0x50: /* ST */ case 0x42: /* STC */ case 0x40: /* STH */ + case 0xba: /* CS */ + case 0xbb: /* CDS */ is_write = 1; break; case 0xc4: /* RIL format insns */ @@ -619,6 +710,12 @@ int cpu_signal_handler(int host_signum, void *pinfo, is_write = 1; } break; + case 0xc8: /* SSF format insns */ + switch (pinsn[0] & 0xf) { + case 0x2: /* CSST */ + is_write = 1; + } + break; case 0xe3: /* RXY format insns */ switch (pinsn[2] & 0xff) { case 0x50: /* STY */ @@ -632,22 +729,77 @@ int cpu_signal_handler(int host_signum, void *pinfo, is_write = 1; } break; + case 0xeb: /* RSY format insns */ + switch (pinsn[2] & 0xff) { + case 0x14: /* CSY */ + case 0x30: /* CSG */ + case 0x31: /* CDSY */ + case 0x3e: /* CDSG */ + case 0xe4: /* LANG */ + case 0xe6: /* LAOG */ + case 0xe7: /* LAXG */ + case 0xe8: /* LAAG */ + case 0xea: /* LAALG */ + case 0xf4: /* LAN */ + case 0xf6: /* LAO */ + case 0xf7: /* LAX */ + case 0xfa: /* LAAL */ + case 0xf8: /* LAA */ + is_write = 1; + } + break; } + return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); } #elif defined(__mips__) +#if defined(__misp16) || defined(__mips_micromips) +#error "Unsupported encoding" +#endif + int cpu_signal_handler(int host_signum, void *pinfo, void *puc) { siginfo_t *info = pinfo; ucontext_t *uc = puc; - greg_t pc = uc->uc_mcontext.pc; - int is_write; + uintptr_t pc = uc->uc_mcontext.pc; + uint32_t insn = *(uint32_t *)pc; + int is_write = 0; + + /* Detect all store instructions at program counter. */ + switch((insn >> 26) & 077) { + case 050: /* SB */ + case 051: /* SH */ + case 052: /* SWL */ + case 053: /* SW */ + case 054: /* SDL */ + case 055: /* SDR */ + case 056: /* SWR */ + case 070: /* SC */ + case 071: /* SWC1 */ + case 074: /* SCD */ + case 075: /* SDC1 */ + case 077: /* SD */ +#if !defined(__mips_isa_rev) || __mips_isa_rev < 6 + case 072: /* SWC2 */ + case 076: /* SDC2 */ +#endif + is_write = 1; + break; + case 023: /* COP1X */ + /* Required in all versions of MIPS64 since + MIPS64r1 and subsequent versions of MIPS32r2. */ + switch (insn & 077) { + case 010: /* SWXC1 */ + case 011: /* SDXC1 */ + case 015: /* SUXC1 */ + is_write = 1; + } + break; + } - /* XXX: compute is_write */ - is_write = 0; return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); } @@ -734,29 +886,385 @@ int cpu_signal_handler(int host_signum, void *pinfo, /* The softmmu versions of these helpers are in cputlb.c. */ -/* Do not allow unaligned operations to proceed. Return the host address. 
*/ +uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr) +{ + MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, false); + uint32_t ret; + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + ret = ldub_p(g2h(env_cpu(env), ptr)); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); + return ret; +} + +int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr) +{ + return (int8_t)cpu_ldub_data(env, ptr); +} + +uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr) +{ + MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, false); + uint32_t ret; + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + ret = lduw_be_p(g2h(env_cpu(env), ptr)); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); + return ret; +} + +int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr) +{ + return (int16_t)cpu_lduw_be_data(env, ptr); +} + +uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr) +{ + MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, false); + uint32_t ret; + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + ret = ldl_be_p(g2h(env_cpu(env), ptr)); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); + return ret; +} + +uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr) +{ + MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, false); + uint64_t ret; + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + ret = ldq_be_p(g2h(env_cpu(env), ptr)); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); + return ret; +} + +uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr) +{ + MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, false); + uint32_t ret; + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + ret = lduw_le_p(g2h(env_cpu(env), ptr)); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); + return ret; +} + +int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr) +{ + return (int16_t)cpu_lduw_le_data(env, ptr); +} + +uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr) +{ + MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, false); + uint32_t ret; + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + ret = ldl_le_p(g2h(env_cpu(env), ptr)); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); + return ret; +} + +uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr) +{ + MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, false); + uint64_t ret; + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + ret = ldq_le_p(g2h(env_cpu(env), ptr)); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); + return ret; +} + +uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +{ + uint32_t ret; + + set_helper_retaddr(retaddr); + ret = cpu_ldub_data(env, ptr); + clear_helper_retaddr(); + return ret; +} + +int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +{ + return (int8_t)cpu_ldub_data_ra(env, ptr, retaddr); +} + +uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +{ + uint32_t ret; + + set_helper_retaddr(retaddr); + ret = cpu_lduw_be_data(env, ptr); + clear_helper_retaddr(); + return ret; +} + +int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +{ + return (int16_t)cpu_lduw_be_data_ra(env, ptr, retaddr); +} + +uint32_t 
cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +{ + uint32_t ret; + + set_helper_retaddr(retaddr); + ret = cpu_ldl_be_data(env, ptr); + clear_helper_retaddr(); + return ret; +} + +uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +{ + uint64_t ret; + + set_helper_retaddr(retaddr); + ret = cpu_ldq_be_data(env, ptr); + clear_helper_retaddr(); + return ret; +} + +uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +{ + uint32_t ret; + + set_helper_retaddr(retaddr); + ret = cpu_lduw_le_data(env, ptr); + clear_helper_retaddr(); + return ret; +} + +int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +{ + return (int16_t)cpu_lduw_le_data_ra(env, ptr, retaddr); +} + +uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +{ + uint32_t ret; + + set_helper_retaddr(retaddr); + ret = cpu_ldl_le_data(env, ptr); + clear_helper_retaddr(); + return ret; +} + +uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +{ + uint64_t ret; + + set_helper_retaddr(retaddr); + ret = cpu_ldq_le_data(env, ptr); + clear_helper_retaddr(); + return ret; +} + +void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val) +{ + MemOpIdx oi = make_memop_idx(MO_UB, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, true); + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + stb_p(g2h(env_cpu(env), ptr), val); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); +} + +void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val) +{ + MemOpIdx oi = make_memop_idx(MO_BEUW, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, true); + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + stw_be_p(g2h(env_cpu(env), ptr), val); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); +} + +void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val) +{ + MemOpIdx oi = make_memop_idx(MO_BEUL, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, true); + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + stl_be_p(g2h(env_cpu(env), ptr), val); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); +} + +void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val) +{ + MemOpIdx oi = make_memop_idx(MO_BEQ, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, true); + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + stq_be_p(g2h(env_cpu(env), ptr), val); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); +} + +void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val) +{ + MemOpIdx oi = make_memop_idx(MO_LEUW, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, true); + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + stw_le_p(g2h(env_cpu(env), ptr), val); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); +} + +void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val) +{ + MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, true); + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + stl_le_p(g2h(env_cpu(env), ptr), val); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); +} + +void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val) +{ + MemOpIdx oi = make_memop_idx(MO_LEQ, MMU_USER_IDX); + uint16_t meminfo = trace_mem_get_info(oi, true); + + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + stq_le_p(g2h(env_cpu(env), ptr), val); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); +} 
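The *_ra variants above exist so that out-of-line helpers can pass their own return address: if the guest access faults, handle_cpu_signal() can unwind to the translation block that called the helper. A minimal, hypothetical target helper showing the intended usage (helper_demo_load_pair is not part of the patch; cpu_ldl_le_data_ra and GETPC() are):

/* Hypothetical example, not in the patch: a target helper loading two
 * 32-bit words with the retaddr-propagating accessors, so that a fault
 * in either load unwinds correctly to the calling translation block. */
uint64_t helper_demo_load_pair(CPUArchState *env, target_ulong addr)
{
    uintptr_t ra = GETPC();              /* return address into the TB */
    uint32_t lo = cpu_ldl_le_data_ra(env, addr, ra);
    uint32_t hi = cpu_ldl_le_data_ra(env, addr + 4, ra);

    return ((uint64_t)hi << 32) | lo;
}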
+ +void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr, + uint32_t val, uintptr_t retaddr) +{ + set_helper_retaddr(retaddr); + cpu_stb_data(env, ptr, val); + clear_helper_retaddr(); +} + +void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr, + uint32_t val, uintptr_t retaddr) +{ + set_helper_retaddr(retaddr); + cpu_stw_be_data(env, ptr, val); + clear_helper_retaddr(); +} + +void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr, + uint32_t val, uintptr_t retaddr) +{ + set_helper_retaddr(retaddr); + cpu_stl_be_data(env, ptr, val); + clear_helper_retaddr(); +} + +void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr, + uint64_t val, uintptr_t retaddr) +{ + set_helper_retaddr(retaddr); + cpu_stq_be_data(env, ptr, val); + clear_helper_retaddr(); +} + +void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr, + uint32_t val, uintptr_t retaddr) +{ + set_helper_retaddr(retaddr); + cpu_stw_le_data(env, ptr, val); + clear_helper_retaddr(); +} + +void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr, + uint32_t val, uintptr_t retaddr) +{ + set_helper_retaddr(retaddr); + cpu_stl_le_data(env, ptr, val); + clear_helper_retaddr(); +} + +void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr, + uint64_t val, uintptr_t retaddr) +{ + set_helper_retaddr(retaddr); + cpu_stq_le_data(env, ptr, val); + clear_helper_retaddr(); +} + +uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr) +{ + uint32_t ret; + + set_helper_retaddr(1); + ret = ldub_p(g2h_untagged(ptr)); + clear_helper_retaddr(); + return ret; +} + +uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr) +{ + uint32_t ret; + + set_helper_retaddr(1); + ret = lduw_p(g2h_untagged(ptr)); + clear_helper_retaddr(); + return ret; +} + +uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr) +{ + uint32_t ret; + + set_helper_retaddr(1); + ret = ldl_p(g2h_untagged(ptr)); + clear_helper_retaddr(); + return ret; +} + +uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr) +{ + uint64_t ret; + + set_helper_retaddr(1); + ret = ldq_p(g2h_untagged(ptr)); + clear_helper_retaddr(); + return ret; +} + +/* + * Do not allow unaligned operations to proceed. Return the host address. + * + * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. + */ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, - int size, uintptr_t retaddr) + MemOpIdx oi, int size, int prot, + uintptr_t retaddr) { /* Enforce qemu required alignment. */ if (unlikely(addr & (size - 1))) { cpu_loop_exit_atomic(env_cpu(env), retaddr); } - void *ret = g2h(addr); + void *ret = g2h(env_cpu(env), addr); set_helper_retaddr(retaddr); return ret; } -/* Macro to call the above, with local variables from the use context. */ -#define ATOMIC_MMU_DECLS do {} while (0) -#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC()) -#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0) -#define ATOMIC_MMU_IDX MMU_USER_IDX +#include "atomic_common.c.inc" -#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END)) -#define EXTRA_ARGS +/* + * First set of functions passes in OI and RETADDR. + * This makes them callable from other helpers. 
+ */ -#include "atomic_common.inc.c" +#define ATOMIC_NAME(X) \ + glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu) +#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0) +#define ATOMIC_MMU_IDX MMU_USER_IDX #define DATA_SIZE 1 #include "atomic_template.h" @@ -772,20 +1280,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, #include "atomic_template.h" #endif -/* The following is only callable from other helpers, and matches up - with the softmmu version. */ - #if HAVE_ATOMIC128 || HAVE_CMPXCHG128 - -#undef EXTRA_ARGS -#undef ATOMIC_NAME -#undef ATOMIC_MMU_LOOKUP - -#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr -#define ATOMIC_NAME(X) \ - HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu)) -#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, retaddr) - #define DATA_SIZE 16 #include "atomic_template.h" #endif
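With the new ATOMIC_NAME spelling, atomic_template.h generates entry points of the form cpu_atomic_<op><size><end>_mmu that take the memop index and return address explicitly instead of going through the old ATOMIC_MMU_LOOKUP macro. A hedged usage sketch follows; the wrapper and its name are hypothetical, and the generated helper name and signature are assumed from atomic_template.h (with the usual "l"/"_le" suffixes for a 4-byte little-endian access):

/* Hypothetical caller, for illustration only: a 32-bit little-endian
 * compare-and-swap routed through one of the generated _mmu helpers.
 * The exact generated name/signature is assumed from atomic_template.h. */
static uint32_t demo_cas32_le(CPUArchState *env, target_ulong addr,
                              uint32_t cmpv, uint32_t newv, uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL, MMU_USER_IDX);

    return cpu_atomic_cmpxchgl_le_mmu(env, addr, cmpv, newv, oi, ra);
}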