#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
-#include "exec/tb-hash.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
-#include "trace/mem.h"
+#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
}
}
-static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
- run_on_cpu_data data)
+static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
+ run_on_cpu_data data)
{
TLBFlushRangeData *d = data.host_ptr;
tlb_flush_range_by_mmuidx_async_0(cpu, *d);
} else {
/* Otherwise allocate a structure, freed by the worker. */
TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
- async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2,
+ async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
RUN_ON_CPU_HOST_PTR(p));
}
}
if (dst_cpu != src_cpu) {
TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
async_run_on_cpu(dst_cpu,
- tlb_flush_page_bits_by_mmuidx_async_2,
+ tlb_flush_range_by_mmuidx_async_1,
RUN_ON_CPU_HOST_PTR(p));
}
}
CPU_FOREACH(dst_cpu) {
if (dst_cpu != src_cpu) {
p = g_memdup(&d, sizeof(d));
- async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
+ async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
RUN_ON_CPU_HOST_PTR(p));
}
}
p = g_memdup(&d, sizeof(d));
- async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
+ async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
RUN_ON_CPU_HOST_PTR(p));
}
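For clarity on the ownership convention mentioned above ("allocate a structure, freed by the worker"): the copy produced by g_memdup() belongs to the async worker, which frees it once the flush has run. A minimal illustrative sketch of that pattern is below; it mirrors tlb_flush_range_by_mmuidx_async_1 in spirit and is not a quotation of the full body.

/* Illustrative worker: the caller queued a g_memdup'd TLBFlushRangeData
 * and transferred ownership to us; flush, then free it. */
static void flush_worker_example(CPUState *cpu, run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;

    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}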
data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
} else {
data->is_io = false;
- data->v.ram.hostaddr = addr + tlbe->addend;
+ data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
}
return true;
} else {
#endif
-/* Probe for a read-modify-write atomic operation. Do not allow unaligned
- * operations, or io operations to proceed. Return the host address. */
+/*
+ * Probe for an atomic operation. Do not allow unaligned operations,
+ * or io operations to proceed. Return the host address.
+ *
+ * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
+ */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, int size, int prot,
+ uintptr_t retaddr)
{
size_t mmu_idx = get_mmuidx(oi);
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = tlb_addr_write(tlbe);
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
- int s_bits = mop & MO_SIZE;
+ uintptr_t index;
+ CPUTLBEntry *tlbe;
+ target_ulong tlb_addr;
void *hostaddr;
/* Adjust the given return address. */
}
/* Enforce qemu required alignment. */
- if (unlikely(addr & ((1 << s_bits) - 1))) {
+ if (unlikely(addr & (size - 1))) {
/* We get here if guest alignment was not requested,
or was not enforced by cpu_unaligned_access above.
We might widen the access and emulate, but for now
goto stop_the_world;
}
+ index = tlb_index(env, mmu_idx, addr);
+ tlbe = tlb_entry(env, mmu_idx, addr);
+
/* Check TLB entry and enforce page permissions. */
- if (!tlb_hit(tlb_addr, addr)) {
- if (!VICTIM_TLB_HIT(addr_write, addr)) {
- tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
- mmu_idx, retaddr);
- index = tlb_index(env, mmu_idx, addr);
- tlbe = tlb_entry(env, mmu_idx, addr);
+ if (prot & PAGE_WRITE) {
+ tlb_addr = tlb_addr_write(tlbe);
+ if (!tlb_hit(tlb_addr, addr)) {
+ if (!VICTIM_TLB_HIT(addr_write, addr)) {
+ tlb_fill(env_cpu(env), addr, size,
+ MMU_DATA_STORE, mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ tlbe = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
+ }
+
+ /* Let the guest notice RMW on a write-only page. */
+ if ((prot & PAGE_READ) &&
+ unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
+ tlb_fill(env_cpu(env), addr, size,
+ MMU_DATA_LOAD, mmu_idx, retaddr);
+ /*
+ * Since we don't support reads and writes to different addresses,
+ * and we do have the proper page loaded for write, this shouldn't
+ * ever return. But just in case, handle via stop-the-world.
+ */
+ goto stop_the_world;
+ }
+ } else /* if (prot & PAGE_READ) */ {
+ tlb_addr = tlbe->addr_read;
+ if (!tlb_hit(tlb_addr, addr)) {
+            if (!VICTIM_TLB_HIT(addr_read, addr)) {
+ tlb_fill(env_cpu(env), addr, size,
+ MMU_DATA_LOAD, mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ tlbe = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
}
- tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
}
/* Notice an IO access or a needs-MMU-lookup access */
goto stop_the_world;
}
- /* Let the guest notice RMW on a write-only page. */
- if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
- tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
- mmu_idx, retaddr);
- /* Since we don't support reads and writes to different addresses,
- and we do have the proper page loaded for write, this shouldn't
- ever return. But just in case, handle via stop-the-world. */
- goto stop_the_world;
- }
-
hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, 1 << s_bits,
+ notdirty_write(env_cpu(env), addr, size,
&env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
}
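To illustrate the new contract documented above (@prot selects which permissions the lookup enforces), here is a hedged sketch of how callers might pass the size and prot arguments. The ATOMIC_MMU_LOOKUP_* macro names below are illustrative assumptions, not necessarily the exact definitions used by atomic_template.h.

/* Sketch: read-modify-write ops request both permissions; plain atomic
 * loads/stores request only one.  DATA_SIZE, oi and retaddr are assumed
 * to come from the including template. */
#define ATOMIC_MMU_LOOKUP_RW \
    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr)
#define ATOMIC_MMU_LOOKUP_R \
    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr)
#define ATOMIC_MMU_LOOKUP_W \
    atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr)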
*/
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
+ MemOpIdx oi, uintptr_t retaddr);
static inline uint64_t QEMU_ALWAYS_INLINE
load_memop(const void *haddr, MemOp op)
}
static inline uint64_t QEMU_ALWAYS_INLINE
-load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
uintptr_t retaddr, MemOp op, bool code_read,
FullLoadHelper *full_load)
{
*/
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_ldub_mmu(env, addr, oi, retaddr);
}
static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
full_le_lduw_mmu);
}
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_le_lduw_mmu(env, addr, oi, retaddr);
}
static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
full_be_lduw_mmu);
}
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_be_lduw_mmu(env, addr, oi, retaddr);
}
static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
full_le_ldul_mmu);
}
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_le_ldul_mmu(env, addr, oi, retaddr);
}
static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
full_be_ldul_mmu);
}
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_be_ldul_mmu(env, addr, oi, retaddr);
}
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
helper_le_ldq_mmu);
}
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
helper_be_ldq_mmu);
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}
int mmu_idx, uintptr_t retaddr,
MemOp op, FullLoadHelper *full_load)
{
- uint16_t meminfo;
- TCGMemOpIdx oi;
+ MemOpIdx oi = make_memop_idx(op, mmu_idx);
uint64_t ret;
- meminfo = trace_mem_get_info(op, mmu_idx, false);
- trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
- op &= ~MO_SIGN;
- oi = make_memop_idx(op, mmu_idx);
ret = full_load(env, addr, oi, retaddr);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
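A short hedged illustration of why the separate meminfo encoding can be dropped: a MemOpIdx already packs both the MemOp and the mmu index, so the same value can be handed to the trace point, the load helper and the plugin callback. The helper name below is hypothetical.

/* Illustrative only: MemOpIdx round-trips the MemOp and the mmu index. */
static void memop_idx_roundtrip_example(int mmu_idx)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);

    g_assert(get_memop(oi) == MO_LEUL);
    g_assert(get_mmuidx(oi) == mmu_idx);
}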
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
- return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
- full_ldub_mmu);
+ return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
}
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
- return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW,
- full_be_lduw_mmu);
+ return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
}
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
- return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW,
- full_le_lduw_mmu);
+ return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
}
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uintptr_t index, index2;
CPUTLBEntry *entry, *entry2;
target_ulong page2, tlb_addr, tlb_addr2;
- TCGMemOpIdx oi;
+ MemOpIdx oi;
size_t size2;
int i;
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
+ MemOpIdx oi, uintptr_t retaddr, MemOp op)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
void __attribute__((noinline))
helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_UB);
}
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx, uintptr_t retaddr, MemOp op)
{
- TCGMemOpIdx oi;
- uint16_t meminfo;
+ MemOpIdx oi = make_memop_idx(op, mmu_idx);
- meminfo = trace_mem_get_info(op, mmu_idx, true);
- trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
- oi = make_memop_idx(op, mmu_idx);
store_helper(env, addr, val, oi, retaddr, op);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
cpu_stq_le_data_ra(env, ptr, val, 0);
}
-/* First set of helpers allows passing in of OI and RETADDR. This makes
- them callable from other helpers. */
+/*
+ * First set of functions passes in OI and RETADDR.
+ * This makes them callable from other helpers.
+ */
-#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
- HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_DECLS
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
+ glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
+
#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX get_mmuidx(oi)
#include "atomic_template.h"
#endif
-/* Second set of helpers are directly callable from TCG as helpers. */
-
-#undef EXTRA_ARGS
-#undef ATOMIC_NAME
-#undef ATOMIC_MMU_LOOKUP
-#define EXTRA_ARGS , TCGMemOpIdx oi
-#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())
-
-#define DATA_SIZE 1
-#include "atomic_template.h"
-
-#define DATA_SIZE 2
-#include "atomic_template.h"
-
-#define DATA_SIZE 4
-#include "atomic_template.h"
-
-#ifdef CONFIG_ATOMIC64
-#define DATA_SIZE 8
-#include "atomic_template.h"
-#endif
-#undef ATOMIC_MMU_IDX
-
/* Code access functions. */
static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
- TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
return full_ldub_code(env, addr, oi, 0);
}
static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
- TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
return full_lduw_code(env, addr, oi, 0);
}
static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
- TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
return full_ldl_code(env, addr, oi, 0);
}
static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
- TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
return full_ldq_code(env, addr, oi, 0);
}