#include "qemu/osdep.h"
#include "qemu/main-loop.h"
-#include "cpu.h"
+#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
-#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
-#include "translate-all.h"
+#include "exec/translate-all.h"
#include "trace/trace-root.h"
-#include "trace/mem.h"
+#include "tb-hash.h"
+#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
desc->window_max_entries = max_entries;
}
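+/* Clear the TB_JMP_PAGE_SIZE jump cache slots hashed from @page_addr. */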
+static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
+{
+ unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
+
+ for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
+ qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
+ }
+}
+
+static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
+{
+    /*
+     * Discard jump cache entries for any tb which might potentially
+     * overlap the flushed page.
+     */
+ tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
+ tb_jmp_cache_clear_page(cpu, addr);
+}
+
/**
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
* @desc: The CPUTLBDesc portion of the TLB
tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}
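+/*
+ * Return true if @page matches the read, write or code address of
+ * @tlb_entry, comparing only the address bits selected by @mask.
+ */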
+static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
+ target_ulong page, target_ulong mask)
+{
+ page &= mask;
+ mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
+
+ return (page == (tlb_entry->addr_read & mask) ||
+ page == (tlb_addr_write(tlb_entry) & mask) ||
+ page == (tlb_entry->addr_code & mask));
+}
+
static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
target_ulong page)
{
- return tlb_hit_page(tlb_entry->addr_read, page) ||
- tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
- tlb_hit_page(tlb_entry->addr_code, page);
+ return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}
/**
}
/* Called with tlb_c.lock held */
-static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
- target_ulong page)
+static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
+ target_ulong page,
+ target_ulong mask)
{
- if (tlb_hit_page_anyprot(tlb_entry, page)) {
+ if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
memset(tlb_entry, -1, sizeof(*tlb_entry));
return true;
}
return false;
}
+static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
+ target_ulong page)
+{
+ return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
+}
+
/* Called with tlb_c.lock held */
-static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
- target_ulong page)
+static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
+ target_ulong page,
+ target_ulong mask)
{
CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
int k;
assert_cpu_is_self(env_cpu(env));
for (k = 0; k < CPU_VTLB_SIZE; k++) {
- if (tlb_flush_entry_locked(&d->vtable[k], page)) {
+ if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
tlb_n_used_entries_dec(env, mmu_idx);
}
}
}
+static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
+ target_ulong page)
+{
+ tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
+}
+
static void tlb_flush_page_locked(CPUArchState *env, int midx,
target_ulong page)
{
tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
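+/*
+ * Flush, for mmu index @midx, every TLB and victim TLB entry within
+ * [@addr, @addr + @len), matching only the low @bits of each address.
+ * Falls back to flushing the whole mmu index when the mask or length
+ * make a per-page walk impractical, or when a large page overlaps the
+ * range.
+ */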
+static void tlb_flush_range_locked(CPUArchState *env, int midx,
+ target_ulong addr, target_ulong len,
+ unsigned bits)
+{
+ CPUTLBDesc *d = &env_tlb(env)->d[midx];
+ CPUTLBDescFast *f = &env_tlb(env)->f[midx];
+ target_ulong mask = MAKE_64BIT_MASK(0, bits);
+
+ /*
+ * If @bits is smaller than the tlb size, there may be multiple entries
+ * within the TLB; otherwise all addresses that match under @mask hit
+ * the same TLB entry.
+     * TODO: Perhaps allow @bits to be a few bits less than the tlb size.
+     * For now, just flush the entire TLB.
+ *
+ * If @len is larger than the tlb size, then it will take longer to
+ * test all of the entries in the TLB than it will to flush it all.
+ */
+ if (mask < f->mask || len > f->mask) {
+ tlb_debug("forcing full flush midx %d ("
+ TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
+ midx, addr, mask, len);
+ tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
+ return;
+ }
+
+ /*
+ * Check if we need to flush due to large pages.
+ * Because large_page_mask contains all 1's from the msb,
+ * we only need to test the end of the range.
+ */
+ if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
+ tlb_debug("forcing full flush midx %d ("
+ TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+ midx, d->large_page_addr, d->large_page_mask);
+ tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
+ return;
+ }
+
+ for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
+ target_ulong page = addr + i;
+ CPUTLBEntry *entry = tlb_entry(env, midx, page);
+
+ if (tlb_flush_entry_mask_locked(entry, page, mask)) {
+ tlb_n_used_entries_dec(env, midx);
+ }
+ tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
+ }
+}
+
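+/*
+ * Arguments for the range-flush workers: page-aligned start address,
+ * length of the range, bitmap of mmu indexes, and number of
+ * significant address bits.
+ */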
+typedef struct {
+ target_ulong addr;
+ target_ulong len;
+ uint16_t idxmap;
+ uint16_t bits;
+} TLBFlushRangeData;
+
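+/*
+ * Flush the range described by @d on @cpu.  Must run on that vCPU's
+ * own thread; also discards any affected jump cache entries.
+ */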
+static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
+ TLBFlushRangeData d)
+{
+ CPUArchState *env = cpu->env_ptr;
+ int mmu_idx;
+
+ assert_cpu_is_self(cpu);
+
+ tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
+ d.addr, d.bits, d.len, d.idxmap);
+
+ qemu_spin_lock(&env_tlb(env)->c.lock);
+ for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+ if ((d.idxmap >> mmu_idx) & 1) {
+ tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
+ }
+ }
+ qemu_spin_unlock(&env_tlb(env)->c.lock);
+
+ for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
+ tb_flush_jmp_cache(cpu, d.addr + i);
+ }
+}
+
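+/* run_on_cpu worker: unpack and free the heap-allocated argument block. */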
+static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
+ run_on_cpu_data data)
+{
+ TLBFlushRangeData *d = data.host_ptr;
+ tlb_flush_range_by_mmuidx_async_0(cpu, *d);
+ g_free(d);
+}
+
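+/**
+ * tlb_flush_range_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: start of the virtual address range to flush
+ * @len: length of the range, in bytes
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in the addresses to flush
+ *
+ * For each mmu index in @idxmap, flush every page within
+ * [@addr, @addr + @len), matching only the low @bits of each address.
+ */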
+void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
+ target_ulong len, uint16_t idxmap,
+ unsigned bits)
+{
+ TLBFlushRangeData d;
+
+ /*
+ * If all bits are significant, and len is small,
+ * this devolves to tlb_flush_page.
+ */
+ if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+ tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
+ return;
+ }
+ /* If no page bits are significant, this devolves to tlb_flush. */
+ if (bits < TARGET_PAGE_BITS) {
+ tlb_flush_by_mmuidx(cpu, idxmap);
+ return;
+ }
+
+ /* This should already be page aligned */
+ d.addr = addr & TARGET_PAGE_MASK;
+ d.len = len;
+ d.idxmap = idxmap;
+ d.bits = bits;
+
+ if (qemu_cpu_is_self(cpu)) {
+ tlb_flush_range_by_mmuidx_async_0(cpu, d);
+ } else {
+ /* Otherwise allocate a structure, freed by the worker. */
+ TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
+ async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
+ RUN_ON_CPU_HOST_PTR(p));
+ }
+}
+
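+/* Single-page variant of the range flush above. */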
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
+ uint16_t idxmap, unsigned bits)
+{
+ tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
+}
+
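+/* As above, but broadcast the flush to all other vCPUs as well. */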
+void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
+ target_ulong addr, target_ulong len,
+ uint16_t idxmap, unsigned bits)
+{
+ TLBFlushRangeData d;
+ CPUState *dst_cpu;
+
+ /*
+ * If all bits are significant, and len is small,
+ * this devolves to tlb_flush_page.
+ */
+ if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+ tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
+ return;
+ }
+ /* If no page bits are significant, this devolves to tlb_flush. */
+ if (bits < TARGET_PAGE_BITS) {
+ tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
+ return;
+ }
+
+ /* This should already be page aligned */
+ d.addr = addr & TARGET_PAGE_MASK;
+ d.len = len;
+ d.idxmap = idxmap;
+ d.bits = bits;
+
+ /* Allocate a separate data block for each destination cpu. */
+ CPU_FOREACH(dst_cpu) {
+ if (dst_cpu != src_cpu) {
+ TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
+ async_run_on_cpu(dst_cpu,
+ tlb_flush_range_by_mmuidx_async_1,
+ RUN_ON_CPU_HOST_PTR(p));
+ }
+ }
+
+ tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
+}
+
+void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
+ target_ulong addr,
+ uint16_t idxmap, unsigned bits)
+{
+ tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
+ idxmap, bits);
+}
+
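+/*
+ * As above, but the flush on the source vCPU is queued with
+ * async_safe_run_on_cpu() rather than performed inline.
+ */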
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+ target_ulong addr,
+ target_ulong len,
+ uint16_t idxmap,
+ unsigned bits)
+{
+ TLBFlushRangeData d, *p;
+ CPUState *dst_cpu;
+
+ /*
+ * If all bits are significant, and len is small,
+ * this devolves to tlb_flush_page.
+ */
+ if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
+ tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
+ return;
+ }
+ /* If no page bits are significant, this devolves to tlb_flush. */
+ if (bits < TARGET_PAGE_BITS) {
+ tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
+ return;
+ }
+
+ /* This should already be page aligned */
+ d.addr = addr & TARGET_PAGE_MASK;
+ d.len = len;
+ d.idxmap = idxmap;
+ d.bits = bits;
+
+ /* Allocate a separate data block for each destination cpu. */
+ CPU_FOREACH(dst_cpu) {
+ if (dst_cpu != src_cpu) {
+ p = g_memdup(&d, sizeof(d));
+ async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
+ RUN_ON_CPU_HOST_PTR(p));
+ }
+ }
+
+ p = g_memdup(&d, sizeof(d));
+ async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
+ RUN_ON_CPU_HOST_PTR(p));
+}
+
+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+ target_ulong addr,
+ uint16_t idxmap,
+ unsigned bits)
+{
+ tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
+ idxmap, bits);
+}
+
/* update the TLBs so that writes to code in the virtual page 'addr'
can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
* This is not a probe, so only valid return is success; failure
* should result in exception + longjmp to the cpu loop.
*/
- ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
+ ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
+ access_type, mmu_idx, false, retaddr);
assert(ok);
}
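+/* Raise the guest's unaligned-access exception via the TCGCPUOps hook. */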
+static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
+}
+
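+/*
+ * Report a failed memory transaction to the guest via the TCGCPUOps
+ * hook, unless the board requested that such failures be ignored.
+ */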
+static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response,
+ uintptr_t retaddr)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
+ if (!cpu->ignore_memory_transaction_failures &&
+ cc->tcg_ops->do_transaction_failed) {
+ cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
+ access_type, mmu_idx, attrs,
+ response, retaddr);
+ }
+}
+
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx, target_ulong addr, uintptr_t retaddr,
MMUAccessType access_type, MemOp op)
CPUState *cs = env_cpu(env);
CPUClass *cc = CPU_GET_CLASS(cs);
- if (!cc->tlb_fill(cs, addr, fault_size, access_type,
- mmu_idx, nonfault, retaddr)) {
+ if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
+ mmu_idx, nonfault, retaddr)) {
/* Non-faulting page table read failed. */
*phost = NULL;
return TLB_INVALID_MASK;
data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
} else {
data->is_io = false;
- data->v.ram.hostaddr = addr + tlbe->addend;
+ data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
}
return true;
} else {
#endif
-/* Probe for a read-modify-write atomic operation. Do not allow unaligned
- * operations, or io operations to proceed. Return the host address. */
+/*
+ * Probe for an atomic operation. Do not allow unaligned operations,
+ * or io operations to proceed. Return the host address.
+ *
+ * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
+ */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, int size, int prot,
+ uintptr_t retaddr)
{
size_t mmu_idx = get_mmuidx(oi);
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr = tlb_addr_write(tlbe);
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
- int s_bits = mop & MO_SIZE;
+ uintptr_t index;
+ CPUTLBEntry *tlbe;
+ target_ulong tlb_addr;
void *hostaddr;
/* Adjust the given return address. */
}
/* Enforce qemu required alignment. */
- if (unlikely(addr & ((1 << s_bits) - 1))) {
+ if (unlikely(addr & (size - 1))) {
/* We get here if guest alignment was not requested,
or was not enforced by cpu_unaligned_access above.
We might widen the access and emulate, but for now
goto stop_the_world;
}
+ index = tlb_index(env, mmu_idx, addr);
+ tlbe = tlb_entry(env, mmu_idx, addr);
+
/* Check TLB entry and enforce page permissions. */
- if (!tlb_hit(tlb_addr, addr)) {
- if (!VICTIM_TLB_HIT(addr_write, addr)) {
- tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE,
- mmu_idx, retaddr);
- index = tlb_index(env, mmu_idx, addr);
- tlbe = tlb_entry(env, mmu_idx, addr);
+ if (prot & PAGE_WRITE) {
+ tlb_addr = tlb_addr_write(tlbe);
+ if (!tlb_hit(tlb_addr, addr)) {
+ if (!VICTIM_TLB_HIT(addr_write, addr)) {
+ tlb_fill(env_cpu(env), addr, size,
+ MMU_DATA_STORE, mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ tlbe = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
+ }
+
+ /* Let the guest notice RMW on a write-only page. */
+ if ((prot & PAGE_READ) &&
+ unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
+ tlb_fill(env_cpu(env), addr, size,
+ MMU_DATA_LOAD, mmu_idx, retaddr);
+ /*
+ * Since we don't support reads and writes to different addresses,
+ * and we do have the proper page loaded for write, this shouldn't
+ * ever return. But just in case, handle via stop-the-world.
+ */
+ goto stop_the_world;
+ }
+ } else /* if (prot & PAGE_READ) */ {
+ tlb_addr = tlbe->addr_read;
+ if (!tlb_hit(tlb_addr, addr)) {
+ if (!VICTIM_TLB_HIT(addr_write, addr)) {
+ tlb_fill(env_cpu(env), addr, size,
+ MMU_DATA_LOAD, mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ tlbe = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
}
- tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
}
/* Notice an IO access or a needs-MMU-lookup access */
goto stop_the_world;
}
- /* Let the guest notice RMW on a write-only page. */
- if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
- tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD,
- mmu_idx, retaddr);
- /* Since we don't support reads and writes to different addresses,
- and we do have the proper page loaded for write, this shouldn't
- ever return. But just in case, handle via stop-the-world. */
- goto stop_the_world;
- }
-
hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, 1 << s_bits,
+ notdirty_write(env_cpu(env), addr, size,
&env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
}
*/
typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr);
+ MemOpIdx oi, uintptr_t retaddr);
static inline uint64_t QEMU_ALWAYS_INLINE
load_memop(const void *haddr, MemOp op)
}
static inline uint64_t QEMU_ALWAYS_INLINE
-load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
uintptr_t retaddr, MemOp op, bool code_read,
FullLoadHelper *full_load)
{
*/
static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
}
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_ldub_mmu(env, addr, oi, retaddr);
}
static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
full_le_lduw_mmu);
}
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_le_lduw_mmu(env, addr, oi, retaddr);
}
static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
full_be_lduw_mmu);
}
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_be_lduw_mmu(env, addr, oi, retaddr);
}
static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
full_le_ldul_mmu);
}
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_le_ldul_mmu(env, addr, oi, retaddr);
}
static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
full_be_ldul_mmu);
}
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return full_be_ldul_mmu(env, addr, oi, retaddr);
}
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
helper_le_ldq_mmu);
}
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
helper_be_ldq_mmu);
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
}
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}
int mmu_idx, uintptr_t retaddr,
MemOp op, FullLoadHelper *full_load)
{
- uint16_t meminfo;
- TCGMemOpIdx oi;
+ MemOpIdx oi = make_memop_idx(op, mmu_idx);
uint64_t ret;
- meminfo = trace_mem_get_info(op, mmu_idx, false);
- trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
+ trace_guest_ld_before_exec(env_cpu(env), addr, oi);
- op &= ~MO_SIGN;
- oi = make_memop_idx(op, mmu_idx);
ret = full_load(env, addr, oi, retaddr);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
- return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
- full_ldub_mmu);
+ return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
}
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
- return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW,
- full_be_lduw_mmu);
+ return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
}
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
- return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW,
- full_le_lduw_mmu);
+ return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
}
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
uintptr_t index, index2;
CPUTLBEntry *entry, *entry2;
target_ulong page2, tlb_addr, tlb_addr2;
- TCGMemOpIdx oi;
+ MemOpIdx oi;
size_t size2;
int i;
static inline void QEMU_ALWAYS_INLINE
store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
+ MemOpIdx oi, uintptr_t retaddr, MemOp op)
{
uintptr_t mmu_idx = get_mmuidx(oi);
uintptr_t index = tlb_index(env, mmu_idx, addr);
void __attribute__((noinline))
helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_UB);
}
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_LEUW);
}
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_BEUW);
}
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_LEUL);
}
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_BEUL);
}
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_LEQ);
}
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx, uintptr_t retaddr, MemOp op)
{
- TCGMemOpIdx oi;
- uint16_t meminfo;
+ MemOpIdx oi = make_memop_idx(op, mmu_idx);
- meminfo = trace_mem_get_info(op, mmu_idx, true);
- trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
+ trace_guest_st_before_exec(env_cpu(env), addr, oi);
- oi = make_memop_idx(op, mmu_idx);
store_helper(env, addr, val, oi, retaddr, op);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
cpu_stq_le_data_ra(env, ptr, val, 0);
}
-/* First set of helpers allows passing in of OI and RETADDR. This makes
- them callable from other helpers. */
+/*
+ * First set of functions passes in OI and RETADDR.
+ * This makes them callable from other helpers.
+ */
-#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
- HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
-#define ATOMIC_MMU_DECLS
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
+ glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
+
#define ATOMIC_MMU_CLEANUP
#define ATOMIC_MMU_IDX get_mmuidx(oi)
#include "atomic_template.h"
#endif
-/* Second set of helpers are directly callable from TCG as helpers. */
-
-#undef EXTRA_ARGS
-#undef ATOMIC_NAME
-#undef ATOMIC_MMU_LOOKUP
-#define EXTRA_ARGS , TCGMemOpIdx oi
-#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
-#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, GETPC())
-
-#define DATA_SIZE 1
-#include "atomic_template.h"
-
-#define DATA_SIZE 2
-#include "atomic_template.h"
-
-#define DATA_SIZE 4
-#include "atomic_template.h"
-
-#ifdef CONFIG_ATOMIC64
-#define DATA_SIZE 8
-#include "atomic_template.h"
-#endif
-#undef ATOMIC_MMU_IDX
-
/* Code access functions. */
static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
- TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
return full_ldub_code(env, addr, oi, 0);
}
static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
- TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
return full_lduw_code(env, addr, oi, 0);
}
static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
- TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
return full_ldl_code(env, addr, oi, 0);
}
static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+ MemOpIdx oi, uintptr_t retaddr)
{
return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
- TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
return full_ldq_code(env, addr, oi, 0);
}