#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "translate-all.h"
+#include "trace-root.h"
+#include "trace/mem.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)
-static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx)
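+/* fast->mask stores (n_entries - 1) << CPU_TLB_ENTRY_BITS, so invert that here. */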
+static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
- return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS);
+ return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
+}
+
+static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
+{
+ return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}
static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
desc->window_max_entries = max_entries;
}
-static void tlb_dyn_init(CPUArchState *env)
-{
- int i;
-
- for (i = 0; i < NB_MMU_MODES; i++) {
- CPUTLBDesc *desc = &env_tlb(env)->d[i];
- size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
-
- tlb_window_reset(desc, get_clock_realtime(), 0);
- desc->n_used_entries = 0;
- env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
- env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
- env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
- }
-}
-
/**
* tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
- * @env: CPU that owns the TLB
- * @mmu_idx: MMU index of the TLB
+ * @desc: The CPUTLBDesc portion of the TLB
+ * @fast: The CPUTLBDescFast portion of the same TLB
*
 * Called with tlb_lock held.
*
* high), since otherwise we are likely to have a significant amount of
* conflict misses.
*/
-static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
+static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
+ int64_t now)
{
- CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
- size_t old_size = tlb_n_entries(env, mmu_idx);
+ size_t old_size = tlb_n_entries(fast);
size_t rate;
size_t new_size = old_size;
- int64_t now = get_clock_realtime();
int64_t window_len_ms = 100;
int64_t window_len_ns = window_len_ms * 1000 * 1000;
bool window_expired = now > desc->window_begin_ns + window_len_ns;
return;
}
- g_free(env_tlb(env)->f[mmu_idx].table);
- g_free(env_tlb(env)->d[mmu_idx].iotlb);
+ g_free(fast->table);
+ g_free(desc->iotlb);
tlb_window_reset(desc, now, 0);
/* desc->n_used_entries is cleared by the caller */
- env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
- env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
- env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
+ fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
+ fast->table = g_try_new(CPUTLBEntry, new_size);
+ desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
+
/*
* If the allocations fail, try smaller sizes. We just freed some
* memory, so going back to half of new_size has a good chance of working.
* allocations to fail though, so we progressively reduce the allocation
* size, aborting if we cannot even allocate the smallest TLB we support.
*/
- while (env_tlb(env)->f[mmu_idx].table == NULL ||
- env_tlb(env)->d[mmu_idx].iotlb == NULL) {
+ while (fast->table == NULL || desc->iotlb == NULL) {
if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
error_report("%s: %s", __func__, strerror(errno));
abort();
}
new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
-        env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
-        g_free(env_tlb(env)->f[mmu_idx].table);
-        g_free(env_tlb(env)->d[mmu_idx].iotlb);
-        env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size);
-        env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size);
+        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
+        g_free(fast->table);
+        g_free(desc->iotlb);
+        fast->table = g_try_new(CPUTLBEntry, new_size);
+        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
}
}
-static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
+static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
- tlb_mmu_resize_locked(env, mmu_idx);
- memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx));
- env_tlb(env)->d[mmu_idx].n_used_entries = 0;
+ desc->n_used_entries = 0;
+ desc->large_page_addr = -1;
+ desc->large_page_mask = -1;
+ desc->vindex = 0;
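+    /* Filling both tables with 0xff bytes sets TLB_INVALID_MASK in every entry. */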
+ memset(fast->table, -1, sizeof_tlb(fast));
+ memset(desc->vtable, -1, sizeof(desc->vtable));
+}
+
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
+ int64_t now)
+{
+ CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
+ CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
+
+ tlb_mmu_resize_locked(desc, fast, now);
+ tlb_mmu_flush_locked(desc, fast);
+}
+
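+/* Allocate one TLB at its default size and leave it in the flushed state. */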
+static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
+{
+ size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
+
+ tlb_window_reset(desc, now, 0);
+ desc->n_used_entries = 0;
+ fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
+ fast->table = g_new(CPUTLBEntry, n_entries);
+ desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
+ tlb_mmu_flush_locked(desc, fast);
}
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
void tlb_init(CPUState *cpu)
{
CPUArchState *env = cpu->env_ptr;
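+    /* A single timestamp seeds the resize window of every mmu_idx. */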
+ int64_t now = get_clock_realtime();
+ int i;
qemu_spin_init(&env_tlb(env)->c.lock);
- /* Ensure that cpu_reset performs a full flush. */
- env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
+ /* All tlbs are initialized flushed. */
+ env_tlb(env)->c.dirty = 0;
+
-    tlb_dyn_init(env);
+ for (i = 0; i < NB_MMU_MODES; i++) {
+ tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
+ }
+}
+
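+/* Release the memory allocated per mmu_idx by tlb_mmu_init. */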
+void tlb_destroy(CPUState *cpu)
+{
+ CPUArchState *env = cpu->env_ptr;
+ int i;
+
+ qemu_spin_destroy(&env_tlb(env)->c.lock);
+ for (i = 0; i < NB_MMU_MODES; i++) {
+ CPUTLBDesc *desc = &env_tlb(env)->d[i];
+ CPUTLBDescFast *fast = &env_tlb(env)->f[i];
+
+ g_free(fast->table);
+ g_free(desc->iotlb);
+ }
}
/* flush_all_helper: run fn across all cpus
*pelide = elide;
}
-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
-{
- tlb_table_flush_by_mmuidx(env, mmu_idx);
- env_tlb(env)->d[mmu_idx].large_page_addr = -1;
- env_tlb(env)->d[mmu_idx].large_page_mask = -1;
- env_tlb(env)->d[mmu_idx].vindex = 0;
- memset(env_tlb(env)->d[mmu_idx].vtable, -1,
- sizeof(env_tlb(env)->d[0].vtable));
-}
-
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
CPUArchState *env = cpu->env_ptr;
uint16_t asked = data.host_int;
uint16_t all_dirty, work, to_clean;
+ int64_t now = get_clock_realtime();
assert_cpu_is_self(cpu);
for (work = to_clean; work != 0; work &= work - 1) {
int mmu_idx = ctz32(work);
- tlb_flush_one_mmuidx_locked(env, mmu_idx);
+ tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
}
qemu_spin_unlock(&env_tlb(env)->c.lock);
tlb_debug("forcing full flush midx %d ("
TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
midx, lp_addr, lp_mask);
- tlb_flush_one_mmuidx_locked(env, midx);
+ tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
} else {
if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
tlb_n_used_entries_dec(env, midx);
}
}
-/* As we are going to hijack the bottom bits of the page address for a
- * mmuidx bit mask we need to fail to build if we can't do that
+/**
+ * tlb_flush_page_by_mmuidx_async_0:
+ * @cpu: cpu on which to flush
+ * @addr: page of virtual address to flush
+ * @idxmap: set of mmu_idx to flush
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
+ * at @addr from the tlbs indicated by @idxmap from @cpu.
*/
-QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN);
-
-static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu,
- run_on_cpu_data data)
+static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
+ target_ulong addr,
+ uint16_t idxmap)
{
CPUArchState *env = cpu->env_ptr;
- target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr;
- target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK;
- unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
int mmu_idx;
assert_cpu_is_self(cpu);
- tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n",
- addr, mmu_idx_bitmap);
+ tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);
qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
- if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
+ if ((idxmap >> mmu_idx) & 1) {
tlb_flush_page_locked(env, mmu_idx, addr);
}
}
tb_flush_jmp_cache(cpu, addr);
}
-void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
+/**
+ * tlb_flush_page_by_mmuidx_async_1:
+ * @cpu: cpu on which to flush
+ * @data: encoded addr + idxmap
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
+ * async_run_on_cpu. The idxmap parameter is encoded in the page
+ * offset of the target_ptr field. This limits the set of mmu_idx
+ * that can be passed via this method.
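+ * E.g. with 4 KiB pages, only mmu_idx 0-11 can be encoded this way.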
+ */
+static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
+ run_on_cpu_data data)
+{
+ target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
+ target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
+ uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;
+
+ tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
+}
+
+typedef struct {
+ target_ulong addr;
+ uint16_t idxmap;
+} TLBFlushPageByMMUIdxData;
+
+/**
+ * tlb_flush_page_by_mmuidx_async_2:
+ * @cpu: cpu on which to flush
+ * @data: allocated addr + idxmap
+ *
+ * Helper for tlb_flush_page_by_mmuidx and friends, called through
+ * async_run_on_cpu. The addr+idxmap parameters are stored in a
+ * TLBFlushPageByMMUIdxData structure that has been allocated
+ * specifically for this helper. Free the structure when done.
+ */
+static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
+ run_on_cpu_data data)
{
- target_ulong addr_and_mmu_idx;
+ TLBFlushPageByMMUIdxData *d = data.host_ptr;
+
+ tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
+ g_free(d);
+}
+void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
+{
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);
/* This should already be page aligned */
- addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
- addr_and_mmu_idx |= idxmap;
+ addr &= TARGET_PAGE_MASK;
- if (!qemu_cpu_is_self(cpu)) {
- async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work,
- RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ if (qemu_cpu_is_self(cpu)) {
+ tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
+ } else if (idxmap < TARGET_PAGE_SIZE) {
+ /*
+ * Most targets have only a few mmu_idx. In the case where
+ * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
+ * allocating memory for this operation.
+ */
+ async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
} else {
- tlb_flush_page_by_mmuidx_async_work(
- cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
+
+ /* Otherwise allocate a structure, freed by the worker. */
+ d->addr = addr;
+ d->idxmap = idxmap;
+ async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
+ RUN_ON_CPU_HOST_PTR(d));
}
}
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
uint16_t idxmap)
{
- const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
- target_ulong addr_and_mmu_idx;
-
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
/* This should already be page aligned */
- addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
- addr_and_mmu_idx |= idxmap;
+ addr &= TARGET_PAGE_MASK;
+
-    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
-    fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ /*
+ * Allocate memory to hold addr+idxmap only when needed.
+ * See tlb_flush_page_by_mmuidx for details.
+ */
+ if (idxmap < TARGET_PAGE_SIZE) {
+ flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+ } else {
+ CPUState *dst_cpu;
+
+ /* Allocate a separate data block for each destination cpu. */
+ CPU_FOREACH(dst_cpu) {
+ if (dst_cpu != src_cpu) {
+ TLBFlushPageByMMUIdxData *d
+ = g_new(TLBFlushPageByMMUIdxData, 1);
+
+ d->addr = addr;
+ d->idxmap = idxmap;
+ async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
+ RUN_ON_CPU_HOST_PTR(d));
+ }
+ }
+ }
+
+ tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
target_ulong addr,
uint16_t idxmap)
{
- const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work;
- target_ulong addr_and_mmu_idx;
-
tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);
/* This should already be page aligned */
- addr_and_mmu_idx = addr & TARGET_PAGE_MASK;
- addr_and_mmu_idx |= idxmap;
+ addr &= TARGET_PAGE_MASK;
+
-    flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
-    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx));
+ /*
+ * Allocate memory to hold addr+idxmap only when needed.
+ * See tlb_flush_page_by_mmuidx for details.
+ */
+ if (idxmap < TARGET_PAGE_SIZE) {
+ flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+ async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
+ RUN_ON_CPU_TARGET_PTR(addr | idxmap));
+ } else {
+ CPUState *dst_cpu;
+ TLBFlushPageByMMUIdxData *d;
+
+ /* Allocate a separate data block for each destination cpu. */
+ CPU_FOREACH(dst_cpu) {
+ if (dst_cpu != src_cpu) {
+ d = g_new(TLBFlushPageByMMUIdxData, 1);
+ d->addr = addr;
+ d->idxmap = idxmap;
+ async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
+ RUN_ON_CPU_HOST_PTR(d));
+ }
+ }
+
+ d = g_new(TLBFlushPageByMMUIdxData, 1);
+ d->addr = addr;
+ d->idxmap = idxmap;
+ async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
+ RUN_ON_CPU_HOST_PTR(d));
+ }
}
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
qemu_spin_lock(&env_tlb(env)->c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
unsigned int i;
- unsigned int n = tlb_n_entries(env, mmu_idx);
+ unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
for (i = 0; i < n; i++) {
tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
}
}
-/*
- * Probe for whether the specified guest access is permitted. If it is not
- * permitted then an exception will be taken in the same way as if this
- * were a real access (and we will not return).
- * If the size is 0 or the page requires I/O access, returns NULL; otherwise,
- * returns the address of the host page similar to tlb_vaddr_to_host().
- */
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+static int probe_access_internal(CPUArchState *env, target_ulong addr,
+ int fault_size, MMUAccessType access_type,
+ int mmu_idx, bool nonfault,
+ void **phost, uintptr_t retaddr)
{
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr;
+ target_ulong tlb_addr, page_addr;
size_t elt_ofs;
- int wp_access;
-
- g_assert(-(addr | TARGET_PAGE_MASK) >= size);
+ int flags;
switch (access_type) {
case MMU_DATA_LOAD:
elt_ofs = offsetof(CPUTLBEntry, addr_read);
- wp_access = BP_MEM_READ;
break;
case MMU_DATA_STORE:
elt_ofs = offsetof(CPUTLBEntry, addr_write);
- wp_access = BP_MEM_WRITE;
break;
case MMU_INST_FETCH:
elt_ofs = offsetof(CPUTLBEntry, addr_code);
- wp_access = BP_MEM_READ;
break;
default:
g_assert_not_reached();
}
tlb_addr = tlb_read_ofs(entry, elt_ofs);
- if (unlikely(!tlb_hit(tlb_addr, addr))) {
- if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs,
- addr & TARGET_PAGE_MASK)) {
- tlb_fill(env_cpu(env), addr, size, access_type, mmu_idx, retaddr);
- /* TLB resize via tlb_fill may have moved the entry. */
- index = tlb_index(env, mmu_idx, addr);
+ page_addr = addr & TARGET_PAGE_MASK;
+ if (!tlb_hit_page(tlb_addr, page_addr)) {
+ if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
+ CPUState *cs = env_cpu(env);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+
+ if (!cc->tlb_fill(cs, addr, fault_size, access_type,
+ mmu_idx, nonfault, retaddr)) {
+ /* Non-faulting page table read failed. */
+ *phost = NULL;
+ return TLB_INVALID_MASK;
+ }
+
+ /* TLB resize via tlb_fill may have moved the entry. */
entry = tlb_entry(env, mmu_idx, addr);
}
tlb_addr = tlb_read_ofs(entry, elt_ofs);
}
+ flags = tlb_addr & TLB_FLAGS_MASK;
- if (!size) {
- return NULL;
+ /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
+ if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
+ *phost = NULL;
+ return TLB_MMIO;
}
- if (unlikely(tlb_addr & TLB_FLAGS_MASK)) {
+ /* Everything else is RAM. */
+ *phost = (void *)((uintptr_t)addr + entry->addend);
+ return flags;
+}
+
+int probe_access_flags(CPUArchState *env, target_ulong addr,
+ MMUAccessType access_type, int mmu_idx,
+ bool nonfault, void **phost, uintptr_t retaddr)
+{
+ int flags;
+
+ flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
+ nonfault, phost, retaddr);
+
+ /* Handle clean RAM pages. */
+ if (unlikely(flags & TLB_NOTDIRTY)) {
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
- /* Reject I/O access, or other required slow-path. */
- if (tlb_addr & (TLB_MMIO | TLB_BSWAP | TLB_DISCARD_WRITE)) {
- return NULL;
- }
+ notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
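+        /* Dirty bits are now set; don't make the caller handle TLB_NOTDIRTY. */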
+ flags &= ~TLB_NOTDIRTY;
+ }
+
+ return flags;
+}
+
+void *probe_access(CPUArchState *env, target_ulong addr, int size,
+ MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+{
+ void *host;
+ int flags;
+
+ g_assert(-(addr | TARGET_PAGE_MASK) >= size);
+
+ flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
+ false, &host, retaddr);
+
+ /* Per the interface, size == 0 merely faults the access. */
+ if (size == 0) {
+ return NULL;
+ }
+
+ if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+ CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
/* Handle watchpoints. */
- if (tlb_addr & TLB_WATCHPOINT) {
+ if (flags & TLB_WATCHPOINT) {
+ int wp_access = (access_type == MMU_DATA_STORE
+ ? BP_MEM_WRITE : BP_MEM_READ);
cpu_check_watchpoint(env_cpu(env), addr, size,
iotlbentry->attrs, wp_access, retaddr);
}
/* Handle clean RAM pages. */
- if (tlb_addr & TLB_NOTDIRTY) {
- notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
+ if (flags & TLB_NOTDIRTY) {
+ notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
}
}
- return (void *)((uintptr_t)addr + entry->addend);
+ return host;
}
void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
MMUAccessType access_type, int mmu_idx)
{
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
- target_ulong tlb_addr, page;
- size_t elt_ofs;
-
- switch (access_type) {
- case MMU_DATA_LOAD:
- elt_ofs = offsetof(CPUTLBEntry, addr_read);
- break;
- case MMU_DATA_STORE:
- elt_ofs = offsetof(CPUTLBEntry, addr_write);
- break;
- case MMU_INST_FETCH:
- elt_ofs = offsetof(CPUTLBEntry, addr_code);
- break;
- default:
- g_assert_not_reached();
- }
-
- page = addr & TARGET_PAGE_MASK;
- tlb_addr = tlb_read_ofs(entry, elt_ofs);
-
- if (!tlb_hit_page(tlb_addr, page)) {
- uintptr_t index = tlb_index(env, mmu_idx, addr);
-
- if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
- CPUState *cs = env_cpu(env);
- CPUClass *cc = CPU_GET_CLASS(cs);
-
- if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
- /* Non-faulting page table read failed. */
- return NULL;
- }
-            /* TLB resize via tlb_fill may have moved the entry. */
-            entry = tlb_entry(env, mmu_idx, addr);
-        }
-        tlb_addr = tlb_read_ofs(entry, elt_ofs);
-    }
-
-    if (tlb_addr & ~TARGET_PAGE_MASK) {
-        /* IO access */
-        return NULL;
-    }
-    return (void *)((uintptr_t)addr + entry->addend);
+    void *host;
+    int flags;
+
+    flags = probe_access_internal(env, addr, 0, access_type,
+                                  mmu_idx, true, &host, 0);
+
+    /* No combination of flags is expected by the caller. */
+    return flags ? NULL : host;
}
-
#ifdef CONFIG_PLUGIN
/*
* Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
}
+/*
+ * Load helpers for cpu_ldst.h.
+ */
+
+static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t retaddr,
+ MemOp op, FullLoadHelper *full_load)
+{
+ uint16_t meminfo;
+ TCGMemOpIdx oi;
+ uint64_t ret;
+
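+    /* Trace the access before it happens; plugins are notified afterwards. */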
+ meminfo = trace_mem_get_info(op, mmu_idx, false);
+ trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
+
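+    /* Load unsigned here; the signed wrappers sign-extend via their casts. */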
+ op &= ~MO_SIGN;
+ oi = make_memop_idx(op, mmu_idx);
+ ret = full_load(env, addr, oi, retaddr);
+
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
+
+ return ret;
+}
+
+uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu);
+}
+
+int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB,
+ full_ldub_mmu);
+}
+
+uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu);
+}
+
+int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW,
+ full_be_lduw_mmu);
+}
+
+uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu);
+}
+
+uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu);
+}
+
+uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu);
+}
+
+int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW,
+ full_le_lduw_mmu);
+}
+
+uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu);
+}
+
+uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
+ int mmu_idx, uintptr_t ra)
+{
+ return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu);
+}
+
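+/*
+ * The *_data_ra functions are wrappers that supply the default data mmu_idx.
+ */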
+uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr,
+ uintptr_t retaddr)
+{
+ return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
+{
+ return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr,
+ uintptr_t retaddr)
+{
+ return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
+{
+ return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr,
+ uintptr_t retaddr)
+{
+ return cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr,
+ uintptr_t retaddr)
+{
+ return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr,
+ uintptr_t retaddr)
+{
+ return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr)
+{
+ return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr,
+ uintptr_t retaddr)
+{
+ return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr,
+ uintptr_t retaddr)
+{
+ return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr);
+}
+
+uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr)
+{
+ return cpu_ldub_data_ra(env, ptr, 0);
+}
+
+int cpu_ldsb_data(CPUArchState *env, target_ulong ptr)
+{
+ return cpu_ldsb_data_ra(env, ptr, 0);
+}
+
+uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr)
+{
+ return cpu_lduw_be_data_ra(env, ptr, 0);
+}
+
+int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr)
+{
+ return cpu_ldsw_be_data_ra(env, ptr, 0);
+}
+
+uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr)
+{
+ return cpu_ldl_be_data_ra(env, ptr, 0);
+}
+
+uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr)
+{
+ return cpu_ldq_be_data_ra(env, ptr, 0);
+}
+
+uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr)
+{
+ return cpu_lduw_le_data_ra(env, ptr, 0);
+}
+
+int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr)
+{
+ return cpu_ldsw_le_data_ra(env, ptr, 0);
+}
+
+uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr)
+{
+ return cpu_ldl_le_data_ra(env, ptr, 0);
+}
+
+uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr)
+{
+ return cpu_ldq_le_data_ra(env, ptr, 0);
+}
+
/*
* Store Helpers
*/
store_helper(env, addr, val, oi, retaddr, MO_BEQ);
}
+/*
+ * Store Helpers for cpu_ldst.h
+ */
+
+static inline void QEMU_ALWAYS_INLINE
+cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
+ int mmu_idx, uintptr_t retaddr, MemOp op)
+{
+ TCGMemOpIdx oi;
+ uint16_t meminfo;
+
+ meminfo = trace_mem_get_info(op, mmu_idx, true);
+ trace_guest_mem_before_exec(env_cpu(env), addr, meminfo);
+
+ oi = make_memop_idx(op, mmu_idx);
+ store_helper(env, addr, val, oi, retaddr, op);
+
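+    /* Let plugins observe the store only after it has completed. */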
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo);
+}
+
+void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
+ int mmu_idx, uintptr_t retaddr)
+{
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB);
+}
+
+void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
+ int mmu_idx, uintptr_t retaddr)
+{
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW);
+}
+
+void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
+ int mmu_idx, uintptr_t retaddr)
+{
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL);
+}
+
+void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
+ int mmu_idx, uintptr_t retaddr)
+{
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ);
+}
+
+void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
+ int mmu_idx, uintptr_t retaddr)
+{
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW);
+}
+
+void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val,
+ int mmu_idx, uintptr_t retaddr)
+{
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL);
+}
+
+void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val,
+ int mmu_idx, uintptr_t retaddr)
+{
+ cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ);
+}
+
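+/*
+ * The *_data_ra store wrappers likewise supply the default data mmu_idx.
+ */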
+void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr,
+ uint32_t val, uintptr_t retaddr)
+{
+ cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
+}
+
+void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr,
+ uint32_t val, uintptr_t retaddr)
+{
+ cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
+}
+
+void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr,
+ uint32_t val, uintptr_t retaddr)
+{
+ cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
+}
+
+void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr,
+ uint64_t val, uintptr_t retaddr)
+{
+ cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
+}
+
+void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr,
+ uint32_t val, uintptr_t retaddr)
+{
+ cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
+}
+
+void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr,
+ uint32_t val, uintptr_t retaddr)
+{
+ cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
+}
+
+void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr,
+ uint64_t val, uintptr_t retaddr)
+{
+ cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr);
+}
+
+void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val)
+{
+ cpu_stb_data_ra(env, ptr, val, 0);
+}
+
+void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
+{
+ cpu_stw_be_data_ra(env, ptr, val, 0);
+}
+
+void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val)
+{
+ cpu_stl_be_data_ra(env, ptr, val, 0);
+}
+
+void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val)
+{
+ cpu_stq_be_data_ra(env, ptr, val, 0);
+}
+
+void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
+{
+ cpu_stw_le_data_ra(env, ptr, val, 0);
+}
+
+void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val)
+{
+ cpu_stl_le_data_ra(env, ptr, val, 0);
+}
+
+void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val)
+{
+ cpu_stq_le_data_ra(env, ptr, val, 0);
+}
+
/* First set of helpers allows passing in of OI and RETADDR. This makes
them callable from other helpers. */
/* Code access functions. */
-static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr,
+static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu);
-}
-
-uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- return full_ldub_cmmu(env, addr, oi, retaddr);
-}
-
-int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- return (int8_t) full_ldub_cmmu(env, addr, oi, retaddr);
-}
-
-static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- return load_helper(env, addr, oi, retaddr, MO_LEUW, true,
- full_le_lduw_cmmu);
-}
-
-uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- return full_le_lduw_cmmu(env, addr, oi, retaddr);
-}
-
-int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- return (int16_t) full_le_lduw_cmmu(env, addr, oi, retaddr);
-}
-
-static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- return load_helper(env, addr, oi, retaddr, MO_BEUW, true,
- full_be_lduw_cmmu);
+ return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
}
-uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
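+/* Code loads fetch through the instruction-fetch mmu_idx, cpu_mmu_index(env, true). */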
+uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
- return full_be_lduw_cmmu(env, addr, oi, retaddr);
+ TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
+ return full_ldub_code(env, addr, oi, 0);
}
-int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
-{
- return (int16_t) full_be_lduw_cmmu(env, addr, oi, retaddr);
-}
-
-static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_LEUL, true,
- full_le_ldul_cmmu);
+ return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
}
-uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
- return full_le_ldul_cmmu(env, addr, oi, retaddr);
+ TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
+ return full_lduw_code(env, addr, oi, 0);
}
-static uint64_t full_be_ldul_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_BEUL, true,
- full_be_ldul_cmmu);
+ return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
}
-uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
- return full_be_ldul_cmmu(env, addr, oi, retaddr);
+ TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
+ return full_ldl_code(env, addr, oi, 0);
}
-uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
+ TCGMemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_LEQ, true,
- helper_le_ldq_cmmu);
+ return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
}
-uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
- TCGMemOpIdx oi, uintptr_t retaddr)
+uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
- return load_helper(env, addr, oi, retaddr, MO_BEQ, true,
- helper_be_ldq_cmmu);
+ TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
+ return full_ldq_code(env, addr, oi, 0);
}