#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
+#include "exec/tb-flush.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "exec/translate-all.h"
#include "trace.h"
#include "tb-hash.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
memset(desc->vtable, -1, sizeof(desc->vtable));
}
-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
+static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx,
int64_t now)
{
- CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
- CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
+ CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx];
+ CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx];
tlb_mmu_resize_locked(desc, fast, now);
tlb_mmu_flush_locked(desc, fast);
}
-static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
+static inline void tlb_n_used_entries_inc(CPUState *cpu, uintptr_t mmu_idx)
{
- env_tlb(env)->d[mmu_idx].n_used_entries++;
+ cpu->neg.tlb.d[mmu_idx].n_used_entries++;
}
-static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
+static inline void tlb_n_used_entries_dec(CPUState *cpu, uintptr_t mmu_idx)
{
- env_tlb(env)->d[mmu_idx].n_used_entries--;
+ cpu->neg.tlb.d[mmu_idx].n_used_entries--;
}
void tlb_init(CPUState *cpu)
{
- CPUArchState *env = cpu->env_ptr;
int64_t now = get_clock_realtime();
int i;
- qemu_spin_init(&env_tlb(env)->c.lock);
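+    /* The softmmu TLB now lives in CPUState (cpu->neg.tlb) rather than behind env_tlb(env). */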
+ qemu_spin_init(&cpu->neg.tlb.c.lock);
/* All tlbs are initialized flushed. */
- env_tlb(env)->c.dirty = 0;
+ cpu->neg.tlb.c.dirty = 0;
for (i = 0; i < NB_MMU_MODES; i++) {
- tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
+ tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now);
}
}
void tlb_destroy(CPUState *cpu)
{
- CPUArchState *env = cpu->env_ptr;
int i;
- qemu_spin_destroy(&env_tlb(env)->c.lock);
+ qemu_spin_destroy(&cpu->neg.tlb.c.lock);
for (i = 0; i < NB_MMU_MODES; i++) {
- CPUTLBDesc *desc = &env_tlb(env)->d[i];
- CPUTLBDescFast *fast = &env_tlb(env)->f[i];
+ CPUTLBDesc *desc = &cpu->neg.tlb.d[i];
+ CPUTLBDescFast *fast = &cpu->neg.tlb.f[i];
g_free(fast->table);
g_free(desc->fulltlb);
}
}
-void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
-{
- CPUState *cpu;
- size_t full = 0, part = 0, elide = 0;
-
- CPU_FOREACH(cpu) {
- CPUArchState *env = cpu->env_ptr;
-
- full += qatomic_read(&env_tlb(env)->c.full_flush_count);
- part += qatomic_read(&env_tlb(env)->c.part_flush_count);
- elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
- }
- *pfull = full;
- *ppart = part;
- *pelide = elide;
-}
-
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
- CPUArchState *env = cpu->env_ptr;
uint16_t asked = data.host_int;
uint16_t all_dirty, work, to_clean;
int64_t now = get_clock_realtime();
tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);
- qemu_spin_lock(&env_tlb(env)->c.lock);
+ qemu_spin_lock(&cpu->neg.tlb.c.lock);
- all_dirty = env_tlb(env)->c.dirty;
+ all_dirty = cpu->neg.tlb.c.dirty;
to_clean = asked & all_dirty;
all_dirty &= ~to_clean;
- env_tlb(env)->c.dirty = all_dirty;
+ cpu->neg.tlb.c.dirty = all_dirty;
for (work = to_clean; work != 0; work &= work - 1) {
int mmu_idx = ctz32(work);
- tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
+ tlb_flush_one_mmuidx_locked(cpu, mmu_idx, now);
}
- qemu_spin_unlock(&env_tlb(env)->c.lock);
+ qemu_spin_unlock(&cpu->neg.tlb.c.lock);
tcg_flush_jmp_cache(cpu);
if (to_clean == ALL_MMUIDX_BITS) {
- qatomic_set(&env_tlb(env)->c.full_flush_count,
- env_tlb(env)->c.full_flush_count + 1);
+ qatomic_set(&cpu->neg.tlb.c.full_flush_count,
+ cpu->neg.tlb.c.full_flush_count + 1);
} else {
- qatomic_set(&env_tlb(env)->c.part_flush_count,
- env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
+ qatomic_set(&cpu->neg.tlb.c.part_flush_count,
+ cpu->neg.tlb.c.part_flush_count + ctpop16(to_clean));
if (to_clean != asked) {
- qatomic_set(&env_tlb(env)->c.elide_flush_count,
- env_tlb(env)->c.elide_flush_count +
- ctpop16(asked & ~to_clean));
+ qatomic_set(&cpu->neg.tlb.c.elide_flush_count,
+ cpu->neg.tlb.c.elide_flush_count +
+ ctpop16(asked & ~to_clean));
}
}
}
}
/* Called with tlb_c.lock held */
-static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
+static void tlb_flush_vtlb_page_mask_locked(CPUState *cpu, int mmu_idx,
vaddr page,
vaddr mask)
{
- CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
+ CPUTLBDesc *d = &cpu->neg.tlb.d[mmu_idx];
int k;
- assert_cpu_is_self(env_cpu(env));
+ assert_cpu_is_self(cpu);
for (k = 0; k < CPU_VTLB_SIZE; k++) {
if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
- tlb_n_used_entries_dec(env, mmu_idx);
+ tlb_n_used_entries_dec(cpu, mmu_idx);
}
}
}
-static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
+static inline void tlb_flush_vtlb_page_locked(CPUState *cpu, int mmu_idx,
vaddr page)
{
- tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
+ tlb_flush_vtlb_page_mask_locked(cpu, mmu_idx, page, -1);
}
-static void tlb_flush_page_locked(CPUArchState *env, int midx, vaddr page)
+static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page)
{
- vaddr lp_addr = env_tlb(env)->d[midx].large_page_addr;
- vaddr lp_mask = env_tlb(env)->d[midx].large_page_mask;
+ vaddr lp_addr = cpu->neg.tlb.d[midx].large_page_addr;
+ vaddr lp_mask = cpu->neg.tlb.d[midx].large_page_mask;
/* Check if we need to flush due to large pages. */
if ((page & lp_mask) == lp_addr) {
tlb_debug("forcing full flush midx %d (%016"
VADDR_PRIx "/%016" VADDR_PRIx ")\n",
midx, lp_addr, lp_mask);
- tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
+ tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
} else {
- if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
- tlb_n_used_entries_dec(env, midx);
+ if (tlb_flush_entry_locked(tlb_entry(cpu, midx, page), page)) {
+ tlb_n_used_entries_dec(cpu, midx);
}
- tlb_flush_vtlb_page_locked(env, midx, page);
+ tlb_flush_vtlb_page_locked(cpu, midx, page);
}
}
vaddr addr,
uint16_t idxmap)
{
- CPUArchState *env = cpu->env_ptr;
int mmu_idx;
assert_cpu_is_self(cpu);
tlb_debug("page addr: %016" VADDR_PRIx " mmu_map:0x%x\n", addr, idxmap);
- qemu_spin_lock(&env_tlb(env)->c.lock);
+ qemu_spin_lock(&cpu->neg.tlb.c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
if ((idxmap >> mmu_idx) & 1) {
- tlb_flush_page_locked(env, mmu_idx, addr);
+ tlb_flush_page_locked(cpu, mmu_idx, addr);
}
}
- qemu_spin_unlock(&env_tlb(env)->c.lock);
+ qemu_spin_unlock(&cpu->neg.tlb.c.lock);
/*
* Discard jump cache entries for any tb which might potentially
tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}
-static void tlb_flush_range_locked(CPUArchState *env, int midx,
+static void tlb_flush_range_locked(CPUState *cpu, int midx,
vaddr addr, vaddr len,
unsigned bits)
{
- CPUTLBDesc *d = &env_tlb(env)->d[midx];
- CPUTLBDescFast *f = &env_tlb(env)->f[midx];
+ CPUTLBDesc *d = &cpu->neg.tlb.d[midx];
+ CPUTLBDescFast *f = &cpu->neg.tlb.f[midx];
vaddr mask = MAKE_64BIT_MASK(0, bits);
/*
tlb_debug("forcing full flush midx %d ("
"%016" VADDR_PRIx "/%016" VADDR_PRIx "+%016" VADDR_PRIx ")\n",
midx, addr, mask, len);
- tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
+ tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
return;
}
tlb_debug("forcing full flush midx %d ("
"%016" VADDR_PRIx "/%016" VADDR_PRIx ")\n",
midx, d->large_page_addr, d->large_page_mask);
- tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
+ tlb_flush_one_mmuidx_locked(cpu, midx, get_clock_realtime());
return;
}
for (vaddr i = 0; i < len; i += TARGET_PAGE_SIZE) {
vaddr page = addr + i;
- CPUTLBEntry *entry = tlb_entry(env, midx, page);
+ CPUTLBEntry *entry = tlb_entry(cpu, midx, page);
if (tlb_flush_entry_mask_locked(entry, page, mask)) {
- tlb_n_used_entries_dec(env, midx);
+ tlb_n_used_entries_dec(cpu, midx);
}
- tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
+ tlb_flush_vtlb_page_mask_locked(cpu, midx, page, mask);
}
}
static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
TLBFlushRangeData d)
{
- CPUArchState *env = cpu->env_ptr;
int mmu_idx;
assert_cpu_is_self(cpu);
tlb_debug("range: %016" VADDR_PRIx "/%u+%016" VADDR_PRIx " mmu_map:0x%x\n",
d.addr, d.bits, d.len, d.idxmap);
- qemu_spin_lock(&env_tlb(env)->c.lock);
+ qemu_spin_lock(&cpu->neg.tlb.c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
if ((d.idxmap >> mmu_idx) & 1) {
- tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
+ tlb_flush_range_locked(cpu, mmu_idx, d.addr, d.len, d.bits);
}
}
- qemu_spin_unlock(&env_tlb(env)->c.lock);
+ qemu_spin_unlock(&cpu->neg.tlb.c.lock);
/*
* If the length is larger than the jump cache size, then it will take
*/
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
- CPUArchState *env;
-
int mmu_idx;
- env = cpu->env_ptr;
- qemu_spin_lock(&env_tlb(env)->c.lock);
+ qemu_spin_lock(&cpu->neg.tlb.c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
unsigned int i;
- unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);
+ unsigned int n = tlb_n_entries(&cpu->neg.tlb.f[mmu_idx]);
for (i = 0; i < n; i++) {
- tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
+ tlb_reset_dirty_range_locked(&cpu->neg.tlb.f[mmu_idx].table[i],
start1, length);
}
for (i = 0; i < CPU_VTLB_SIZE; i++) {
- tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
+ tlb_reset_dirty_range_locked(&cpu->neg.tlb.d[mmu_idx].vtable[i],
start1, length);
}
}
- qemu_spin_unlock(&env_tlb(env)->c.lock);
+ qemu_spin_unlock(&cpu->neg.tlb.c.lock);
}
/* Called with tlb_c.lock held */
so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, vaddr addr)
{
- CPUArchState *env = cpu->env_ptr;
int mmu_idx;
assert_cpu_is_self(cpu);
addr &= TARGET_PAGE_MASK;
- qemu_spin_lock(&env_tlb(env)->c.lock);
+ qemu_spin_lock(&cpu->neg.tlb.c.lock);
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
- tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, addr), addr);
+ tlb_set_dirty1_locked(tlb_entry(cpu, mmu_idx, addr), addr);
}
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
int k;
for (k = 0; k < CPU_VTLB_SIZE; k++) {
- tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], addr);
+ tlb_set_dirty1_locked(&cpu->neg.tlb.d[mmu_idx].vtable[k], addr);
}
}
- qemu_spin_unlock(&env_tlb(env)->c.lock);
+ qemu_spin_unlock(&cpu->neg.tlb.c.lock);
}
/* Our TLB does not support large pages, so remember the area covered by
large pages and trigger a full TLB flush if these are invalidated. */
-static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
+static void tlb_add_large_page(CPUState *cpu, int mmu_idx,
vaddr addr, uint64_t size)
{
- vaddr lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
+ vaddr lp_addr = cpu->neg.tlb.d[mmu_idx].large_page_addr;
vaddr lp_mask = ~(size - 1);
if (lp_addr == (vaddr)-1) {
/* Extend the existing region to include the new page.
This is a compromise between unnecessary flushes and
the cost of maintaining a full variable size TLB. */
- lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
+ lp_mask &= cpu->neg.tlb.d[mmu_idx].large_page_mask;
while (((lp_addr ^ addr) & lp_mask) != 0) {
lp_mask <<= 1;
}
}
- env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
- env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
+ cpu->neg.tlb.d[mmu_idx].large_page_addr = lp_addr & lp_mask;
+ cpu->neg.tlb.d[mmu_idx].large_page_mask = lp_mask;
}
static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
void tlb_set_page_full(CPUState *cpu, int mmu_idx,
vaddr addr, CPUTLBEntryFull *full)
{
- CPUArchState *env = cpu->env_ptr;
- CPUTLB *tlb = env_tlb(env);
+ CPUTLB *tlb = &cpu->neg.tlb;
CPUTLBDesc *desc = &tlb->d[mmu_idx];
MemoryRegionSection *section;
unsigned int index, read_flags, write_flags;
sz = TARGET_PAGE_SIZE;
} else {
sz = (hwaddr)1 << full->lg_page_size;
- tlb_add_large_page(env, mmu_idx, addr, sz);
+ tlb_add_large_page(cpu, mmu_idx, addr, sz);
}
addr_page = addr & TARGET_PAGE_MASK;
paddr_page = full->phys_addr & TARGET_PAGE_MASK;
wp_flags = cpu_watchpoint_address_matches(cpu, addr_page,
TARGET_PAGE_SIZE);
- index = tlb_index(env, mmu_idx, addr_page);
- te = tlb_entry(env, mmu_idx, addr_page);
+ index = tlb_index(cpu, mmu_idx, addr_page);
+ te = tlb_entry(cpu, mmu_idx, addr_page);
/*
* Hold the TLB lock for the rest of the function. We could acquire/release
tlb->c.dirty |= 1 << mmu_idx;
/* Make sure there's no cached translation for the new page. */
- tlb_flush_vtlb_page_locked(env, mmu_idx, addr_page);
+ tlb_flush_vtlb_page_locked(cpu, mmu_idx, addr_page);
/*
* Only evict the old entry to the victim tlb if it's for a
/* Evict the old entry into the victim tlb. */
copy_tlb_helper_locked(tv, te);
desc->vfulltlb[vidx] = desc->fulltlb[index];
- tlb_n_used_entries_dec(env, mmu_idx);
+ tlb_n_used_entries_dec(cpu, mmu_idx);
}
/* refill the tlb */
MMU_DATA_STORE, prot & PAGE_WRITE);
copy_tlb_helper_locked(te, &tn);
- tlb_n_used_entries_inc(env, mmu_idx);
+ tlb_n_used_entries_inc(cpu, mmu_idx);
qemu_spin_unlock(&tlb->c.lock);
}
}
static MemoryRegionSection *
-io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
+io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat,
MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
{
- CPUState *cpu = env_cpu(env);
MemoryRegionSection *section;
hwaddr mr_offset;
section = iotlb_to_section(cpu, xlat, attrs);
mr_offset = (xlat & TARGET_PAGE_MASK) + addr;
cpu->mem_io_pc = retaddr;
- if (!cpu->can_do_io) {
+ if (!cpu->neg.can_do_io) {
cpu_io_recompile(cpu, retaddr);
}
return section;
}
-static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
+static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr,
unsigned size, MMUAccessType access_type, int mmu_idx,
MemTxResult response, uintptr_t retaddr)
{
- CPUState *cpu = env_cpu(env);
-
- if (!cpu->ignore_memory_transaction_failures) {
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
- if (cc->tcg_ops->do_transaction_failed) {
- hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
-
- cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
- access_type, mmu_idx,
- full->attrs, response, retaddr);
- }
- }
-}
-
-static uint64_t io_readx(CPUArchState *env, CPUTLBEntryFull *full,
- int mmu_idx, vaddr addr, uintptr_t retaddr,
- MMUAccessType access_type, MemOp op)
-{
- MemoryRegionSection *section;
- hwaddr mr_offset;
- MemoryRegion *mr;
- MemTxResult r;
- uint64_t val;
-
- section = io_prepare(&mr_offset, env, full->xlat_section,
- full->attrs, addr, retaddr);
- mr = section->mr;
+ if (!cpu->ignore_memory_transaction_failures
+ && cpu->cc->tcg_ops->do_transaction_failed) {
+ hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
- {
- QEMU_IOTHREAD_LOCK_GUARD();
- r = memory_region_dispatch_read(mr, mr_offset, &val, op, full->attrs);
- }
-
- if (r != MEMTX_OK) {
- io_failed(env, full, addr, memop_size(op), access_type, mmu_idx,
- r, retaddr);
- }
- return val;
-}
-
-static void io_writex(CPUArchState *env, CPUTLBEntryFull *full,
- int mmu_idx, uint64_t val, vaddr addr,
- uintptr_t retaddr, MemOp op)
-{
- MemoryRegionSection *section;
- hwaddr mr_offset;
- MemoryRegion *mr;
- MemTxResult r;
-
- section = io_prepare(&mr_offset, env, full->xlat_section,
- full->attrs, addr, retaddr);
- mr = section->mr;
-
- {
- QEMU_IOTHREAD_LOCK_GUARD();
- r = memory_region_dispatch_write(mr, mr_offset, val, op, full->attrs);
- }
-
- if (r != MEMTX_OK) {
- io_failed(env, full, addr, memop_size(op), MMU_DATA_STORE, mmu_idx,
- r, retaddr);
+ cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
+ access_type, mmu_idx,
+ full->attrs, response, retaddr);
}
}
/* Return true if ADDR is present in the victim tlb, and has been copied
back to the main tlb. */
-static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
+static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index,
MMUAccessType access_type, vaddr page)
{
size_t vidx;
- assert_cpu_is_self(env_cpu(env));
+ assert_cpu_is_self(cpu);
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
- CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
+ CPUTLBEntry *vtlb = &cpu->neg.tlb.d[mmu_idx].vtable[vidx];
uint64_t cmp = tlb_read_idx(vtlb, access_type);
if (cmp == page) {
/* Found entry in victim tlb, swap tlb and iotlb. */
- CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
+ CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index];
- qemu_spin_lock(&env_tlb(env)->c.lock);
+ qemu_spin_lock(&cpu->neg.tlb.c.lock);
copy_tlb_helper_locked(&tmptlb, tlb);
copy_tlb_helper_locked(tlb, vtlb);
copy_tlb_helper_locked(vtlb, &tmptlb);
- qemu_spin_unlock(&env_tlb(env)->c.lock);
+ qemu_spin_unlock(&cpu->neg.tlb.c.lock);
- CPUTLBEntryFull *f1 = &env_tlb(env)->d[mmu_idx].fulltlb[index];
- CPUTLBEntryFull *f2 = &env_tlb(env)->d[mmu_idx].vfulltlb[vidx];
+ CPUTLBEntryFull *f1 = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
+ CPUTLBEntryFull *f2 = &cpu->neg.tlb.d[mmu_idx].vfulltlb[vidx];
CPUTLBEntryFull tmpf;
tmpf = *f1; *f1 = *f2; *f2 = tmpf;
return true;
}
}
-static int probe_access_internal(CPUArchState *env, vaddr addr,
+static int probe_access_internal(CPUState *cpu, vaddr addr,
int fault_size, MMUAccessType access_type,
int mmu_idx, bool nonfault,
void **phost, CPUTLBEntryFull **pfull,
uintptr_t retaddr, bool check_mem_cbs)
{
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ uintptr_t index = tlb_index(cpu, mmu_idx, addr);
+ CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
uint64_t tlb_addr = tlb_read_idx(entry, access_type);
vaddr page_addr = addr & TARGET_PAGE_MASK;
int flags = TLB_FLAGS_MASK & ~TLB_FORCE_SLOW;
- bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(env_cpu(env));
+ bool force_mmio = check_mem_cbs && cpu_plugin_mem_cbs_enabled(cpu);
CPUTLBEntryFull *full;
if (!tlb_hit_page(tlb_addr, page_addr)) {
- if (!victim_tlb_hit(env, mmu_idx, index, access_type, page_addr)) {
- CPUState *cs = env_cpu(env);
-
- if (!cs->cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
- mmu_idx, nonfault, retaddr)) {
+ if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
+ if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
+ mmu_idx, nonfault, retaddr)) {
/* Non-faulting page table read failed. */
*phost = NULL;
*pfull = NULL;
}
/* TLB resize via tlb_fill may have moved the entry. */
- index = tlb_index(env, mmu_idx, addr);
- entry = tlb_entry(env, mmu_idx, addr);
+ index = tlb_index(cpu, mmu_idx, addr);
+ entry = tlb_entry(cpu, mmu_idx, addr);
/*
* With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
}
flags &= tlb_addr;
- *pfull = full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ *pfull = full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
flags |= full->slow_flags[access_type];
/* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
bool nonfault, void **phost, CPUTLBEntryFull **pfull,
uintptr_t retaddr)
{
- int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- nonfault, phost, pfull, retaddr, true);
+ int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
+ mmu_idx, nonfault, phost, pfull, retaddr,
+ true);
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
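+        /* A probe may pass size == 0; dirty at least one byte. */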
+ int dirtysize = size == 0 ? 1 : size;
+ notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr);
flags &= ~TLB_NOTDIRTY;
}
phost = phost ? phost : &discard_phost;
pfull = pfull ? pfull : &discard_tlb;
- int flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- true, phost, pfull, 0, false);
+ int flags = probe_access_internal(env_cpu(env), addr, size, access_type,
+ mmu_idx, true, phost, pfull, 0, false);
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, 1, *pfull, 0);
+ int dirtysize = size == 0 ? 1 : size;
+ notdirty_write(env_cpu(env), addr, dirtysize, *pfull, 0);
flags &= ~TLB_NOTDIRTY;
}
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
- flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- nonfault, phost, &full, retaddr, true);
+ flags = probe_access_internal(env_cpu(env), addr, size, access_type,
+ mmu_idx, nonfault, phost, &full, retaddr,
+ true);
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, 1, full, retaddr);
+ int dirtysize = size == 0 ? 1 : size;
+ notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr);
flags &= ~TLB_NOTDIRTY;
}
g_assert(-(addr | TARGET_PAGE_MASK) >= size);
- flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
- false, &host, &full, retaddr, true);
+ flags = probe_access_internal(env_cpu(env), addr, size, access_type,
+ mmu_idx, false, &host, &full, retaddr,
+ true);
/* Per the interface, size == 0 merely faults the access. */
if (size == 0) {
/* Handle clean RAM pages. */
if (flags & TLB_NOTDIRTY) {
- notdirty_write(env_cpu(env), addr, 1, full, retaddr);
+ notdirty_write(env_cpu(env), addr, size, full, retaddr);
}
}
void *host;
int flags;
- flags = probe_access_internal(env, addr, 0, access_type,
+ flags = probe_access_internal(env_cpu(env), addr, 0, access_type,
mmu_idx, true, &host, &full, 0, false);
    /* No combination of flags is expected by the caller. */
CPUTLBEntryFull *full;
void *p;
- (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
+ (void)probe_access_internal(env_cpu(env), addr, 1, MMU_INST_FETCH,
cpu_mmu_index(env, true), false,
&p, &full, 0, false);
if (p == NULL) {
bool tlb_plugin_lookup(CPUState *cpu, vaddr addr, int mmu_idx,
bool is_store, struct qemu_plugin_hwaddr *data)
{
- CPUArchState *env = cpu->env_ptr;
- CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
- uintptr_t index = tlb_index(env, mmu_idx, addr);
+ CPUTLBEntry *tlbe = tlb_entry(cpu, mmu_idx, addr);
+ uintptr_t index = tlb_index(cpu, mmu_idx, addr);
MMUAccessType access_type = is_store ? MMU_DATA_STORE : MMU_DATA_LOAD;
uint64_t tlb_addr = tlb_read_idx(tlbe, access_type);
CPUTLBEntryFull *full;
return false;
}
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
data->phys_addr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
/* We must have an iotlb entry for MMIO */
/**
* mmu_lookup1: translate one page
- * @env: cpu context
+ * @cpu: generic cpu state
* @data: lookup parameters
* @mmu_idx: virtual address context
* @access_type: load/store/code
* tlb_fill will longjmp out. Return true if the softmmu tlb for
* @mmu_idx may have resized.
*/
-static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
+static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
int mmu_idx, MMUAccessType access_type, uintptr_t ra)
{
vaddr addr = data->addr;
- uintptr_t index = tlb_index(env, mmu_idx, addr);
- CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ uintptr_t index = tlb_index(cpu, mmu_idx, addr);
+ CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
uint64_t tlb_addr = tlb_read_idx(entry, access_type);
bool maybe_resized = false;
CPUTLBEntryFull *full;
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
- if (!victim_tlb_hit(env, mmu_idx, index, access_type,
+ if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
addr & TARGET_PAGE_MASK)) {
- tlb_fill(env_cpu(env), addr, data->size, access_type, mmu_idx, ra);
+ tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
maybe_resized = true;
- index = tlb_index(env, mmu_idx, addr);
- entry = tlb_entry(env, mmu_idx, addr);
+ index = tlb_index(cpu, mmu_idx, addr);
+ entry = tlb_entry(cpu, mmu_idx, addr);
}
tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
}
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
flags |= full->slow_flags[access_type];
/**
* mmu_watch_or_dirty
- * @env: cpu context
+ * @cpu: generic cpu state
* @data: lookup parameters
* @access_type: load/store/code
* @ra: return address into tcg generated code, or 0
* Trigger watchpoints for @data.addr:@data.size;
* record writes to protected clean pages.
*/
-static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data,
+static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
MMUAccessType access_type, uintptr_t ra)
{
CPUTLBEntryFull *full = data->full;
/* On watchpoint hit, this will longjmp out. */
if (flags & TLB_WATCHPOINT) {
int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
- cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, wp, ra);
+ cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra);
flags &= ~TLB_WATCHPOINT;
}
/* Note that notdirty is only set for writes. */
if (flags & TLB_NOTDIRTY) {
- notdirty_write(env_cpu(env), addr, size, full, ra);
+ notdirty_write(cpu, addr, size, full, ra);
flags &= ~TLB_NOTDIRTY;
}
data->flags = flags;
/**
* mmu_lookup: translate page(s)
- * @env: cpu context
+ * @cpu: generic cpu state
* @addr: virtual address
* @oi: combined mmu_idx and MemOp
* @ra: return address into tcg generated code, or 0
* Resolve the translation for the page(s) beginning at @addr, for MemOp.size
* bytes. Return true if the lookup crosses a page boundary.
*/
-static bool mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
{
unsigned a_bits;
/* Handle CPU specific unaligned behaviour */
a_bits = get_alignment_bits(l->memop);
if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra);
+ cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
}
l->page[0].addr = addr;
crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
if (likely(!crosspage)) {
- mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
+ mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
flags = l->page[0].flags;
if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
- mmu_watch_or_dirty(env, &l->page[0], type, ra);
+ mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
}
if (unlikely(flags & TLB_BSWAP)) {
l->memop ^= MO_BSWAP;
* Lookup both pages, recognizing exceptions from either. If the
* second lookup potentially resized, refresh first CPUTLBEntryFull.
*/
- mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
- if (mmu_lookup1(env, &l->page[1], l->mmu_idx, type, ra)) {
- uintptr_t index = tlb_index(env, l->mmu_idx, addr);
- l->page[0].full = &env_tlb(env)->d[l->mmu_idx].fulltlb[index];
+ mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
+ if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
+ uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
+ l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
}
flags = l->page[0].flags | l->page[1].flags;
if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
- mmu_watch_or_dirty(env, &l->page[0], type, ra);
- mmu_watch_or_dirty(env, &l->page[1], type, ra);
+ mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
+ mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
}
/*
* Probe for an atomic operation. Do not allow unaligned operations,
* or io operations to proceed. Return the host address.
*/
-static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
uintptr_t mmu_idx = get_mmuidx(oi);
/* Enforce guest required alignment. */
if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
/* ??? Maybe indicate atomic op to cpu_unaligned_access */
- cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
+ cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
goto stop_the_world;
}
- index = tlb_index(env, mmu_idx, addr);
- tlbe = tlb_entry(env, mmu_idx, addr);
+ index = tlb_index(cpu, mmu_idx, addr);
+ tlbe = tlb_entry(cpu, mmu_idx, addr);
/* Check TLB entry and enforce page permissions. */
tlb_addr = tlb_addr_write(tlbe);
if (!tlb_hit(tlb_addr, addr)) {
- if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE,
+ if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
addr & TARGET_PAGE_MASK)) {
- tlb_fill(env_cpu(env), addr, size,
+ tlb_fill(cpu, addr, size,
MMU_DATA_STORE, mmu_idx, retaddr);
- index = tlb_index(env, mmu_idx, addr);
- tlbe = tlb_entry(env, mmu_idx, addr);
+ index = tlb_index(cpu, mmu_idx, addr);
+ tlbe = tlb_entry(cpu, mmu_idx, addr);
}
tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
}
* but addr_read will only be -1 if PAGE_READ was unset.
*/
if (unlikely(tlbe->addr_read == -1)) {
- tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+ tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
/*
* Since we don't support reads and writes to different
* addresses, and we do have the proper page loaded for
}
hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, size, full, retaddr);
+ notdirty_write(cpu, addr, size, full, retaddr);
}
if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
wp_flags |= BP_MEM_READ;
}
if (wp_flags) {
- cpu_check_watchpoint(env_cpu(env), addr, size,
+ cpu_check_watchpoint(cpu, addr, size,
full->attrs, wp_flags, retaddr);
}
}
return hostaddr;
stop_the_world:
- cpu_loop_exit_atomic(env_cpu(env), retaddr);
+ cpu_loop_exit_atomic(cpu, retaddr);
}
/*
/**
* do_ld_mmio_beN:
- * @env: cpu context
+ * @cpu: generic cpu state
* @full: page parameters
* @ret_be: accumulated data
* @addr: virtual address
* @size: number of bytes
* @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0
- * Context: iothread lock held
+ * Context: BQL held
*
* Load @size bytes from @addr, which is memory-mapped i/o.
* The bytes are concatenated in big-endian order with @ret_be.
*/
-static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
- uint64_t ret_be, vaddr addr, int size,
- int mmu_idx, MMUAccessType type, uintptr_t ra)
+static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
+ uint64_t ret_be, vaddr addr, int size,
+ int mmu_idx, MMUAccessType type, uintptr_t ra,
+ MemoryRegion *mr, hwaddr mr_offset)
{
- uint64_t t;
-
- tcg_debug_assert(size > 0 && size <= 8);
do {
+ MemOp this_mop;
+ unsigned this_size;
+ uint64_t val;
+ MemTxResult r;
+
/* Read aligned pieces up to 8 bytes. */
- switch ((size | (int)addr) & 7) {
- case 1:
- case 3:
- case 5:
- case 7:
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_UB);
- ret_be = (ret_be << 8) | t;
- size -= 1;
- addr += 1;
- break;
- case 2:
- case 6:
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUW);
- ret_be = (ret_be << 16) | t;
- size -= 2;
- addr += 2;
- break;
- case 4:
- t = io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUL);
- ret_be = (ret_be << 32) | t;
- size -= 4;
- addr += 4;
- break;
- case 0:
- return io_readx(env, full, mmu_idx, addr, ra, type, MO_BEUQ);
- default:
- qemu_build_not_reached();
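+        /*
+         * ctz32 of (size | addr | 8) selects the largest naturally aligned
+         * power-of-two chunk, capped at 8 bytes, that fits both the
+         * remaining size and the current address alignment.
+         */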
+ this_mop = ctz32(size | (int)addr | 8);
+ this_size = 1 << this_mop;
+ this_mop |= MO_BE;
+
+ r = memory_region_dispatch_read(mr, mr_offset, &val,
+ this_mop, full->attrs);
+ if (unlikely(r != MEMTX_OK)) {
+ io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra);
+ }
+ if (this_size == 8) {
+ return val;
}
+
+ ret_be = (ret_be << (this_size * 8)) | val;
+ addr += this_size;
+ mr_offset += this_size;
+ size -= this_size;
} while (size);
+
return ret_be;
}
+static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
+ uint64_t ret_be, vaddr addr, int size,
+ int mmu_idx, MMUAccessType type, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ MemoryRegion *mr;
+ hwaddr mr_offset;
+ MemTxAttrs attrs;
+ uint64_t ret;
+
+ tcg_debug_assert(size > 0 && size <= 8);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
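+    /* Hold the BQL across the device dispatch, replacing the old per-access iothread lock guard. */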
+ bql_lock();
+ ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
+ type, ra, mr, mr_offset);
+ bql_unlock();
+
+ return ret;
+}
+
+static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
+ uint64_t ret_be, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ MemoryRegion *mr;
+ hwaddr mr_offset;
+ MemTxAttrs attrs;
+ uint64_t a, b;
+
+ tcg_debug_assert(size > 8 && size <= 16);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
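+    /* Big-endian concatenation: the first size - 8 bytes form the high half, the final 8 bytes the low half. */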
+ bql_lock();
+ a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
+ MMU_DATA_LOAD, ra, mr, mr_offset);
+ b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
+ MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
+ bql_unlock();
+
+ return int128_make128(b, a);
+}
+
/**
* do_ld_bytes_beN
* @p: translation parameters
* As do_ld_bytes_beN, but with one atomic load.
* Eight aligned bytes are guaranteed to cover the load.
*/
-static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra,
+static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
MMULookupPageData *p, uint64_t ret_be)
{
int o = p->addr & 7;
- uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o);
+ uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o);
x = cpu_to_be64(x);
x <<= o * 8;
* As do_ld_bytes_beN, but with one atomic load.
* 16 aligned bytes are guaranteed to cover the load.
*/
-static Int128 do_ld_whole_be16(CPUArchState *env, uintptr_t ra,
+static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
MMULookupPageData *p, uint64_t ret_be)
{
int o = p->addr & 15;
- Int128 x, y = load_atomic16_or_exit(env, ra, p->haddr - o);
+ Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o);
int size = p->size;
if (!HOST_BIG_ENDIAN) {
/*
* Wrapper for the above.
*/
-static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
+static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
uint64_t ret_be, int mmu_idx, MMUAccessType type,
MemOp mop, uintptr_t ra)
{
unsigned tmp, half_size;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- return do_ld_mmio_beN(env, p->full, ret_be, p->addr, p->size,
+ return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
mmu_idx, type, ra);
}
if (!HAVE_al8_fast && p->size < 4) {
return do_ld_whole_be4(p, ret_be);
} else {
- return do_ld_whole_be8(env, ra, p, ret_be);
+ return do_ld_whole_be8(cpu, ra, p, ret_be);
}
}
/* fall through */
/*
* Wrapper for the above, for 8 < size < 16.
*/
-static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
+static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p,
uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
{
int size = p->size;
MemOp atom;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- a = do_ld_mmio_beN(env, p->full, a, p->addr, size - 8,
- mmu_idx, MMU_DATA_LOAD, ra);
- b = do_ld_mmio_beN(env, p->full, 0, p->addr + 8, 8,
- mmu_idx, MMU_DATA_LOAD, ra);
- return int128_make128(b, a);
+ return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra);
}
/*
case MO_ATOM_WITHIN16_PAIR:
/* Since size > 8, this is the half that must be atomic. */
- return do_ld_whole_be16(env, ra, p, a);
+ return do_ld_whole_be16(cpu, ra, p, a);
case MO_ATOM_IFALIGN_PAIR:
/*
return int128_make128(b, a);
}
-static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
+static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- return do_ld_mmio_beN(env, p->full, 0, p->addr, 1, mmu_idx, type, ra);
+ return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra);
} else {
return *(uint8_t *)p->haddr;
}
}
-static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
+static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, MemOp memop, uintptr_t ra)
{
uint16_t ret;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 2, mmu_idx, type, ra);
+ ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap16(ret);
}
} else {
/* Perform the load host endian, then swap if necessary. */
- ret = load_atom_2(env, ra, p->haddr, memop);
+ ret = load_atom_2(cpu, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap16(ret);
}
return ret;
}
-static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
+static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, MemOp memop, uintptr_t ra)
{
uint32_t ret;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 4, mmu_idx, type, ra);
+ ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap32(ret);
}
} else {
/* Perform the load host endian. */
- ret = load_atom_4(env, ra, p->haddr, memop);
+ ret = load_atom_4(cpu, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap32(ret);
}
return ret;
}
-static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
+static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, MemOp memop, uintptr_t ra)
{
uint64_t ret;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 8, mmu_idx, type, ra);
+ ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap64(ret);
}
} else {
/* Perform the load host endian. */
- ret = load_atom_8(env, ra, p->haddr, memop);
+ ret = load_atom_8(cpu, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap64(ret);
}
return ret;
}
-static uint8_t do_ld1_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
bool crosspage;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
tcg_debug_assert(!crosspage);
- return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
-}
-
-tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
- return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
+ return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
}
-static uint16_t do_ld2_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
uint8_t a, b;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
- return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
+ return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
}
- a = do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
- b = do_ld_1(env, &l.page[1], l.mmu_idx, access_type, ra);
+ a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
+ b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = a | (b << 8);
return ret;
}
-tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
- return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-static uint32_t do_ld4_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
uint32_t ret;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
- return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
+ return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
}
- ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
- ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap32(ret);
}
return ret;
}
-tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
- return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-static uint64_t do_ld8_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
uint64_t ret;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
- return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
+ return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
}
- ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
- ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap64(ret);
}
return ret;
}
-uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
- return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-/*
- * Provide signed versions of the load routines as well. We can of course
- * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
- */
-
-tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
-}
-
-tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
-}
-
-tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
-}
-
-static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
+static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
int first;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
if (likely(!crosspage)) {
if (unlikely(l.page[0].flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- a = do_ld_mmio_beN(env, l.page[0].full, 0, addr, 8,
- l.mmu_idx, MMU_DATA_LOAD, ra);
- b = do_ld_mmio_beN(env, l.page[0].full, 0, addr + 8, 8,
- l.mmu_idx, MMU_DATA_LOAD, ra);
- ret = int128_make128(b, a);
+ ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16,
+ l.mmu_idx, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap128(ret);
}
} else {
/* Perform the load host endian. */
- ret = load_atom_16(env, ra, l.page[0].haddr, l.memop);
+ ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop);
if (l.memop & MO_BSWAP) {
ret = bswap128(ret);
}
if (first == 8) {
MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
- a = do_ld_8(env, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
- b = do_ld_8(env, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
+ a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
+ b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
if ((mop8 & MO_BSWAP) == MO_LE) {
ret = int128_make128(a, b);
} else {
}
if (first < 8) {
- a = do_ld_beN(env, &l.page[0], 0, l.mmu_idx,
+ a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx,
MMU_DATA_LOAD, l.memop, ra);
- ret = do_ld16_beN(env, &l.page[1], a, l.mmu_idx, l.memop, ra);
+ ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra);
} else {
- ret = do_ld16_beN(env, &l.page[0], 0, l.mmu_idx, l.memop, ra);
+ ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra);
b = int128_getlo(ret);
ret = int128_lshift(ret, l.page[1].size * 8);
a = int128_gethi(ret);
- b = do_ld_beN(env, &l.page[1], b, l.mmu_idx,
+ b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx,
MMU_DATA_LOAD, l.memop, ra);
ret = int128_make128(b, a);
}
return ret;
}
-Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
- uint32_t oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
- return do_ld16_mmu(env, addr, oi, retaddr);
-}
-
-Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
-{
- return helper_ld16_mmu(env, addr, oi, GETPC());
-}
-
-/*
- * Load helpers for cpu_ldst.h.
- */
-
-static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
-{
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-}
-
-uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
-{
- uint8_t ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
- ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint16_t ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
- ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint32_t ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
- ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint64_t ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
- ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- Int128 ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
- ret = do_ld16_mmu(env, addr, oi, ra);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
/*
* Store Helpers
*/
/**
* do_st_mmio_leN:
- * @env: cpu context
+ * @cpu: generic cpu state
* @full: page parameters
* @val_le: data to store
* @addr: virtual address
* @size: number of bytes
* @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0
- * Context: iothread lock held
+ * Context: BQL held
*
* Store @size bytes at @addr, which is memory-mapped i/o.
* The bytes to store are extracted in little-endian order from @val_le;
* return the bytes of @val_le beyond @p->size that have not been stored.
*/
-static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
- uint64_t val_le, vaddr addr, int size,
- int mmu_idx, uintptr_t ra)
+static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
+ uint64_t val_le, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra,
+ MemoryRegion *mr, hwaddr mr_offset)
{
- tcg_debug_assert(size > 0 && size <= 8);
-
do {
+ MemOp this_mop;
+ unsigned this_size;
+ MemTxResult r;
+
/* Store aligned pieces up to 8 bytes. */
- switch ((size | (int)addr) & 7) {
- case 1:
- case 3:
- case 5:
- case 7:
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_UB);
- val_le >>= 8;
- size -= 1;
- addr += 1;
- break;
- case 2:
- case 6:
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUW);
- val_le >>= 16;
- size -= 2;
- addr += 2;
- break;
- case 4:
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUL);
- val_le >>= 32;
- size -= 4;
- addr += 4;
- break;
- case 0:
- io_writex(env, full, mmu_idx, val_le, addr, ra, MO_LEUQ);
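+        /*
+         * As on the load path: pick the largest naturally aligned
+         * power-of-two chunk (at most 8 bytes) allowed by the remaining
+         * size and the current address alignment.
+         */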
+ this_mop = ctz32(size | (int)addr | 8);
+ this_size = 1 << this_mop;
+ this_mop |= MO_LE;
+
+ r = memory_region_dispatch_write(mr, mr_offset, val_le,
+ this_mop, full->attrs);
+ if (unlikely(r != MEMTX_OK)) {
+ io_failed(cpu, full, addr, this_size, MMU_DATA_STORE,
+ mmu_idx, r, ra);
+ }
+ if (this_size == 8) {
return 0;
- default:
- qemu_build_not_reached();
}
+
+ val_le >>= this_size * 8;
+ addr += this_size;
+ mr_offset += this_size;
+ size -= this_size;
} while (size);
return val_le;
}
+static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
+ uint64_t val_le, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ hwaddr mr_offset;
+ MemoryRegion *mr;
+ MemTxAttrs attrs;
+ uint64_t ret;
+
+ tcg_debug_assert(size > 0 && size <= 8);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
+ bql_lock();
+ ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
+ ra, mr, mr_offset);
+ bql_unlock();
+
+ return ret;
+}
+
+static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
+ Int128 val_le, vaddr addr, int size,
+ int mmu_idx, uintptr_t ra)
+{
+ MemoryRegionSection *section;
+ MemoryRegion *mr;
+ hwaddr mr_offset;
+ MemTxAttrs attrs;
+ uint64_t ret;
+
+ tcg_debug_assert(size > 8 && size <= 16);
+
+ attrs = full->attrs;
+ section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
+ mr = section->mr;
+
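+    /* Little-endian split: the low 8 bytes go to addr, the high size - 8 bytes to addr + 8; return the unstored remainder of the high half. */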
+ bql_lock();
+ int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
+ mmu_idx, ra, mr, mr_offset);
+ ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
+ size - 8, mmu_idx, ra, mr, mr_offset + 8);
+ bql_unlock();
+
+ return ret;
+}
+
/*
* Wrapper for the above.
*/
-static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
+static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p,
uint64_t val_le, int mmu_idx,
MemOp mop, uintptr_t ra)
{
unsigned tmp, half_size;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- return do_st_mmio_leN(env, p->full, val_le, p->addr,
+ return do_st_mmio_leN(cpu, p->full, val_le, p->addr,
p->size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
return val_le >> (p->size * 8);
} else if (HAVE_al8) {
return store_whole_le8(p->haddr, p->size, val_le);
} else {
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
}
/* fall through */
/*
* Wrapper for the above, for 8 < size < 16.
*/
-static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
+static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,
Int128 val_le, int mmu_idx,
MemOp mop, uintptr_t ra)
{
MemOp atom;
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- do_st_mmio_leN(env, p->full, int128_getlo(val_le),
- p->addr, 8, mmu_idx, ra);
- return do_st_mmio_leN(env, p->full, int128_gethi(val_le),
- p->addr + 8, size - 8, mmu_idx, ra);
+ return do_st16_mmio_leN(cpu, p->full, val_le, p->addr,
+ size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
return int128_gethi(val_le) >> ((size - 8) * 8);
}
case MO_ATOM_WITHIN16_PAIR:
/* Since size > 8, this is the half that must be atomic. */
- if (!HAVE_ATOMIC128_RW) {
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ if (!HAVE_CMPXCHG128) {
+ cpu_loop_exit_atomic(cpu, ra);
}
return store_whole_le16(p->haddr, p->size, val_le);
}
}
-static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
+static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val,
int mmu_idx, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
- QEMU_IOTHREAD_LOCK_GUARD();
- do_st_mmio_leN(env, p->full, val, p->addr, 1, mmu_idx, ra);
+ do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
}
}
-static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
+static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val,
int mmu_idx, MemOp memop, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap16(val);
}
- QEMU_IOTHREAD_LOCK_GUARD();
- do_st_mmio_leN(env, p->full, val, p->addr, 2, mmu_idx, ra);
+ do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
if (memop & MO_BSWAP) {
val = bswap16(val);
}
- store_atom_2(env, ra, p->haddr, memop, val);
+ store_atom_2(cpu, ra, p->haddr, memop, val);
}
}
-static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
+static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val,
int mmu_idx, MemOp memop, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap32(val);
}
- QEMU_IOTHREAD_LOCK_GUARD();
- do_st_mmio_leN(env, p->full, val, p->addr, 4, mmu_idx, ra);
+ do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
if (memop & MO_BSWAP) {
val = bswap32(val);
}
- store_atom_4(env, ra, p->haddr, memop, val);
+ store_atom_4(cpu, ra, p->haddr, memop, val);
}
}
-static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
+static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val,
int mmu_idx, MemOp memop, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap64(val);
}
- QEMU_IOTHREAD_LOCK_GUARD();
- do_st_mmio_leN(env, p->full, val, p->addr, 8, mmu_idx, ra);
+ do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
if (memop & MO_BSWAP) {
val = bswap64(val);
}
- store_atom_8(env, ra, p->haddr, memop, val);
+ store_atom_8(cpu, ra, p->haddr, memop, val);
}
}
-void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t ra)
+static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
+ MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
tcg_debug_assert(!crosspage);
- do_st_1(env, &l.page[0], val, l.mmu_idx, ra);
+ do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra);
}
-static void do_st2_mmu(CPUArchState *env, vaddr addr, uint16_t val,
+static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
uint8_t a, b;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
- do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
return;
}
} else {
b = val, a = val >> 8;
}
- do_st_1(env, &l.page[0], a, l.mmu_idx, ra);
- do_st_1(env, &l.page[1], b, l.mmu_idx, ra);
+ do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra);
+ do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra);
}
-void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
- do_st2_mmu(env, addr, val, oi, retaddr);
-}
-
-static void do_st4_mmu(CPUArchState *env, vaddr addr, uint32_t val,
+static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
- do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
return;
}
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap32(val);
}
- val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
- (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
+ val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
}
-void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
- do_st4_mmu(env, addr, val, oi, retaddr);
-}
-
-static void do_st8_mmu(CPUArchState *env, vaddr addr, uint64_t val,
+static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
- do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
return;
}
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap64(val);
}
- val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
- (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
-}
-
-void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
- do_st8_mmu(env, addr, val, oi, retaddr);
+ val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
}
-static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
+static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
int first;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
if (unlikely(l.page[0].flags & TLB_MMIO)) {
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap128(val);
}
- a = int128_getlo(val);
- b = int128_gethi(val);
- QEMU_IOTHREAD_LOCK_GUARD();
- do_st_mmio_leN(env, l.page[0].full, a, addr, 8, l.mmu_idx, ra);
- do_st_mmio_leN(env, l.page[0].full, b, addr + 8, 8, l.mmu_idx, ra);
+ do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
} else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
if (l.memop & MO_BSWAP) {
val = bswap128(val);
}
- store_atom_16(env, ra, l.page[0].haddr, l.memop, val);
+ store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val);
}
return;
}
} else {
a = int128_getlo(val), b = int128_gethi(val);
}
- do_st_8(env, &l.page[0], a, l.mmu_idx, mop8, ra);
- do_st_8(env, &l.page[1], b, l.mmu_idx, mop8, ra);
+ do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra);
+ do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra);
return;
}
val = bswap128(val);
}
if (first < 8) {
- do_st_leN(env, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
+ do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
val = int128_urshift(val, first * 8);
- do_st16_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
+ do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
} else {
- b = do_st16_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
- do_st_leN(env, &l.page[1], b, l.mmu_idx, l.memop, ra);
+ b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra);
}
}
-void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
- do_st16_mmu(env, addr, val, oi, retaddr);
-}
-
-void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
-{
- helper_st16_mmu(env, addr, val, oi, GETPC());
-}
-
-/*
- * Store Helpers for cpu_ldst.h
- */
-
-static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
-{
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- helper_stb_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
- do_st2_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
- do_st4_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
- do_st8_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
- do_st16_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
#include "ldst_common.c.inc"
/*
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
- return do_ld1_mmu(env, addr, oi, 0, MMU_INST_FETCH);
+ return do_ld1_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
}
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
- return do_ld2_mmu(env, addr, oi, 0, MMU_INST_FETCH);
+ return do_ld2_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
}
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
- return do_ld4_mmu(env, addr, oi, 0, MMU_INST_FETCH);
+ return do_ld4_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
- return do_ld8_mmu(env, addr, oi, 0, MMU_INST_FETCH);
+ return do_ld8_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
}
uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return do_ld1_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
+ return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return do_ld2_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
+ return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return do_ld4_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
+ return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return do_ld8_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
+ return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}