#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
+#include "exec/tb-flush.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "exec/translate-all.h"
#include "trace.h"
#include "tb-hash.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
}
}
-void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
-{
- CPUState *cpu;
- size_t full = 0, part = 0, elide = 0;
-
- CPU_FOREACH(cpu) {
- full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
- part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
- elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
- }
- *pfull = full;
- *ppart = part;
- *pelide = elide;
-}
-
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
uint16_t asked = data.host_int;
}
static MemoryRegionSection *
-io_prepare(hwaddr *out_offset, CPUArchState *env, hwaddr xlat,
+io_prepare(hwaddr *out_offset, CPUState *cpu, hwaddr xlat,
MemTxAttrs attrs, vaddr addr, uintptr_t retaddr)
{
- CPUState *cpu = env_cpu(env);
MemoryRegionSection *section;
hwaddr mr_offset;
return section;
}
-static void io_failed(CPUArchState *env, CPUTLBEntryFull *full, vaddr addr,
+static void io_failed(CPUState *cpu, CPUTLBEntryFull *full, vaddr addr,
unsigned size, MMUAccessType access_type, int mmu_idx,
MemTxResult response, uintptr_t retaddr)
{
-    CPUState *cpu = env_cpu(env);
-
-    if (!cpu->ignore_memory_transaction_failures) {
-        CPUClass *cc = CPU_GET_CLASS(cpu);
-
-        if (cc->tcg_ops->do_transaction_failed) {
-            hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
-
-            cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
-                                               access_type, mmu_idx,
-                                               full->attrs, response, retaddr);
-        }
+    if (!cpu->ignore_memory_transaction_failures
+        && cpu->cc->tcg_ops->do_transaction_failed) {
+        hwaddr physaddr = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
+
+        cpu->cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
+                                                access_type, mmu_idx,
+                                                full->attrs, response, retaddr);
}
}
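
For orientation, this conversion threads CPUState through the call chain instead of re-deriving it at each site, and the cached cpu->cc pointer likewise replaces CPU_GET_CLASS(). A minimal sketch of the accessor being avoided, assuming the usual QEMU layout in which ArchCPU embeds CPUState (parent_obj) ahead of CPUArchState (env); illustrative, not the verbatim header definition:

    /* Sketch: recover the enclosing CPUState from a CPUArchState by a
     * fixed offset.  Passing CPUState down directly lets every callee
     * skip this recomputation. */
    static inline CPUState *env_cpu(CPUArchState *env)
    {
        return &container_of(env, ArchCPU, env)->parent_obj;
    }
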
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, 1, *pfull, retaddr);
+ int dirtysize = size == 0 ? 1 : size;
+ notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr);
flags &= ~TLB_NOTDIRTY;
}
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, 1, *pfull, 0);
+ int dirtysize = size == 0 ? 1 : size;
+ notdirty_write(env_cpu(env), addr, dirtysize, *pfull, 0);
flags &= ~TLB_NOTDIRTY;
}
/* Handle clean RAM pages. */
if (unlikely(flags & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, 1, full, retaddr);
+ int dirtysize = size == 0 ? 1 : size;
+ notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr);
flags &= ~TLB_NOTDIRTY;
}
/* Handle clean RAM pages. */
if (flags & TLB_NOTDIRTY) {
- notdirty_write(env_cpu(env), addr, 1, full, retaddr);
+ notdirty_write(env_cpu(env), addr, size, full, retaddr);
}
}
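
Why the clamp in the preceding hunks: probe-style callers may pass size == 0 to mean "translate only, no data access", yet notdirty_write() must dirty at least one byte to lift the page's TB write protection; the final hunk passes size through unchanged, presumably because that caller guarantees a nonzero size. A hedged usage sketch; probe_access_full()'s signature here is as I recall it for this era and should be treated as illustrative:

    /* Illustrative: a zero-size store probe still reaches the
     * clean-RAM path, hence the clamp to one byte. */
    CPUTLBEntryFull *full;
    void *host;
    int flags = probe_access_full(env, addr, 0, MMU_DATA_STORE,
                                  mmu_idx, false, &host, &full, ra);
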
/**
* mmu_lookup1: translate one page
- * @env: cpu context
+ * @cpu: generic cpu state
* @data: lookup parameters
* @mmu_idx: virtual address context
* @access_type: load/store/code
* tlb_fill will longjmp out. Return true if the softmmu tlb for
* @mmu_idx may have resized.
*/
-static bool mmu_lookup1(CPUArchState *env, MMULookupPageData *data,
+static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
int mmu_idx, MMUAccessType access_type, uintptr_t ra)
{
vaddr addr = data->addr;
- uintptr_t index = tlb_index(env_cpu(env), mmu_idx, addr);
- CPUTLBEntry *entry = tlb_entry(env_cpu(env), mmu_idx, addr);
+ uintptr_t index = tlb_index(cpu, mmu_idx, addr);
+ CPUTLBEntry *entry = tlb_entry(cpu, mmu_idx, addr);
uint64_t tlb_addr = tlb_read_idx(entry, access_type);
bool maybe_resized = false;
CPUTLBEntryFull *full;
/* If the TLB entry is for a different page, reload and try again. */
if (!tlb_hit(tlb_addr, addr)) {
- if (!victim_tlb_hit(env_cpu(env), mmu_idx, index, access_type,
+ if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
addr & TARGET_PAGE_MASK)) {
- tlb_fill(env_cpu(env), addr, data->size, access_type, mmu_idx, ra);
+ tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
maybe_resized = true;
- index = tlb_index(env_cpu(env), mmu_idx, addr);
- entry = tlb_entry(env_cpu(env), mmu_idx, addr);
+ index = tlb_index(cpu, mmu_idx, addr);
+ entry = tlb_entry(cpu, mmu_idx, addr);
}
tlb_addr = tlb_read_idx(entry, access_type) & ~TLB_INVALID_MASK;
}
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
flags |= full->slow_flags[access_type];
/**
* mmu_watch_or_dirty
- * @env: cpu context
+ * @cpu: generic cpu state
* @data: lookup parameters
* @access_type: load/store/code
* @ra: return address into tcg generated code, or 0
* Trigger watchpoints for @data.addr:@data.size;
* record writes to protected clean pages.
*/
-static void mmu_watch_or_dirty(CPUArchState *env, MMULookupPageData *data,
+static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
MMUAccessType access_type, uintptr_t ra)
{
CPUTLBEntryFull *full = data->full;
/* On watchpoint hit, this will longjmp out. */
if (flags & TLB_WATCHPOINT) {
int wp = access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ;
- cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs, wp, ra);
+ cpu_check_watchpoint(cpu, addr, size, full->attrs, wp, ra);
flags &= ~TLB_WATCHPOINT;
}
/* Note that notdirty is only set for writes. */
if (flags & TLB_NOTDIRTY) {
- notdirty_write(env_cpu(env), addr, size, full, ra);
+ notdirty_write(cpu, addr, size, full, ra);
flags &= ~TLB_NOTDIRTY;
}
data->flags = flags;
/**
* mmu_lookup: translate page(s)
- * @env: cpu context
+ * @cpu: generic cpu state
* @addr: virtual address
* @oi: combined mmu_idx and MemOp
* @ra: return address into tcg generated code, or 0
* Resolve the translation for the page(s) beginning at @addr, for MemOp.size
* bytes. Return true if the lookup crosses a page boundary.
*/
-static bool mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
{
unsigned a_bits;
/* Handle CPU specific unaligned behaviour */
a_bits = get_alignment_bits(l->memop);
if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(env_cpu(env), addr, type, l->mmu_idx, ra);
+ cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
}
l->page[0].addr = addr;
crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
if (likely(!crosspage)) {
- mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
+ mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
flags = l->page[0].flags;
if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
- mmu_watch_or_dirty(env, &l->page[0], type, ra);
+ mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
}
if (unlikely(flags & TLB_BSWAP)) {
l->memop ^= MO_BSWAP;
* Lookup both pages, recognizing exceptions from either. If the
* second lookup potentially resized, refresh first CPUTLBEntryFull.
*/
- mmu_lookup1(env, &l->page[0], l->mmu_idx, type, ra);
- if (mmu_lookup1(env, &l->page[1], l->mmu_idx, type, ra)) {
- uintptr_t index = tlb_index(env_cpu(env), l->mmu_idx, addr);
- l->page[0].full = &env_tlb(env)->d[l->mmu_idx].fulltlb[index];
+ mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
+ if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
+ uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
+ l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
}
flags = l->page[0].flags | l->page[1].flags;
if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
- mmu_watch_or_dirty(env, &l->page[0], type, ra);
- mmu_watch_or_dirty(env, &l->page[1], type, ra);
+ mmu_watch_or_dirty(cpu, &l->page[0], type, ra);
+ mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
}
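
Concretely (illustrative, assuming 4 KiB target pages): an 8-byte access at 0xffc spills into the next page, so page[0] covers 4 bytes, page[1] the remaining 4, and mmu_lookup() returns true.
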
/*
* Probe for an atomic operation. Do not allow unaligned operations,
* or io operations to proceed. Return the host address.
*/
-static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
uintptr_t mmu_idx = get_mmuidx(oi);
/* Enforce guest required alignment. */
if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
/* ??? Maybe indicate atomic op to cpu_unaligned_access */
- cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
+ cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
goto stop_the_world;
}
- index = tlb_index(env_cpu(env), mmu_idx, addr);
- tlbe = tlb_entry(env_cpu(env), mmu_idx, addr);
+ index = tlb_index(cpu, mmu_idx, addr);
+ tlbe = tlb_entry(cpu, mmu_idx, addr);
/* Check TLB entry and enforce page permissions. */
tlb_addr = tlb_addr_write(tlbe);
if (!tlb_hit(tlb_addr, addr)) {
- if (!victim_tlb_hit(env_cpu(env), mmu_idx, index, MMU_DATA_STORE,
+ if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
addr & TARGET_PAGE_MASK)) {
- tlb_fill(env_cpu(env), addr, size,
+ tlb_fill(cpu, addr, size,
MMU_DATA_STORE, mmu_idx, retaddr);
- index = tlb_index(env_cpu(env), mmu_idx, addr);
- tlbe = tlb_entry(env_cpu(env), mmu_idx, addr);
+ index = tlb_index(cpu, mmu_idx, addr);
+ tlbe = tlb_entry(cpu, mmu_idx, addr);
}
tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
}
* but addr_read will only be -1 if PAGE_READ was unset.
*/
if (unlikely(tlbe->addr_read == -1)) {
- tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+ tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
/*
* Since we don't support reads and writes to different
* addresses, and we do have the proper page loaded for
}
hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];
if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, size, full, retaddr);
+ notdirty_write(cpu, addr, size, full, retaddr);
}
if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
wp_flags |= BP_MEM_READ;
}
if (wp_flags) {
- cpu_check_watchpoint(env_cpu(env), addr, size,
+ cpu_check_watchpoint(cpu, addr, size,
full->attrs, wp_flags, retaddr);
}
}
return hostaddr;
stop_the_world:
- cpu_loop_exit_atomic(env_cpu(env), retaddr);
+ cpu_loop_exit_atomic(cpu, retaddr);
}
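
For context, a sketch of how an atomic helper consumes this lookup, using the post-patch signature; the helper name and cmpxchg body are illustrative, not code from this patch:

    static uint32_t atomic_cmpxchgl_sketch(CPUState *cpu, vaddr addr,
                                           uint32_t cmp, uint32_t newv,
                                           MemOpIdx oi, uintptr_t ra)
    {
        /* Resolve the host address once; unaligned or MMIO targets
         * have already taken cpu_loop_exit_atomic() inside the lookup. */
        uint32_t *haddr = atomic_mmu_lookup(cpu, addr, oi, 4, ra);
        return qatomic_cmpxchg(haddr, cmp, newv);
    }
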
/*
/**
* do_ld_mmio_beN:
- * @env: cpu context
+ * @cpu: generic cpu state
* @full: page parameters
* @ret_be: accumulated data
* @addr: virtual address
* @size: number of bytes
* @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0
- * Context: iothread lock held
+ * Context: BQL held
*
* Load @size bytes from @addr, which is memory-mapped i/o.
* The bytes are concatenated in big-endian order with @ret_be.
*/
-static uint64_t int_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
+static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
uint64_t ret_be, vaddr addr, int size,
int mmu_idx, MMUAccessType type, uintptr_t ra,
MemoryRegion *mr, hwaddr mr_offset)
r = memory_region_dispatch_read(mr, mr_offset, &val,
this_mop, full->attrs);
if (unlikely(r != MEMTX_OK)) {
- io_failed(env, full, addr, this_size, type, mmu_idx, r, ra);
+ io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra);
}
if (this_size == 8) {
return val;
return ret_be;
}
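
The big-endian accumulation this helper performs reduces to shifting the bytes gathered so far left and OR-ing in each sub-read, roughly:

    /* Illustrative: reading bytes {0x12, 0x34, 0x56} with ret_be == 0
     * accumulates to 0x123456. */
    ret_be = (ret_be << (this_size * 8)) | val;
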
-static uint64_t do_ld_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
+static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
uint64_t ret_be, vaddr addr, int size,
int mmu_idx, MMUAccessType type, uintptr_t ra)
{
tcg_debug_assert(size > 0 && size <= 8);
attrs = full->attrs;
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
- qemu_mutex_lock_iothread();
- ret = int_ld_mmio_beN(env, full, ret_be, addr, size, mmu_idx,
+ bql_lock();
+ ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
type, ra, mr, mr_offset);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return ret;
}
-static Int128 do_ld16_mmio_beN(CPUArchState *env, CPUTLBEntryFull *full,
+static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
uint64_t ret_be, vaddr addr, int size,
int mmu_idx, uintptr_t ra)
{
tcg_debug_assert(size > 8 && size <= 16);
attrs = full->attrs;
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
- qemu_mutex_lock_iothread();
- a = int_ld_mmio_beN(env, full, ret_be, addr, size - 8, mmu_idx,
+ bql_lock();
+ a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
MMU_DATA_LOAD, ra, mr, mr_offset);
- b = int_ld_mmio_beN(env, full, ret_be, addr + size - 8, 8, mmu_idx,
+ b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return int128_make128(b, a);
}
* As do_ld_bytes_beN, but with one atomic load.
* Eight aligned bytes are guaranteed to cover the load.
*/
-static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra,
+static uint64_t do_ld_whole_be8(CPUState *cpu, uintptr_t ra,
MMULookupPageData *p, uint64_t ret_be)
{
int o = p->addr & 7;
- uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o);
+ uint64_t x = load_atomic8_or_exit(cpu, ra, p->haddr - o);
x = cpu_to_be64(x);
x <<= o * 8;
* As do_ld_bytes_beN, but with one atomic load.
* 16 aligned bytes are guaranteed to cover the load.
*/
-static Int128 do_ld_whole_be16(CPUArchState *env, uintptr_t ra,
+static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
MMULookupPageData *p, uint64_t ret_be)
{
int o = p->addr & 15;
- Int128 x, y = load_atomic16_or_exit(env, ra, p->haddr - o);
+ Int128 x, y = load_atomic16_or_exit(cpu, ra, p->haddr - o);
int size = p->size;
if (!HOST_BIG_ENDIAN) {
/*
* Wrapper for the above.
*/
-static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
+static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
uint64_t ret_be, int mmu_idx, MMUAccessType type,
MemOp mop, uintptr_t ra)
{
unsigned tmp, half_size;
if (unlikely(p->flags & TLB_MMIO)) {
- return do_ld_mmio_beN(env, p->full, ret_be, p->addr, p->size,
+ return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
mmu_idx, type, ra);
}
if (!HAVE_al8_fast && p->size < 4) {
return do_ld_whole_be4(p, ret_be);
} else {
- return do_ld_whole_be8(env, ra, p, ret_be);
+ return do_ld_whole_be8(cpu, ra, p, ret_be);
}
}
/* fall through */
/*
* Wrapper for the above, for 8 < size < 16.
*/
-static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
+static Int128 do_ld16_beN(CPUState *cpu, MMULookupPageData *p,
uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
{
int size = p->size;
MemOp atom;
if (unlikely(p->flags & TLB_MMIO)) {
- return do_ld16_mmio_beN(env, p->full, a, p->addr, size, mmu_idx, ra);
+ return do_ld16_mmio_beN(cpu, p->full, a, p->addr, size, mmu_idx, ra);
}
/*
case MO_ATOM_WITHIN16_PAIR:
/* Since size > 8, this is the half that must be atomic. */
- return do_ld_whole_be16(env, ra, p, a);
+ return do_ld_whole_be16(cpu, ra, p, a);
case MO_ATOM_IFALIGN_PAIR:
/*
return int128_make128(b, a);
}
-static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
+static uint8_t do_ld_1(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
- return do_ld_mmio_beN(env, p->full, 0, p->addr, 1, mmu_idx, type, ra);
+ return do_ld_mmio_beN(cpu, p->full, 0, p->addr, 1, mmu_idx, type, ra);
} else {
return *(uint8_t *)p->haddr;
}
}
-static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
+static uint16_t do_ld_2(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, MemOp memop, uintptr_t ra)
{
uint16_t ret;
if (unlikely(p->flags & TLB_MMIO)) {
- ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 2, mmu_idx, type, ra);
+ ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 2, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap16(ret);
}
} else {
/* Perform the load host endian, then swap if necessary. */
- ret = load_atom_2(env, ra, p->haddr, memop);
+ ret = load_atom_2(cpu, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap16(ret);
}
return ret;
}
-static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
+static uint32_t do_ld_4(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, MemOp memop, uintptr_t ra)
{
uint32_t ret;
if (unlikely(p->flags & TLB_MMIO)) {
- ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 4, mmu_idx, type, ra);
+ ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 4, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap32(ret);
}
} else {
/* Perform the load host endian. */
- ret = load_atom_4(env, ra, p->haddr, memop);
+ ret = load_atom_4(cpu, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap32(ret);
}
return ret;
}
-static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
+static uint64_t do_ld_8(CPUState *cpu, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, MemOp memop, uintptr_t ra)
{
uint64_t ret;
if (unlikely(p->flags & TLB_MMIO)) {
- ret = do_ld_mmio_beN(env, p->full, 0, p->addr, 8, mmu_idx, type, ra);
+ ret = do_ld_mmio_beN(cpu, p->full, 0, p->addr, 8, mmu_idx, type, ra);
if ((memop & MO_BSWAP) == MO_LE) {
ret = bswap64(ret);
}
} else {
/* Perform the load host endian. */
- ret = load_atom_8(env, ra, p->haddr, memop);
+ ret = load_atom_8(cpu, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap64(ret);
}
return ret;
}
-static uint8_t do_ld1_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
bool crosspage;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
tcg_debug_assert(!crosspage);
- return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
-}
-
-tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
- return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
+ return do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
}
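
The removed helper_ldub_mmu() wrapper presumably reappears in ldst_common.c.inc (included near the end of this file), adapted to the new do_ld1_mmu() signature roughly as follows; a sketch of the relocation, not a quote from that file:

    tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
                                     MemOpIdx oi, uintptr_t retaddr)
    {
        tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
        return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
    }
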
-static uint16_t do_ld2_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
uint8_t a, b;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
- return do_ld_2(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
+ return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
}
- a = do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
- b = do_ld_1(env, &l.page[1], l.mmu_idx, access_type, ra);
+ a = do_ld_1(cpu, &l.page[0], l.mmu_idx, access_type, ra);
+ b = do_ld_1(cpu, &l.page[1], l.mmu_idx, access_type, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = a | (b << 8);
return ret;
}
-tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
- return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-static uint32_t do_ld4_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
uint32_t ret;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
- return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
+ return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
}
- ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
- ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap32(ret);
}
return ret;
}
-tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
- return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-static uint64_t do_ld8_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
uintptr_t ra, MMUAccessType access_type)
{
MMULookupLocals l;
uint64_t ret;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, access_type, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
if (likely(!crosspage)) {
- return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
+ return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
}
- ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
- ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(cpu, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap64(ret);
}
return ret;
}
-uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
- return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-/*
- * Provide signed versions of the load routines as well. We can of course
- * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
- */
-
-tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
-}
-
-tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
-}
-
-tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
-}
-
-static Int128 do_ld16_mmu(CPUArchState *env, vaddr addr,
+static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
int first;
cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
if (likely(!crosspage)) {
if (unlikely(l.page[0].flags & TLB_MMIO)) {
- ret = do_ld16_mmio_beN(env, l.page[0].full, 0, addr, 16,
+ ret = do_ld16_mmio_beN(cpu, l.page[0].full, 0, addr, 16,
l.mmu_idx, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap128(ret);
}
} else {
/* Perform the load host endian. */
- ret = load_atom_16(env, ra, l.page[0].haddr, l.memop);
+ ret = load_atom_16(cpu, ra, l.page[0].haddr, l.memop);
if (l.memop & MO_BSWAP) {
ret = bswap128(ret);
}
if (first == 8) {
MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
- a = do_ld_8(env, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
- b = do_ld_8(env, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
+ a = do_ld_8(cpu, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
+ b = do_ld_8(cpu, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
if ((mop8 & MO_BSWAP) == MO_LE) {
ret = int128_make128(a, b);
} else {
}
if (first < 8) {
- a = do_ld_beN(env, &l.page[0], 0, l.mmu_idx,
+ a = do_ld_beN(cpu, &l.page[0], 0, l.mmu_idx,
MMU_DATA_LOAD, l.memop, ra);
- ret = do_ld16_beN(env, &l.page[1], a, l.mmu_idx, l.memop, ra);
+ ret = do_ld16_beN(cpu, &l.page[1], a, l.mmu_idx, l.memop, ra);
} else {
- ret = do_ld16_beN(env, &l.page[0], 0, l.mmu_idx, l.memop, ra);
+ ret = do_ld16_beN(cpu, &l.page[0], 0, l.mmu_idx, l.memop, ra);
b = int128_getlo(ret);
ret = int128_lshift(ret, l.page[1].size * 8);
a = int128_gethi(ret);
- b = do_ld_beN(env, &l.page[1], b, l.mmu_idx,
+ b = do_ld_beN(cpu, &l.page[1], b, l.mmu_idx,
MMU_DATA_LOAD, l.memop, ra);
ret = int128_make128(b, a);
}
return ret;
}
-Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
- uint32_t oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
- return do_ld16_mmu(env, addr, oi, retaddr);
-}
-
-Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
-{
- return helper_ld16_mmu(env, addr, oi, GETPC());
-}
-
-/*
- * Load helpers for cpu_ldst.h.
- */
-
-static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
-{
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-}
-
-uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
-{
- uint8_t ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
- ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint16_t ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
- ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint32_t ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
- ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint64_t ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
- ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- Int128 ret;
-
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
- ret = do_ld16_mmu(env, addr, oi, ra);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
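
As with helper_ldub_mmu() above, these cpu_ld*_mmu wrappers and their plugin callback, together with the matching store wrappers removed further down, appear to be consolidated into ldst_common.c.inc, which this file continues to include below.
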
/*
* Store Helpers
*/
/**
* do_st_mmio_leN:
- * @env: cpu context
+ * @cpu: generic cpu state
* @full: page parameters
* @val_le: data to store
* @addr: virtual address
* @size: number of bytes
* @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0
- * Context: iothread lock held
+ * Context: BQL held
*
* Store @size bytes at @addr, which is memory-mapped i/o.
* The bytes to store are extracted in little-endian order from @val_le;
* return the bytes of @val_le beyond @p->size that have not been stored.
*/
-static uint64_t int_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+static uint64_t int_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
uint64_t val_le, vaddr addr, int size,
int mmu_idx, uintptr_t ra,
MemoryRegion *mr, hwaddr mr_offset)
r = memory_region_dispatch_write(mr, mr_offset, val_le,
this_mop, full->attrs);
if (unlikely(r != MEMTX_OK)) {
- io_failed(env, full, addr, this_size, MMU_DATA_STORE,
+ io_failed(cpu, full, addr, this_size, MMU_DATA_STORE,
mmu_idx, r, ra);
}
if (this_size == 8) {
return val_le;
}
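
Mirroring the load path, the little-endian extraction inside int_st_mmio_leN() consumes the low-order bytes of @val_le on each sub-write and shifts the remainder down, roughly:

    /* Illustrative: after storing this_size bytes, discard them so the
     * caller receives only the bytes not yet written. */
    val_le >>= this_size * 8;
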
-static uint64_t do_st_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
uint64_t val_le, vaddr addr, int size,
int mmu_idx, uintptr_t ra)
{
tcg_debug_assert(size > 0 && size <= 8);
attrs = full->attrs;
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
- qemu_mutex_lock_iothread();
- ret = int_st_mmio_leN(env, full, val_le, addr, size, mmu_idx,
+ bql_lock();
+ ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
ra, mr, mr_offset);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return ret;
}
-static uint64_t do_st16_mmio_leN(CPUArchState *env, CPUTLBEntryFull *full,
+static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
Int128 val_le, vaddr addr, int size,
int mmu_idx, uintptr_t ra)
{
tcg_debug_assert(size > 8 && size <= 16);
attrs = full->attrs;
- section = io_prepare(&mr_offset, env, full->xlat_section, attrs, addr, ra);
+ section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
- qemu_mutex_lock_iothread();
- int_st_mmio_leN(env, full, int128_getlo(val_le), addr, 8,
+ bql_lock();
+ int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
mmu_idx, ra, mr, mr_offset);
- ret = int_st_mmio_leN(env, full, int128_gethi(val_le), addr + 8,
+ ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
size - 8, mmu_idx, ra, mr, mr_offset + 8);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return ret;
}
/*
* Wrapper for the above.
*/
-static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
+static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p,
uint64_t val_le, int mmu_idx,
MemOp mop, uintptr_t ra)
{
unsigned tmp, half_size;
if (unlikely(p->flags & TLB_MMIO)) {
- return do_st_mmio_leN(env, p->full, val_le, p->addr,
+ return do_st_mmio_leN(cpu, p->full, val_le, p->addr,
p->size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
return val_le >> (p->size * 8);
} else if (HAVE_al8) {
return store_whole_le8(p->haddr, p->size, val_le);
} else {
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ cpu_loop_exit_atomic(cpu, ra);
}
}
/* fall through */
/*
* Wrapper for the above, for 8 < size < 16.
*/
-static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
+static uint64_t do_st16_leN(CPUState *cpu, MMULookupPageData *p,
Int128 val_le, int mmu_idx,
MemOp mop, uintptr_t ra)
{
MemOp atom;
if (unlikely(p->flags & TLB_MMIO)) {
- return do_st16_mmio_leN(env, p->full, val_le, p->addr,
+ return do_st16_mmio_leN(cpu, p->full, val_le, p->addr,
size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
return int128_gethi(val_le) >> ((size - 8) * 8);
case MO_ATOM_WITHIN16_PAIR:
/* Since size > 8, this is the half that must be atomic. */
- if (!HAVE_ATOMIC128_RW) {
- cpu_loop_exit_atomic(env_cpu(env), ra);
+ if (!HAVE_CMPXCHG128) {
+ cpu_loop_exit_atomic(cpu, ra);
}
return store_whole_le16(p->haddr, p->size, val_le);
}
}
-static void do_st_1(CPUArchState *env, MMULookupPageData *p, uint8_t val,
+static void do_st_1(CPUState *cpu, MMULookupPageData *p, uint8_t val,
int mmu_idx, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
- do_st_mmio_leN(env, p->full, val, p->addr, 1, mmu_idx, ra);
+ do_st_mmio_leN(cpu, p->full, val, p->addr, 1, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
}
}
-static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
+static void do_st_2(CPUState *cpu, MMULookupPageData *p, uint16_t val,
int mmu_idx, MemOp memop, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap16(val);
}
- do_st_mmio_leN(env, p->full, val, p->addr, 2, mmu_idx, ra);
+ do_st_mmio_leN(cpu, p->full, val, p->addr, 2, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
if (memop & MO_BSWAP) {
val = bswap16(val);
}
- store_atom_2(env, ra, p->haddr, memop, val);
+ store_atom_2(cpu, ra, p->haddr, memop, val);
}
}
-static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
+static void do_st_4(CPUState *cpu, MMULookupPageData *p, uint32_t val,
int mmu_idx, MemOp memop, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap32(val);
}
- do_st_mmio_leN(env, p->full, val, p->addr, 4, mmu_idx, ra);
+ do_st_mmio_leN(cpu, p->full, val, p->addr, 4, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
if (memop & MO_BSWAP) {
val = bswap32(val);
}
- store_atom_4(env, ra, p->haddr, memop, val);
+ store_atom_4(cpu, ra, p->haddr, memop, val);
}
}
-static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
+static void do_st_8(CPUState *cpu, MMULookupPageData *p, uint64_t val,
int mmu_idx, MemOp memop, uintptr_t ra)
{
if (unlikely(p->flags & TLB_MMIO)) {
if ((memop & MO_BSWAP) != MO_LE) {
val = bswap64(val);
}
- do_st_mmio_leN(env, p->full, val, p->addr, 8, mmu_idx, ra);
+ do_st_mmio_leN(cpu, p->full, val, p->addr, 8, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
if (memop & MO_BSWAP) {
val = bswap64(val);
}
- store_atom_8(env, ra, p->haddr, memop, val);
+ store_atom_8(cpu, ra, p->haddr, memop, val);
}
}
-void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t ra)
+static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
+ MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
tcg_debug_assert(!crosspage);
- do_st_1(env, &l.page[0], val, l.mmu_idx, ra);
+ do_st_1(cpu, &l.page[0], val, l.mmu_idx, ra);
}
-static void do_st2_mmu(CPUArchState *env, vaddr addr, uint16_t val,
+static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
uint8_t a, b;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
- do_st_2(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
return;
}
} else {
b = val, a = val >> 8;
}
- do_st_1(env, &l.page[0], a, l.mmu_idx, ra);
- do_st_1(env, &l.page[1], b, l.mmu_idx, ra);
-}
-
-void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
- do_st2_mmu(env, addr, val, oi, retaddr);
+ do_st_1(cpu, &l.page[0], a, l.mmu_idx, ra);
+ do_st_1(cpu, &l.page[1], b, l.mmu_idx, ra);
}
-static void do_st4_mmu(CPUArchState *env, vaddr addr, uint32_t val,
+static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
- do_st_4(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
return;
}
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap32(val);
}
- val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
- (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
-}
-
-void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
- do_st4_mmu(env, addr, val, oi, retaddr);
+ val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
}
-static void do_st8_mmu(CPUArchState *env, vaddr addr, uint64_t val,
+static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
- do_st_8(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
return;
}
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap64(val);
}
- val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
- (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
+ val = do_st_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ (void) do_st_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
}
-void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
- do_st8_mmu(env, addr, val, oi, retaddr);
-}
-
-static void do_st16_mmu(CPUArchState *env, vaddr addr, Int128 val,
+static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
int first;
cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
- crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
if (likely(!crosspage)) {
if (unlikely(l.page[0].flags & TLB_MMIO)) {
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap128(val);
}
- do_st16_mmio_leN(env, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
+ do_st16_mmio_leN(cpu, l.page[0].full, val, addr, 16, l.mmu_idx, ra);
} else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
/* nothing */
} else {
if (l.memop & MO_BSWAP) {
val = bswap128(val);
}
- store_atom_16(env, ra, l.page[0].haddr, l.memop, val);
+ store_atom_16(cpu, ra, l.page[0].haddr, l.memop, val);
}
return;
}
} else {
a = int128_getlo(val), b = int128_gethi(val);
}
- do_st_8(env, &l.page[0], a, l.mmu_idx, mop8, ra);
- do_st_8(env, &l.page[1], b, l.mmu_idx, mop8, ra);
+ do_st_8(cpu, &l.page[0], a, l.mmu_idx, mop8, ra);
+ do_st_8(cpu, &l.page[1], b, l.mmu_idx, mop8, ra);
return;
}
val = bswap128(val);
}
if (first < 8) {
- do_st_leN(env, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
+ do_st_leN(cpu, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
val = int128_urshift(val, first * 8);
- do_st16_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
+ do_st16_leN(cpu, &l.page[1], val, l.mmu_idx, l.memop, ra);
} else {
- b = do_st16_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
- do_st_leN(env, &l.page[1], b, l.mmu_idx, l.memop, ra);
+ b = do_st16_leN(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ do_st_leN(cpu, &l.page[1], b, l.mmu_idx, l.memop, ra);
}
}
-void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
- do_st16_mmu(env, addr, val, oi, retaddr);
-}
-
-void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
-{
- helper_st16_mmu(env, addr, val, oi, GETPC());
-}
-
-/*
- * Store Helpers for cpu_ldst.h
- */
-
-static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
-{
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- helper_stb_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
- do_st2_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
- do_st4_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
- do_st8_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
- do_st16_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
#include "ldst_common.c.inc"
/*
uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
- return do_ld1_mmu(env, addr, oi, 0, MMU_INST_FETCH);
+ return do_ld1_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
}
uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
- return do_ld2_mmu(env, addr, oi, 0, MMU_INST_FETCH);
+ return do_ld2_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
}
uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
- return do_ld4_mmu(env, addr, oi, 0, MMU_INST_FETCH);
+ return do_ld4_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
- return do_ld8_mmu(env, addr, oi, 0, MMU_INST_FETCH);
+ return do_ld8_mmu(env_cpu(env), addr, oi, 0, MMU_INST_FETCH);
}
uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return do_ld1_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
+ return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return do_ld2_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
+ return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return do_ld4_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
+ return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}
uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return do_ld8_mmu(env, addr, oi, retaddr, MMU_INST_FETCH);
+ return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_INST_FETCH);
}