The io_readx() function needs to know whether the load it is
doing is an MMU_DATA_LOAD or an MMU_INST_FETCH, so that it
can pass the right value to the cpu_transaction_failed()
function. Plumb this information through from the softmmu
code.
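
For reference, the access type being plumbed through is QEMU's
MMUAccessType enum (from include/qom/cpu.h):

    typedef enum MMUAccessType {
        MMU_DATA_LOAD  = 0,
        MMU_DATA_STORE = 1,
        MMU_INST_FETCH = 2
    } MMUAccessType;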
Currently this rarely gives the wrong answer, because instruction
fetches usually go via get_page_addr_code(). However, once we switch
over to handling execution from non-RAM by creating single-insn TBs,
the path for an insn fetch that generates a bus error will be through
cpu_ld*_code() and io_readx(), so without this change we would
generate a d-side fault when we should generate an i-side fault.
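
To make the d-side/i-side distinction concrete, here is a sketch of how
a target's do_transaction_failed hook could dispatch on the access type.
The signature matches CPUClass::do_transaction_failed; the foo_* names
and the two abort helpers are invented for illustration:

    static void foo_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                              vaddr addr, unsigned size,
                                              MMUAccessType access_type,
                                              int mmu_idx, MemTxAttrs attrs,
                                              MemTxResult response,
                                              uintptr_t retaddr)
    {
        if (access_type == MMU_INST_FETCH) {
            /* i-side fault: report an instruction/prefetch abort */
            foo_raise_prefetch_abort(cs, addr, retaddr);    /* hypothetical */
        } else {
            /* d-side fault: report a data abort, noting load vs store */
            foo_raise_data_abort(cs, addr,                  /* hypothetical */
                                 access_type == MMU_DATA_STORE, retaddr);
        }
    }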
We also have to pass the access type via a CPU struct global
down to unassigned_mem_read(), for the benefit of the targets
which still use the cpu_unassigned_access() hook (m68k, mips,
sparc, xtensa).
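
For context, the legacy hook carries only a bool is_exec flag rather
than a full MMUAccessType; its prototype in include/qom/cpu.h is:

    typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
                                        bool is_write, bool is_exec,
                                        int opaque, unsigned size);

Since unassigned_mem_read() is not passed an access type at all, the
new per-CPU field is the only way to recover is_exec there.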
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20180710160013.26559-2-peter.maydell@linaro.org
---
 static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                          int mmu_idx,
                          target_ulong addr, uintptr_t retaddr,
-                         bool recheck, int size)
+                         bool recheck, MMUAccessType access_type, int size)
 {
     CPUState *cpu = ENV_GET_CPU(env);
     hwaddr mr_offset;
     }
 
     cpu->mem_io_vaddr = addr;
+    cpu->mem_io_access_type = access_type;
 
     if (mr->global_locking && !qemu_mutex_iothread_locked()) {
         qemu_mutex_lock_iothread();
             section->offset_within_address_space -
             section->offset_within_region;
 
-        cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
+        cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
                                mmu_idx, iotlbentry->attrs, r, retaddr);
     }
 
     if (locked) {
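
With this, io_readx() reports the caller's access type instead of
hard-coding MMU_DATA_LOAD. For reference, the signature of the wrapper
it calls (per include/exec/exec-all.h, with the argument order visible
at the call site above):

    static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
                                              vaddr addr, unsigned size,
                                              MMUAccessType access_type,
                                              int mmu_idx, MemTxAttrs attrs,
                                              MemTxResult response,
                                              uintptr_t retaddr);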
 static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                               size_t mmu_idx, size_t index,
                                               target_ulong addr,
                                               uintptr_t retaddr,
-                                              bool recheck)
+                                              bool recheck,
+                                              MMUAccessType access_type)
 {
     CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
     return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, recheck,
-                    DATA_SIZE);
+                    access_type, DATA_SIZE);
 }
 #endif
         /* ??? Note that the io helpers always read data in the target
            byte ordering.  We should push the LE/BE request down into io.  */
         res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
-                                    tlb_addr & TLB_RECHECK);
+                                    tlb_addr & TLB_RECHECK,
+                                    READ_ACCESS_TYPE);
         res = TGT_LE(res);
         return res;
     }
         /* ??? Note that the io helpers always read data in the target
            byte ordering.  We should push the LE/BE request down into io.  */
         res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
-                                    tlb_addr & TLB_RECHECK,
+                                    tlb_addr & TLB_RECHECK,
+                                    READ_ACCESS_TYPE);
         res = TGT_BE(res);
         return res;
     }
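
Both the little-endian and big-endian load paths pass READ_ACCESS_TYPE,
which softmmu_template.h already defines per instantiation; this is
what makes the code-access helpers (cpu_ld*_code()) report i-side
faults while the data helpers report d-side faults:

    #ifdef SOFTMMU_CODE_ACCESS
    #define READ_ACCESS_TYPE MMU_INST_FETCH
    #define ADDR_READ addr_code
    #else
    #define READ_ACCESS_TYPE MMU_DATA_LOAD
    #define ADDR_READ addr_read
    #endif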
      */
     uintptr_t mem_io_pc;
     vaddr mem_io_vaddr;
+    /*
+     * This is only needed for the legacy cpu_unassigned_access() hook;
+     * when all targets using it have been converted to use
+     * cpu_transaction_failed() instead it can be removed.
+     */
+    MMUAccessType mem_io_access_type;
 
     int kvm_fd;
     struct KVMState *kvm_state;
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
if (current_cpu != NULL) {
- cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
+ bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
+ cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
}
return 0;
}
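
A target still using the legacy hook can now trust is_exec to pick the
right exception. A minimal sketch of such a hook (the foo_* names and
exception constants are invented; the real implementations live in the
m68k, mips, sparc and xtensa targets):

    static void foo_cpu_unassigned_access(CPUState *cs, hwaddr addr,
                                          bool is_write, bool is_exec,
                                          int opaque, unsigned size)
    {
        /* Choose the i-side or d-side bus-error exception (hypothetical) */
        cs->exception_index = is_exec ? FOO_EXCP_INSN_BUS_ERROR
                                      : FOO_EXCP_DATA_BUS_ERROR;
        cpu_loop_exit(cs);
    }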