variables:
IMAGE: centos8
CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-fdt=system
- --enable-modules --enable-trace-backends=dtrace
+ --enable-modules --enable-trace-backends=dtrace --enable-docs
TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu
MAKE_CHECK_ARGS: check-build
F: docs/specs/acpi_pci_hotplug.rst
F: docs/specs/acpi_hw_reduced_hotplug.rst
+ACPI/VIOT
+M: Jean-Philippe Brucker <jean-philippe@linaro.org>
+R: Ani Sinha <ani@anisinha.ca>
+S: Supported
+F: hw/acpi/viot.c
+F: hw/acpi/viot.h
+
ACPI/HEST/GHES
R: Dongjiu Geng <gengdongjiu1@gmail.com>
L: qemu-arm@nongnu.org
M: Michael S. Tsirkin <mst@redhat.com>
M: David Hildenbrand <david@redhat.com>
S: Maintained
+F: docs/interop/virtio-balloon-stats.rst
F: hw/virtio/virtio-balloon*.c
F: include/hw/virtio/virtio-balloon.h
F: softmmu/balloon.c
return (uint32_t)ldl_be_p(haddr);
case MO_LEUL:
return (uint32_t)ldl_le_p(haddr);
- case MO_BEQ:
+ case MO_BEUQ:
return ldq_be_p(haddr);
- case MO_LEQ:
+ case MO_LEUQ:
return ldq_le_p(haddr);
default:
qemu_build_not_reached();
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_LEQ);
- return load_helper(env, addr, oi, retaddr, MO_LEQ, false,
+ validate_memop(oi, MO_LEUQ);
+ return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
helper_le_ldq_mmu);
}
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_BEQ);
- return load_helper(env, addr, oi, retaddr, MO_BEQ, false,
+ validate_memop(oi, MO_BEUQ);
+ return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
helper_be_ldq_mmu);
}
uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
- return cpu_load_helper(env, addr, oi, MO_BEQ, helper_be_ldq_mmu);
+ return cpu_load_helper(env, addr, oi, MO_BEUQ, helper_be_ldq_mmu);
}
uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
case MO_LEUL:
stl_le_p(haddr, val);
break;
- case MO_BEQ:
+ case MO_BEUQ:
stq_be_p(haddr, val);
break;
- case MO_LEQ:
+ case MO_LEUQ:
stq_le_p(haddr, val);
break;
default:
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_LEQ);
- store_helper(env, addr, val, oi, retaddr, MO_LEQ);
+ validate_memop(oi, MO_LEUQ);
+ store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
}
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_BEQ);
- store_helper(env, addr, val, oi, retaddr, MO_BEQ);
+ validate_memop(oi, MO_BEUQ);
+ store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
}
/*
static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
MemOpIdx oi, uintptr_t retaddr)
{
- return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code);
+ return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
}
uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
{
- MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true));
+ MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
return full_ldq_code(env, addr, oi, 0);
}
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
- MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
+ MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
return cpu_ldq_be_mmu(env, addr, oi, ra);
}
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
- MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
+ MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
return cpu_ldq_le_mmu(env, addr, oi, ra);
}
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
- MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx);
+ MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
cpu_stq_be_mmu(env, addr, val, oi, ra);
}
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
- MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx);
+ MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
cpu_stq_le_mmu(env, addr, val, oi, ra);
}
void *haddr;
uint64_t ret;
- validate_memop(oi, MO_BEQ);
+ validate_memop(oi, MO_BEUQ);
trace_guest_ld_before_exec(env_cpu(env), addr, oi);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_be_p(haddr);
void *haddr;
uint64_t ret;
- validate_memop(oi, MO_LEQ);
+ validate_memop(oi, MO_LEUQ);
trace_guest_ld_before_exec(env_cpu(env), addr, oi);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_le_p(haddr);
{
void *haddr;
- validate_memop(oi, MO_BEQ);
+ validate_memop(oi, MO_BEUQ);
trace_guest_st_before_exec(env_cpu(env), addr, oi);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stq_be_p(haddr, val);
{
void *haddr;
- validate_memop(oi, MO_LEQ);
+ validate_memop(oi, MO_LEUQ);
trace_guest_st_before_exec(env_cpu(env), addr, oi);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
stq_le_p(haddr, val);
--- /dev/null
+/*
+ * arm signal functions
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu.h"
+
+/*
+ * Compare to arm/arm/machdep.c sendsig()
+ * Assumes that target stack frame memory is locked.
+ */
+abi_long set_sigtramp_args(CPUARMState *env, int sig,
+ struct target_sigframe *frame,
+ abi_ulong frame_addr,
+ struct target_sigaction *ka)
+{
+ /*
+ * Arguments to signal handler:
+ * r0 = signal number
+ * r1 = siginfo pointer
+ * r2 = ucontext pointer
+ * r5 = ucontext pointer
+ * pc = signal handler pointer
+ * sp = sigframe struct pointer
+ * lr = sigtramp at base of user stack
+ */
+
+ env->regs[0] = sig;
+ env->regs[1] = frame_addr +
+ offsetof(struct target_sigframe, sf_si);
+ env->regs[2] = frame_addr +
+ offsetof(struct target_sigframe, sf_uc);
+
+ /* the trampoline uses r5 as the uc address */
+ env->regs[5] = frame_addr +
+ offsetof(struct target_sigframe, sf_uc);
+ env->regs[TARGET_REG_PC] = ka->_sa_handler & ~1;
+ env->regs[TARGET_REG_SP] = frame_addr;
+ env->regs[TARGET_REG_LR] = TARGET_PS_STRINGS - TARGET_SZSIGCODE;
+ /*
+ * Low bit indicates whether or not we're entering thumb mode.
+ */
+ cpsr_write(env, (ka->_sa_handler & 1) * CPSR_T, CPSR_T, CPSRWriteByInstr);
+
+ return 0;
+}
+
+/*
+ * Compare to arm/arm/machdep.c get_mcontext()
+ * Assumes that the memory is locked if mcp points to user memory.
+ */
+abi_long get_mcontext(CPUARMState *env, target_mcontext_t *mcp, int flags)
+{
+ int err = 0;
+ uint32_t *gr = mcp->__gregs;
+
+ if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_size != sizeof(target_mcontext_vfp_t)) {
+ return -TARGET_EINVAL;
+ }
+
+ gr[TARGET_REG_CPSR] = tswap32(cpsr_read(env));
+ if (flags & TARGET_MC_GET_CLEAR_RET) {
+ gr[TARGET_REG_R0] = 0;
+ gr[TARGET_REG_CPSR] &= ~CPSR_C;
+ } else {
+ gr[TARGET_REG_R0] = tswap32(env->regs[0]);
+ }
+
+ gr[TARGET_REG_R1] = tswap32(env->regs[1]);
+ gr[TARGET_REG_R2] = tswap32(env->regs[2]);
+ gr[TARGET_REG_R3] = tswap32(env->regs[3]);
+ gr[TARGET_REG_R4] = tswap32(env->regs[4]);
+ gr[TARGET_REG_R5] = tswap32(env->regs[5]);
+ gr[TARGET_REG_R6] = tswap32(env->regs[6]);
+ gr[TARGET_REG_R7] = tswap32(env->regs[7]);
+ gr[TARGET_REG_R8] = tswap32(env->regs[8]);
+ gr[TARGET_REG_R9] = tswap32(env->regs[9]);
+ gr[TARGET_REG_R10] = tswap32(env->regs[10]);
+ gr[TARGET_REG_R11] = tswap32(env->regs[11]);
+ gr[TARGET_REG_R12] = tswap32(env->regs[12]);
+
+ gr[TARGET_REG_SP] = tswap32(env->regs[13]);
+ gr[TARGET_REG_LR] = tswap32(env->regs[14]);
+ gr[TARGET_REG_PC] = tswap32(env->regs[15]);
+
+ if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_ptr != 0) {
+ /* see get_vfpcontext in sys/arm/arm/exec_machdep.c */
+ target_mcontext_vfp_t *vfp;
+ vfp = lock_user(VERIFY_WRITE, mcp->mc_vfp_ptr, sizeof(*vfp), 0);
+ for (int i = 0; i < 32; i++) {
+ vfp->mcv_reg[i] = tswap64(*aa32_vfp_dreg(env, i));
+ }
+ vfp->mcv_fpscr = tswap32(vfp_get_fpscr(env));
+ unlock_user(vfp, mcp->mc_vfp_ptr, sizeof(*vfp));
+ }
+ return err;
+}
+
+/* Compare to arm/arm/exec_machdep.c set_mcontext() */
+abi_long set_mcontext(CPUARMState *env, target_mcontext_t *mcp, int srflag)
+{
+ int err = 0;
+ const uint32_t *gr = mcp->__gregs;
+ uint32_t cpsr, ccpsr = cpsr_read(env);
+ uint32_t fpscr, mask;
+
+ cpsr = tswap32(gr[TARGET_REG_CPSR]);
+ /*
+ * Only allow certain bits to change, reject attempted changes to non-user
+ * bits. In addition, make sure we're headed for user mode and none of the
+ * interrupt bits are set.
+ */
+ if ((ccpsr & ~CPSR_USER) != (cpsr & ~CPSR_USER)) {
+ return -TARGET_EINVAL;
+ }
+ if ((cpsr & CPSR_M) != ARM_CPU_MODE_USR ||
+ (cpsr & (CPSR_I | CPSR_F)) != 0) {
+ return -TARGET_EINVAL;
+ }
+
+ /*
+ * The movs pc,lr instruction that implements the return to userland masks
+ * these bits out.
+ */
+ mask = cpsr & CPSR_T ? 0x1 : 0x3;
+
+ /*
+ * Make sure that we either have no vfp, or it's the correct size.
+ * FreeBSD just ignores it, though, so maybe we'll need to adjust
+ * things below instead.
+ */
+ if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_size != sizeof(target_mcontext_vfp_t)) {
+ return -TARGET_EINVAL;
+ }
+
+ env->regs[0] = tswap32(gr[TARGET_REG_R0]);
+ env->regs[1] = tswap32(gr[TARGET_REG_R1]);
+ env->regs[2] = tswap32(gr[TARGET_REG_R2]);
+ env->regs[3] = tswap32(gr[TARGET_REG_R3]);
+ env->regs[4] = tswap32(gr[TARGET_REG_R4]);
+ env->regs[5] = tswap32(gr[TARGET_REG_R5]);
+ env->regs[6] = tswap32(gr[TARGET_REG_R6]);
+ env->regs[7] = tswap32(gr[TARGET_REG_R7]);
+ env->regs[8] = tswap32(gr[TARGET_REG_R8]);
+ env->regs[9] = tswap32(gr[TARGET_REG_R9]);
+ env->regs[10] = tswap32(gr[TARGET_REG_R10]);
+ env->regs[11] = tswap32(gr[TARGET_REG_R11]);
+ env->regs[12] = tswap32(gr[TARGET_REG_R12]);
+
+ env->regs[13] = tswap32(gr[TARGET_REG_SP]);
+ env->regs[14] = tswap32(gr[TARGET_REG_LR]);
+ env->regs[15] = tswap32(gr[TARGET_REG_PC] & ~mask);
+ if (mcp->mc_vfp_size != 0 && mcp->mc_vfp_ptr != 0) {
+ /* see set_vfpcontext in sys/arm/arm/exec_machdep.c */
+ target_mcontext_vfp_t *vfp;
+
+ vfp = lock_user(VERIFY_READ, mcp->mc_vfp_ptr, sizeof(*vfp), 1);
+ for (int i = 0; i < 32; i++) {
+ __get_user(*aa32_vfp_dreg(env, i), &vfp->mcv_reg[i]);
+ }
+ __get_user(fpscr, &vfp->mcv_fpscr);
+ vfp_set_fpscr(env, fpscr);
+ unlock_user(vfp, mcp->mc_vfp_ptr, sizeof(target_ucontext_t));
+
+ /*
+ * linux-user sets fpexc, fpinst and fpinst2, but these aren't in
+ * FreeBSD's mcontext, what to do?
+ */
+ }
+ cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC, CPSRWriteByInstr);
+
+ return err;
+}
+
+/* Compare to arm/arm/machdep.c sys_sigreturn() */
+abi_long get_ucontext_sigreturn(CPUARMState *env, abi_ulong target_sf,
+ abi_ulong *target_uc)
+{
+ *target_uc = target_sf;
+
+ return 0;
+}
--- /dev/null
+/*
+ * ARM 32-bit specific prototypes for bsd-user
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TARGET_ARCH_H_
+#define _TARGET_ARCH_H_
+
+#include "qemu.h"
+
+void target_cpu_set_tls(CPUARMState *env, target_ulong newtls);
+target_ulong target_cpu_get_tls(CPUARMState *env);
+
+#endif /* !_TARGET_ARCH_H_ */
--- /dev/null
+/*
+ * arm cpu related code
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "target_arch.h"
+
+void target_cpu_set_tls(CPUARMState *env, target_ulong newtls)
+{
+ if (access_secure_reg(env)) {
+ env->cp15.tpidrurw_s = newtls;
+ env->cp15.tpidruro_s = newtls;
+ return;
+ }
+
+ env->cp15.tpidr_el[0] = newtls;
+ env->cp15.tpidrro_el[0] = newtls;
+}
+
+target_ulong target_cpu_get_tls(CPUARMState *env)
+{
+ if (access_secure_reg(env)) {
+ return env->cp15.tpidruro_s;
+ }
+ return env->cp15.tpidrro_el[0];
+}
--- /dev/null
+/*
+ * arm cpu init and loop
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TARGET_ARCH_CPU_H_
+#define _TARGET_ARCH_CPU_H_
+
+#include "target_arch.h"
+
+#define TARGET_DEFAULT_CPU_MODEL "any"
+
+static inline void target_cpu_init(CPUARMState *env,
+ struct target_pt_regs *regs)
+{
+ int i;
+
+ cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC,
+ CPSRWriteByInstr);
+ for (i = 0; i < 16; i++) {
+ env->regs[i] = regs->uregs[i];
+ }
+}
+
+static inline void target_cpu_loop(CPUARMState *env)
+{
+ int trapnr;
+ target_siginfo_t info;
+ unsigned int n;
+ CPUState *cs = env_cpu(env);
+
+ for (;;) {
+ cpu_exec_start(cs);
+ trapnr = cpu_exec(cs);
+ cpu_exec_end(cs);
+ process_queued_cpu_work(cs);
+ switch (trapnr) {
+ case EXCP_UDEF:
+ {
+ /* See arm/arm/undefined.c undefinedinstruction(); */
+ info.si_addr = env->regs[15];
+
+ /* illegal instruction */
+ info.si_signo = TARGET_SIGILL;
+ info.si_errno = 0;
+ info.si_code = TARGET_ILL_ILLOPC;
+ queue_signal(env, info.si_signo, &info);
+
+ /* TODO: What about instruction emulation? */
+ }
+ break;
+ case EXCP_SWI:
+ case EXCP_BKPT:
+ {
+ /*
+ * system call
+ * See arm/arm/trap.c cpu_fetch_syscall_args()
+ */
+ if (trapnr == EXCP_BKPT) {
+ if (env->thumb) {
+ env->regs[15] += 2;
+ } else {
+ env->regs[15] += 4;
+ }
+ }
+ n = env->regs[7];
+ if (bsd_type == target_freebsd) {
+ int ret;
+ abi_ulong params = get_sp_from_cpustate(env);
+ int32_t syscall_nr = n;
+ int32_t arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8;
+
+ /* See arm/arm/trap.c cpu_fetch_syscall_args() */
+ if (syscall_nr == TARGET_FREEBSD_NR_syscall) {
+ syscall_nr = env->regs[0];
+ arg1 = env->regs[1];
+ arg2 = env->regs[2];
+ arg3 = env->regs[3];
+ get_user_s32(arg4, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg5, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg6, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg7, params);
+ arg8 = 0;
+ } else if (syscall_nr == TARGET_FREEBSD_NR___syscall) {
+ syscall_nr = env->regs[0];
+ arg1 = env->regs[2];
+ arg2 = env->regs[3];
+ get_user_s32(arg3, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg4, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg5, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg6, params);
+ arg7 = 0;
+ arg8 = 0;
+ } else {
+ arg1 = env->regs[0];
+ arg2 = env->regs[1];
+ arg3 = env->regs[2];
+ arg4 = env->regs[3];
+ get_user_s32(arg5, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg6, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg7, params);
+ params += sizeof(int32_t);
+ get_user_s32(arg8, params);
+ }
+ ret = do_freebsd_syscall(env, syscall_nr, arg1, arg2, arg3,
+ arg4, arg5, arg6, arg7, arg8);
+ /*
+ * Compare to arm/arm/vm_machdep.c
+ * cpu_set_syscall_retval()
+ */
+ if (-TARGET_EJUSTRETURN == ret) {
+ /*
+ * Returning from a successful sigreturn syscall.
+ * Avoid clobbering register state.
+ */
+ break;
+ }
+ if (-TARGET_ERESTART == ret) {
+ env->regs[15] -= env->thumb ? 2 : 4;
+ break;
+ }
+ if ((unsigned int)ret >= (unsigned int)(-515)) {
+ ret = -ret;
+ cpsr_write(env, CPSR_C, CPSR_C, CPSRWriteByInstr);
+ env->regs[0] = ret;
+ } else {
+ cpsr_write(env, 0, CPSR_C, CPSRWriteByInstr);
+ env->regs[0] = ret; /* XXX need to handle lseek()? */
+ /* env->regs[1] = 0; */
+ }
+ } else {
+ fprintf(stderr, "qemu: bsd_type (= %d) syscall "
+ "not supported\n", bsd_type);
+ }
+ }
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_PREFETCH_ABORT:
+ /* See arm/arm/trap.c prefetch_abort_handler() */
+ case EXCP_DATA_ABORT:
+ /* See arm/arm/trap.c data_abort_handler() */
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ /* XXX: check env->error_code */
+ info.si_code = 0;
+ info.si_addr = env->exception.vaddress;
+ queue_signal(env, info.si_signo, &info);
+ break;
+ case EXCP_DEBUG:
+ {
+
+ info.si_signo = TARGET_SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ info.si_addr = env->exception.vaddress;
+ queue_signal(env, info.si_signo, &info);
+ }
+ break;
+ case EXCP_ATOMIC:
+ cpu_exec_step_atomic(cs);
+ break;
+ case EXCP_YIELD:
+ /* nothing to do here for user-mode, just resume guest code */
+ break;
+ default:
+ fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
+ trapnr);
+ cpu_dump_state(cs, stderr, 0);
+ abort();
+ } /* switch() */
+ process_pending_signals(env);
+ } /* for (;;) */
+}
+
+static inline void target_cpu_clone_regs(CPUARMState *env, target_ulong newsp)
+{
+ if (newsp) {
+ env->regs[13] = newsp;
+ }
+ env->regs[0] = 0;
+}
+
+static inline void target_cpu_reset(CPUArchState *cpu)
+{
+}
+
+#endif /* !_TARGET_ARCH_CPU_H */
--- /dev/null
+/*
+ * arm ELF definitions
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _TARGET_ARCH_ELF_H_
+#define _TARGET_ARCH_ELF_H_
+
+#define ELF_START_MMAP 0x80000000
+#define ELF_ET_DYN_LOAD_ADDR 0x500000
+
+#define elf_check_arch(x) ((x) == EM_ARM)
+
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_ARM
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+
+#define ELF_HWCAP get_elf_hwcap()
+#define ELF_HWCAP2 get_elf_hwcap2()
+
+#define GET_FEATURE(feat, hwcap) \
+ do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
+
+#define GET_FEATURE_ID(feat, hwcap) \
+ do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
+
+enum {
+ ARM_HWCAP_ARM_SWP = 1 << 0,
+ ARM_HWCAP_ARM_HALF = 1 << 1,
+ ARM_HWCAP_ARM_THUMB = 1 << 2,
+ ARM_HWCAP_ARM_26BIT = 1 << 3,
+ ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
+ ARM_HWCAP_ARM_FPA = 1 << 5,
+ ARM_HWCAP_ARM_VFP = 1 << 6,
+ ARM_HWCAP_ARM_EDSP = 1 << 7,
+ ARM_HWCAP_ARM_JAVA = 1 << 8,
+ ARM_HWCAP_ARM_IWMMXT = 1 << 9,
+ ARM_HWCAP_ARM_CRUNCH = 1 << 10,
+ ARM_HWCAP_ARM_THUMBEE = 1 << 11,
+ ARM_HWCAP_ARM_NEON = 1 << 12,
+ ARM_HWCAP_ARM_VFPv3 = 1 << 13,
+ ARM_HWCAP_ARM_VFPv3D16 = 1 << 14,
+ ARM_HWCAP_ARM_TLS = 1 << 15,
+ ARM_HWCAP_ARM_VFPv4 = 1 << 16,
+ ARM_HWCAP_ARM_IDIVA = 1 << 17,
+ ARM_HWCAP_ARM_IDIVT = 1 << 18,
+ ARM_HWCAP_ARM_VFPD32 = 1 << 19,
+ ARM_HWCAP_ARM_LPAE = 1 << 20,
+ ARM_HWCAP_ARM_EVTSTRM = 1 << 21,
+};
+
+enum {
+ ARM_HWCAP2_ARM_AES = 1 << 0,
+ ARM_HWCAP2_ARM_PMULL = 1 << 1,
+ ARM_HWCAP2_ARM_SHA1 = 1 << 2,
+ ARM_HWCAP2_ARM_SHA2 = 1 << 3,
+ ARM_HWCAP2_ARM_CRC32 = 1 << 4,
+};
+
+static uint32_t get_elf_hwcap(void)
+{
+ ARMCPU *cpu = ARM_CPU(thread_cpu);
+ uint32_t hwcaps = 0;
+
+ hwcaps |= ARM_HWCAP_ARM_SWP;
+ hwcaps |= ARM_HWCAP_ARM_HALF;
+ hwcaps |= ARM_HWCAP_ARM_THUMB;
+ hwcaps |= ARM_HWCAP_ARM_FAST_MULT;
+
+ /* probe for the extra features */
+ /* EDSP is in v5TE and above */
+ GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
+ GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
+ GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
+ GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
+ GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
+ GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);
+ GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA);
+ GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT);
+ GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP);
+
+ if (cpu_isar_feature(aa32_fpsp_v3, cpu) ||
+ cpu_isar_feature(aa32_fpdp_v3, cpu)) {
+ hwcaps |= ARM_HWCAP_ARM_VFPv3;
+ if (cpu_isar_feature(aa32_simd_r32, cpu)) {
+ hwcaps |= ARM_HWCAP_ARM_VFPD32;
+ } else {
+ hwcaps |= ARM_HWCAP_ARM_VFPv3D16;
+ }
+ }
+ GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4);
+
+ return hwcaps;
+}
+
+static uint32_t get_elf_hwcap2(void)
+{
+ ARMCPU *cpu = ARM_CPU(thread_cpu);
+ uint32_t hwcaps = 0;
+
+ GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
+ GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
+ GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
+ GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
+ GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
+ return hwcaps;
+}
+
+#undef GET_FEATURE
+#undef GET_FEATURE_ID
+
+#endif /* _TARGET_ARCH_ELF_H_ */
--- /dev/null
+/*
+ * FreeBSD arm register structures
+ *
+ * Copyright (c) 2015 Stacey Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TARGET_ARCH_REG_H_
+#define _TARGET_ARCH_REG_H_
+
+/* See sys/arm/include/reg.h */
+typedef struct target_reg {
+ uint32_t r[13];
+ uint32_t r_sp;
+ uint32_t r_lr;
+ uint32_t r_pc;
+ uint32_t r_cpsr;
+} target_reg_t;
+
+typedef struct target_fp_reg {
+ uint32_t fp_exponent;
+ uint32_t fp_mantissa_hi;
+    uint32_t fp_mantissa_lo;
+} target_fp_reg_t;
+
+typedef struct target_fpreg {
+ uint32_t fpr_fpsr;
+ target_fp_reg_t fpr[8];
+} target_fpreg_t;
+
+#define tswapreg(ptr) tswapal(ptr)
+
+static inline void target_copy_regs(target_reg_t *regs, const CPUARMState *env)
+{
+ int i;
+
+ for (i = 0; i < 13; i++) {
+ regs->r[i] = tswapreg(env->regs[i + 1]);
+ }
+ regs->r_sp = tswapreg(env->regs[13]);
+ regs->r_lr = tswapreg(env->regs[14]);
+ regs->r_pc = tswapreg(env->regs[15]);
+ regs->r_cpsr = tswapreg(cpsr_read((CPUARMState *)env));
+}
+
+#undef tswapreg
+
+#endif /* !_TARGET_ARCH_REG_H_ */
--- /dev/null
+/*
+ * arm signal definitions
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _TARGET_ARCH_SIGNAL_H_
+#define _TARGET_ARCH_SIGNAL_H_
+
+#include "cpu.h"
+
+#define TARGET_REG_R0 0
+#define TARGET_REG_R1 1
+#define TARGET_REG_R2 2
+#define TARGET_REG_R3 3
+#define TARGET_REG_R4 4
+#define TARGET_REG_R5 5
+#define TARGET_REG_R6 6
+#define TARGET_REG_R7 7
+#define TARGET_REG_R8 8
+#define TARGET_REG_R9 9
+#define TARGET_REG_R10 10
+#define TARGET_REG_R11 11
+#define TARGET_REG_R12 12
+#define TARGET_REG_R13 13
+#define TARGET_REG_R14 14
+#define TARGET_REG_R15 15
+#define TARGET_REG_CPSR 16
+#define TARGET__NGREG 17
+/* Convenience synonyms */
+#define TARGET_REG_FP TARGET_REG_R11
+#define TARGET_REG_SP TARGET_REG_R13
+#define TARGET_REG_LR TARGET_REG_R14
+#define TARGET_REG_PC TARGET_REG_R15
+
+#define TARGET_INSN_SIZE 4 /* arm instruction size */
+
+/* Size of the signal trampoline code. See _sigtramp(). */
+#define TARGET_SZSIGCODE ((abi_ulong)(9 * TARGET_INSN_SIZE))
+
+/* compare to arm/include/_limits.h */
+#define TARGET_MINSIGSTKSZ (1024 * 4) /* min sig stack size */
+#define TARGET_SIGSTKSZ (TARGET_MINSIGSTKSZ + 32768) /* recommended size */
+
+/*
+ * Floating point register state
+ */
+typedef struct target_mcontext_vfp {
+ abi_ullong mcv_reg[32];
+ abi_ulong mcv_fpscr;
+} target_mcontext_vfp_t;
+
+typedef struct target_mcontext {
+ abi_uint __gregs[TARGET__NGREG];
+
+ /*
+ * Originally, rest of this structure was named __fpu, 35 * 4 bytes
+ * long, never accessed from kernel.
+ */
+ abi_ulong mc_vfp_size;
+ abi_ptr mc_vfp_ptr;
+ abi_int mc_spare[33];
+} target_mcontext_t;
+
+#define TARGET_MCONTEXT_SIZE 208
+#define TARGET_UCONTEXT_SIZE 260
+
+#include "target_os_ucontext.h"
+
+struct target_sigframe {
+ target_siginfo_t sf_si; /* saved siginfo */
+ target_ucontext_t sf_uc; /* saved ucontext */
+ target_mcontext_vfp_t sf_vfp; /* actual saved VFP context */
+};
+
+#endif /* !_TARGET_ARCH_SIGNAL_H_ */
--- /dev/null
+/*
+ * arm sysarch() system call emulation
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TARGET_ARCH_SIGTRAMP_H_
+#define _TARGET_ARCH_SIGTRAMP_H_
+
+/* Compare to arm/arm/locore.S ENTRY_NP(sigcode) */
+static inline abi_long setup_sigtramp(abi_ulong offset, unsigned sigf_uc,
+ unsigned sys_sigreturn)
+{
+ int i;
+ uint32_t sys_exit = TARGET_FREEBSD_NR_exit;
+ uint32_t sigtramp_code[] = {
+ /* 1 */ 0xE1A0000D, /* mov r0, sp */
+ /* 2 */ 0xE2800000 + sigf_uc, /* add r0, r0, #SIGF_UC */
+ /* 3 */ 0xE59F700C, /* ldr r7, [pc, #12] */
+ /* 4 */ 0xEF000000 + sys_sigreturn, /* swi (SYS_sigreturn) */
+ /* 5 */ 0xE59F7008, /* ldr r7, [pc, #8] */
+ /* 6 */ 0xEF000000 + sys_exit, /* swi (SYS_exit)*/
+ /* 7 */ 0xEAFFFFFA, /* b . -16 */
+ /* 8 */ sys_sigreturn,
+ /* 9 */ sys_exit
+ };
+
+ G_STATIC_ASSERT(sizeof(sigtramp_code) == TARGET_SZSIGCODE);
+
+ for (i = 0; i < 9; i++) {
+ tswap32s(&sigtramp_code[i]);
+ }
+
+ return memcpy_to_target(offset, sigtramp_code, TARGET_SZSIGCODE);
+}
+#endif /* _TARGET_ARCH_SIGTRAMP_H_ */
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef BSD_USER_ARCH_SYSARCH_H_
-#define BSD_USER_ARCH_SYSARCH_H_
+#ifndef _TARGET_ARCH_SYSARCH_H_
+#define _TARGET_ARCH_SYSARCH_H_
#include "target_syscall.h"
#include "target_arch.h"
}
}
-#endif /*!BSD_USER_ARCH_SYSARCH_H_ */
+#endif /*!_TARGET_ARCH_SYSARCH_H_ */
--- /dev/null
+/*
+ * arm thread support
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _TARGET_ARCH_THREAD_H_
+#define _TARGET_ARCH_THREAD_H_
+
+/* Compare to arm/arm/vm_machdep.c cpu_set_upcall_kse() */
+static inline void target_thread_set_upcall(CPUARMState *env, abi_ulong entry,
+ abi_ulong arg, abi_ulong stack_base, abi_ulong stack_size)
+{
+ abi_ulong sp;
+
+ /*
+ * Make sure the stack is properly aligned.
+     * arm/include/param.h (STACKALIGN() macro)
+ */
+ sp = (u_int)(stack_base + stack_size) & ~0x7;
+
+ /* sp = stack base */
+ env->regs[13] = sp;
+ /* pc = start function entry */
+ env->regs[15] = entry & 0xfffffffe;
+ /* r0 = arg */
+ env->regs[0] = arg;
+ env->spsr = ARM_CPU_MODE_USR;
+ /*
+ * Thumb mode is encoded by the low bit in the entry point (since ARM can't
+ * execute at odd addresses). When it's set, set the Thumb bit (T) in the
+ * CPSR.
+ */
+ cpsr_write(env, (entry & 1) * CPSR_T, CPSR_T, CPSRWriteByInstr);
+}
+
+static inline void target_thread_init(struct target_pt_regs *regs,
+ struct image_info *infop)
+{
+ abi_long stack = infop->start_stack;
+ memset(regs, 0, sizeof(*regs));
+ regs->ARM_cpsr = ARM_CPU_MODE_USR;
+ /*
+ * Thumb mode is encoded by the low bit in the entry point (since ARM can't
+ * execute at odd addresses). When it's set, set the Thumb bit (T) in the
+ * CPSR.
+ */
+ if (infop->entry & 1) {
+ regs->ARM_cpsr |= CPSR_T;
+ }
+ regs->ARM_pc = infop->entry & 0xfffffffe;
+ regs->ARM_sp = stack;
+ if (bsd_type == target_freebsd) {
+ regs->ARM_lr = infop->entry & 0xfffffffe;
+ }
+ /*
+ * FreeBSD kernel passes the ps_strings pointer in r0. This is used by some
+ * programs to set status messages that we see in ps. bsd-user doesn't
+ * support that functionality, so it's ignored. When set to 0, FreeBSD's csu
+ * code ignores it. For the static case, r1 and r2 are effectively ignored
+ * by the csu __startup() routine. For the dynamic case, rtld saves r0 but
+ * generates r1 and r2 and passes them into the csu _startup.
+ *
+ * r0 ps_strings 0 passed since ps arg setting not supported
+ * r1 obj_main ignored by _start(), so 0 passed
+ * r2 cleanup generated by rtld or ignored by _start(), so 0 passed
+ */
+}
+
+#endif /* !_TARGET_ARCH_THREAD_H_ */
--- /dev/null
+/*
+ * arm VM parameters definitions
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _TARGET_ARCH_VMPARAM_H_
+#define _TARGET_ARCH_VMPARAM_H_
+
+#include "cpu.h"
+
+/* compare to sys/arm/include/vmparam.h */
+#define TARGET_MAXTSIZ (64 * MiB) /* max text size */
+#define TARGET_DFLDSIZ (128 * MiB) /* initial data size limit */
+#define TARGET_MAXDSIZ (512 * MiB) /* max data size */
+#define TARGET_DFLSSIZ (4 * MiB) /* initial stack size limit */
+#define TARGET_MAXSSIZ (64 * MiB) /* max stack size */
+#define TARGET_SGROWSIZ (128 * KiB) /* amount to grow stack */
+
+#define TARGET_RESERVED_VA 0xf7000000
+
+ /* KERNBASE - 512 MB */
+#define TARGET_VM_MAXUSER_ADDRESS (0xc0000000 - (512 * MiB))
+#define TARGET_USRSTACK TARGET_VM_MAXUSER_ADDRESS
+
+static inline abi_ulong get_sp_from_cpustate(CPUARMState *state)
+{
+ return state->regs[13]; /* sp */
+}
+
+static inline void set_second_rval(CPUARMState *state, abi_ulong retval2)
+{
+ state->regs[1] = retval2;
+}
+
+#endif /* ! _TARGET_ARCH_VMPARAM_H_ */
-#ifndef BSD_USER_ARCH_SYSCALL_H_
-#define BSD_USER_ARCH_SYSCALL_H_
+/*
+ * arm cpu system call stubs
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TARGET_ARCH_SYSCALL_H_
+#define _TARGET_ARCH_SYSCALL_H_
struct target_pt_regs {
abi_long uregs[17];
#define TARGET_FREEBSD_ARM_GET_TP 3
#define TARGET_HW_MACHINE "arm"
-#define TARGET_HW_MACHINE_ARCH "armv6"
+#define TARGET_HW_MACHINE_ARCH "armv7"
-#endif /* !BSD_USER_ARCH_SYSCALL_H_ */
+#endif /* !_TARGET_ARCH_SYSCALL_H_ */
#ifndef _TARGET_OS_SIGNAL_H_
#define _TARGET_OS_SIGNAL_H_
-/* FreeBSD's sys/ucontext.h defines this */
-#define TARGET_MC_GET_CLEAR_RET 0x0001
-
#include "target_os_siginfo.h"
#include "target_arch_signal.h"
--- /dev/null
+/*
+ * FreeBSD has a common ucontext definition for all architectures.
+ *
+ * Copyright 2021 Warner Losh <imp@bsdimp.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later OR BSD-3-Clause
+ */
+#ifndef TARGET_OS_UCONTEXT_H
+#define TARGET_OS_UCONTEXT_H
+
+/*
+ * Defines the common bits for all of FreeBSD's architectures. Has to be
+ * included AFTER the MD target_mcontext_t is defined, however, so can't
+ * be in the grab-bag that is target_os_signal.h.
+ */
+
+/* See FreeBSD's sys/ucontext.h */
+#define TARGET_MC_GET_CLEAR_RET 0x0001
+
+/* FreeBSD's sys/_ucontext.h structures */
+typedef struct target_ucontext {
+ target_sigset_t uc_sigmask;
+ target_mcontext_t uc_mcontext;
+ abi_ulong uc_link;
+ target_stack_t uc_stack;
+ int32_t uc_flags;
+ int32_t __spare__[4];
+} target_ucontext_t;
+
+G_STATIC_ASSERT(TARGET_MCONTEXT_SIZE == sizeof(target_mcontext_t));
+G_STATIC_ASSERT(TARGET_UCONTEXT_SIZE == sizeof(target_ucontext_t));
+
+struct target_sigframe;
+
+abi_long set_sigtramp_args(CPUArchState *env, int sig,
+ struct target_sigframe *frame,
+ abi_ulong frame_addr,
+ struct target_sigaction *ka);
+abi_long get_mcontext(CPUArchState *regs, target_mcontext_t *mcp, int flags);
+abi_long set_mcontext(CPUArchState *regs, target_mcontext_t *mcp, int srflag);
+abi_long get_ucontext_sigreturn(CPUArchState *regs, abi_ulong target_sf,
+ abi_ulong *target_uc);
+
+#endif /* TARGET_OS_UCONTEXT_H */
--- /dev/null
+/*
+ * i386 dependent signal definitions
+ *
+ * Copyright (c) 2013 Stacey D. Son
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu.h"
+
+/*
+ * Compare to i386/i386/machdep.c sendsig()
+ * Assumes that target stack frame memory is locked.
+ */
+abi_long set_sigtramp_args(CPUX86State *env, int sig,
+ struct target_sigframe *frame,
+ abi_ulong frame_addr,
+ struct target_sigaction *ka)
+{
+ /* XXX return -TARGET_EOPNOTSUPP; */
+ return 0;
+}
+
+/* Compare to i386/i386/machdep.c get_mcontext() */
+abi_long get_mcontext(CPUX86State *regs, target_mcontext_t *mcp, int flags)
+{
+ /* XXX */
+ return -TARGET_EOPNOTSUPP;
+}
+
+/* Compare to i386/i386/machdep.c set_mcontext() */
+abi_long set_mcontext(CPUX86State *regs, target_mcontext_t *mcp, int srflag)
+{
+ /* XXX */
+ return -TARGET_EOPNOTSUPP;
+}
+
+abi_long get_ucontext_sigreturn(CPUX86State *regs, abi_ulong target_sf,
+ abi_ulong *target_uc)
+{
+ /* XXX */
+ *target_uc = 0;
+ return -TARGET_EOPNOTSUPP;
+}
#define TARGET_MINSIGSTKSZ (512 * 4) /* min sig stack size */
#define TARGET_SIGSTKSZ (MINSIGSTKSZ + 32768) /* recommended size */
-struct target_sigcontext {
- /* to be added */
-};
-
typedef struct target_mcontext {
+ abi_ulong mc_onstack; /* XXX - sigcontext compat. */
+ abi_ulong mc_gs; /* machine state (struct trapframe) */
+ abi_ulong mc_fs;
+ abi_ulong mc_es;
+ abi_ulong mc_ds;
+ abi_ulong mc_edi;
+ abi_ulong mc_esi;
+ abi_ulong mc_ebp;
+ abi_ulong mc_isp;
+ abi_ulong mc_ebx;
+ abi_ulong mc_edx;
+ abi_ulong mc_ecx;
+ abi_ulong mc_eax;
+ abi_ulong mc_trapno;
+ abi_ulong mc_err;
+ abi_ulong mc_eip;
+ abi_ulong mc_cs;
+ abi_ulong mc_eflags;
+ abi_ulong mc_esp;
+ abi_ulong mc_ss;
+
+ int32_t mc_len; /* sizeof(mcontext_t) */
+#define _MC_FPFMT_NODEV 0x10000 /* device not present or configured */
+#define _MC_FPFMT_387 0x10001
+#define _MC_FPFMT_XMM 0x10002
+ int32_t mc_fpformat;
+#define _MC_FPOWNED_NONE 0x20000 /* FP state not used */
+#define _MC_FPOWNED_FPU 0x20001 /* FP state came from FPU */
+#define _MC_FPOWNED_PCB 0x20002 /* FP state came from PCB */
+ int32_t mc_ownedfp;
+ abi_ulong mc_flags;
+ /*
+ * See <machine/npx.h> for the internals of mc_fpstate[].
+ */
+ int32_t mc_fpstate[128] __aligned(16);
+
+ abi_ulong mc_fsbase;
+ abi_ulong mc_gsbase;
+
+ abi_ulong mc_xfpustate;
+ abi_ulong mc_xfpustate_len;
+
+ int32_t mc_spare2[4];
} target_mcontext_t;
-typedef struct target_ucontext {
- target_sigset_t uc_sigmask;
- target_mcontext_t uc_mcontext;
- abi_ulong uc_link;
- target_stack_t uc_stack;
- int32_t uc_flags;
- int32_t __spare__[4];
-} target_ucontext_t;
+#define TARGET_MCONTEXT_SIZE 640
+#define TARGET_UCONTEXT_SIZE 704
+
+#include "target_os_ucontext.h"
struct target_sigframe {
abi_ulong sf_signum;
uint32_t __spare__[2];
};
-/*
- * Compare to i386/i386/machdep.c sendsig()
- * Assumes that target stack frame memory is locked.
- */
-static inline abi_long set_sigtramp_args(CPUX86State *regs,
- int sig, struct target_sigframe *frame, abi_ulong frame_addr,
- struct target_sigaction *ka)
-{
- /* XXX return -TARGET_EOPNOTSUPP; */
- return 0;
-}
-
-/* Compare to i386/i386/machdep.c get_mcontext() */
-static inline abi_long get_mcontext(CPUX86State *regs,
- target_mcontext_t *mcp, int flags)
-{
- /* XXX */
- return -TARGET_EOPNOTSUPP;
-}
-
-/* Compare to i386/i386/machdep.c set_mcontext() */
-static inline abi_long set_mcontext(CPUX86State *regs,
- target_mcontext_t *mcp, int srflag)
-{
- /* XXX */
- return -TARGET_EOPNOTSUPP;
-}
-
-static inline abi_long get_ucontext_sigreturn(CPUX86State *regs,
- abi_ulong target_sf, abi_ulong *target_uc)
-{
- /* XXX */
- *target_uc = 0;
- return -TARGET_EOPNOTSUPP;
-}
-
#endif /* TARGET_ARCH_SIGNAL_H */
+++ /dev/null
-/*
- * mips sysarch() system call emulation
- *
- * Copyright (c) 2013 Stacey D. Son
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef BSD_USER_ARCH_SYSARCH_H_
-#define BSD_USER_ARCH_SYSARCH_H_
-
-#include "target_syscall.h"
-#include "target_arch.h"
-
-static inline abi_long do_freebsd_arch_sysarch(CPUMIPSState *env, int op,
- abi_ulong parms)
-{
- int ret = 0;
-
- switch (op) {
- case TARGET_MIPS_SET_TLS:
- target_cpu_set_tls(env, parms);
- break;
-
- case TARGET_MIPS_GET_TLS:
- if (put_user(target_cpu_get_tls(env), parms, abi_ulong)) {
- ret = -TARGET_EFAULT;
- }
- break;
-
- default:
- ret = -TARGET_EINVAL;
- break;
- }
-
- return ret;
-}
-
-static inline void do_freebsd_arch_print_sysarch(
- const struct syscallname *name, abi_long arg1, abi_long arg2,
- abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6)
-{
-
- switch (arg1) {
- case TARGET_MIPS_SET_TLS:
- gemu_log("%s(SET_TLS, 0x" TARGET_ABI_FMT_lx ")", name->name, arg2);
- break;
-
- case TARGET_MIPS_GET_TLS:
- gemu_log("%s(GET_TLS, 0x" TARGET_ABI_FMT_lx ")", name->name, arg2);
- break;
-
- default:
- gemu_log("UNKNOWN OP: %d, " TARGET_ABI_FMT_lx ")", (int)arg1, arg2);
- }
-}
-
-#endif /*!BSD_USER_ARCH_SYSARCH_H_ */
+++ /dev/null
-/*
- * mips system call definitions
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef _MIPS_SYSCALL_H_
-#define _MIPS_SYSCALL_H_
-
-/*
- * struct target_pt_regs defines the way the registers are stored on the stack
- * during a system call.
- */
-
-struct target_pt_regs {
- /* Saved main processor registers. */
- abi_ulong regs[32];
-
- /* Saved special registers. */
- abi_ulong cp0_status;
- abi_ulong lo;
- abi_ulong hi;
- abi_ulong cp0_badvaddr;
- abi_ulong cp0_cause;
- abi_ulong cp0_epc;
-};
-
-#if defined(TARGET_WORDS_BIGENDIAN)
-#define UNAME_MACHINE "mips"
-#else
-#define UNAME_MACHINE "mipsel"
-#endif
-
-#define TARGET_HW_MACHINE "mips"
-#define TARGET_HW_MACHINE_ARCH UNAME_MACHINE
-
-/* sysarch() commands */
-#define TARGET_MIPS_SET_TLS 1
-#define TARGET_MIPS_GET_TLS 2
-
-#endif /* !_MIPS_SYSCALL_H_ */
+++ /dev/null
-/*
- * mips64 sysarch() system call emulation
- *
- * Copyright (c) 2013 Stacey D. Son
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef BSD_USER_ARCH_SYSARCH_H_
-#define BSD_USER_ARCH_SYSARCH_H_
-
-#include "target_syscall.h"
-#include "target_arch.h"
-
-static inline abi_long do_freebsd_arch_sysarch(CPUMIPSState *env, int op,
- abi_ulong parms)
-{
- int ret = 0;
-
- switch (op) {
- case TARGET_MIPS_SET_TLS:
- target_cpu_set_tls(env, parms);
- break;
-
- case TARGET_MIPS_GET_TLS:
- if (put_user(target_cpu_get_tls(env), parms, abi_ulong)) {
- ret = -TARGET_EFAULT;
- }
- break;
-
- default:
- ret = -TARGET_EINVAL;
- break;
- }
-
- return ret;
-}
-
-static inline void do_freebsd_arch_print_sysarch(
- const struct syscallname *name, abi_long arg1, abi_long arg2,
- abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6)
-{
-
- switch (arg1) {
- case TARGET_MIPS_SET_TLS:
- gemu_log("%s(SET_TLS, 0x" TARGET_ABI_FMT_lx ")", name->name, arg2);
- break;
-
- case TARGET_MIPS_GET_TLS:
- gemu_log("%s(GET_TLS, 0x" TARGET_ABI_FMT_lx ")", name->name, arg2);
- break;
-
- default:
- gemu_log("UNKNOWN OP: %d, " TARGET_ABI_FMT_lx ")", (int)arg1, arg2);
- }
-}
-
-#endif /*!BSD_USER_ARCH_SYSARCH_H_ */
+++ /dev/null
-/*
- * mips64 system call definitions
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef _MIPS64_SYSCALL_H_
-#define _MIPS64_SYSCALL_H_
-
-/*
- * struct target_pt_regs defines the way the registers are stored on the stack
- * during a system call.
- */
-
-struct target_pt_regs {
- /* Saved main processor registers. */
- abi_ulong regs[32];
-
- /* Saved special registers. */
- abi_ulong cp0_status;
- abi_ulong lo;
- abi_ulong hi;
- abi_ulong cp0_badvaddr;
- abi_ulong cp0_cause;
- abi_ulong cp0_epc;
-};
-
-
-#if defined(TARGET_WORDS_BIGENDIAN)
-#define UNAME_MACHINE "mips64"
-#else
-#define UNAME_MACHINE "mips64el"
-#endif
-
-#define TARGET_HW_MACHINE "mips"
-#define TARGET_HW_MACHINE_ARCH UNAME_MACHINE
-
-/* sysarch() commands */
-#define TARGET_MIPS_SET_TLS 1
-#define TARGET_MIPS_GET_TLS 2
-
-#endif /* !_MIPS64_SYSCALL_H_ */
--- /dev/null
+/*
+ * x86_64 signal definitions
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu.h"
+
+/*
+ * Compare to amd64/amd64/machdep.c sendsig()
+ * Assumes that target stack frame memory is locked.
+ */
+abi_long set_sigtramp_args(CPUX86State *regs,
+ int sig, struct target_sigframe *frame, abi_ulong frame_addr,
+ struct target_sigaction *ka)
+{
+ /* XXX return -TARGET_EOPNOTSUPP; */
+ return 0;
+}
+
+/* Compare to amd64/amd64/machdep.c get_mcontext() */
+abi_long get_mcontext(CPUX86State *regs,
+ target_mcontext_t *mcp, int flags)
+{
+ /* XXX */
+ return -TARGET_EOPNOTSUPP;
+}
+
+/* Compare to amd64/amd64/machdep.c set_mcontext() */
+abi_long set_mcontext(CPUX86State *regs,
+ target_mcontext_t *mcp, int srflag)
+{
+ /* XXX */
+ return -TARGET_EOPNOTSUPP;
+}
+
+abi_long get_ucontext_sigreturn(CPUX86State *regs,
+ abi_ulong target_sf, abi_ulong *target_uc)
+{
+ /* XXX */
+ *target_uc = 0;
+ return -TARGET_EOPNOTSUPP;
+}
#define TARGET_MINSIGSTKSZ (512 * 4) /* min sig stack size */
#define TARGET_SIGSTKSZ (MINSIGSTKSZ + 32768) /* recommended size */
-struct target_sigcontext {
- /* to be added */
-};
-
typedef struct target_mcontext {
+ abi_ulong mc_onstack; /* XXX - sigcontext compat. */
+ abi_ulong mc_rdi; /* machine state (struct trapframe) */
+ abi_ulong mc_rsi;
+ abi_ulong mc_rdx;
+ abi_ulong mc_rcx;
+ abi_ulong mc_r8;
+ abi_ulong mc_r9;
+ abi_ulong mc_rax;
+ abi_ulong mc_rbx;
+ abi_ulong mc_rbp;
+ abi_ulong mc_r10;
+ abi_ulong mc_r11;
+ abi_ulong mc_r12;
+ abi_ulong mc_r13;
+ abi_ulong mc_r14;
+ abi_ulong mc_r15;
+ uint32_t mc_trapno;
+ uint16_t mc_fs;
+ uint16_t mc_gs;
+ abi_ulong mc_addr;
+ uint32_t mc_flags;
+ uint16_t mc_es;
+ uint16_t mc_ds;
+ abi_ulong mc_err;
+ abi_ulong mc_rip;
+ abi_ulong mc_cs;
+ abi_ulong mc_rflags;
+ abi_ulong mc_rsp;
+ abi_ulong mc_ss;
+
+ abi_long mc_len; /* sizeof(mcontext_t) */
+
+#define _MC_FPFMT_NODEV 0x10000 /* device not present or configured */
+#define _MC_FPFMT_XMM 0x10002
+ abi_long mc_fpformat;
+#define _MC_FPOWNED_NONE 0x20000 /* FP state not used */
+#define _MC_FPOWNED_FPU 0x20001 /* FP state came from FPU */
+#define _MC_FPOWNED_PCB 0x20002 /* FP state came from PCB */
+ abi_long mc_ownedfp;
+ /*
+ * See <machine/fpu.h> for the internals of mc_fpstate[].
+ */
+ abi_long mc_fpstate[64] __aligned(16);
+
+ abi_ulong mc_fsbase;
+ abi_ulong mc_gsbase;
+
+ abi_ulong mc_xfpustate;
+ abi_ulong mc_xfpustate_len;
+
+ abi_long mc_spare[4];
} target_mcontext_t;
-typedef struct target_ucontext {
- target_sigset_t uc_sigmask;
- target_mcontext_t uc_mcontext;
- abi_ulong uc_link;
- target_stack_t uc_stack;
- int32_t uc_flags;
- int32_t __spare__[4];
-} target_ucontext_t;
+#define TARGET_MCONTEXT_SIZE 800
+#define TARGET_UCONTEXT_SIZE 880
+
+#include "target_os_ucontext.h"
struct target_sigframe {
abi_ulong sf_signum;
uint32_t __spare__[2];
};
-/*
- * Compare to amd64/amd64/machdep.c sendsig()
- * Assumes that target stack frame memory is locked.
- */
-static inline abi_long set_sigtramp_args(CPUX86State *regs,
- int sig, struct target_sigframe *frame, abi_ulong frame_addr,
- struct target_sigaction *ka)
-{
- /* XXX return -TARGET_EOPNOTSUPP; */
- return 0;
-}
-
-/* Compare to amd64/amd64/machdep.c get_mcontext() */
-static inline abi_long get_mcontext(CPUX86State *regs,
- target_mcontext_t *mcp, int flags)
-{
- /* XXX */
- return -TARGET_EOPNOTSUPP;
-}
-
-/* Compare to amd64/amd64/machdep.c set_mcontext() */
-static inline abi_long set_mcontext(CPUX86State *regs,
- target_mcontext_t *mcp, int srflag)
-{
- /* XXX */
- return -TARGET_EOPNOTSUPP;
-}
-
-static inline abi_long get_ucontext_sigreturn(CPUX86State *regs,
- abi_ulong target_sf, abi_ulong *target_uc)
-{
- /* XXX */
- *target_uc = 0;
- return -TARGET_EOPNOTSUPP;
-}
-
#endif /* !TARGET_ARCH_SIGNAL_H_ */
NULL);
}
- if (ret == QIO_CHANNEL_ERR_BLOCK) {
- errno = EAGAIN;
- ret = -1;
- } else if (ret == -1) {
- errno = EIO;
- }
-
if (msgfds_num) {
/* close and clean read_msgfds */
for (i = 0; i < s->read_msgfds_num; i++) {
#endif
}
+ if (ret == QIO_CHANNEL_ERR_BLOCK) {
+ errno = EAGAIN;
+ ret = -1;
+ } else if (ret == -1) {
+ errno = EIO;
+ }
+
return ret;
}
{
SocketChardev *s = SOCKET_CHARDEV(chr);
int size;
+ int saved_errno;
if (s->state != TCP_CHARDEV_STATE_CONNECTED) {
return 0;
qio_channel_set_blocking(s->ioc, true, NULL);
size = tcp_chr_recv(chr, (void *) buf, len);
+ saved_errno = errno;
if (s->state != TCP_CHARDEV_STATE_DISCONNECTED) {
qio_channel_set_blocking(s->ioc, false, NULL);
}
tcp_chr_disconnect(chr);
}
+ errno = saved_errno;
return size;
}
pop %ebp
.cfi_adjust_cfa_offset -4
.cfi_restore ebp
+ mov %eax, 4(%esp)
jmp safe_syscall_set_errno_tail
.cfi_endproc
1: USE_ALT_CP(t0)
SETUP_GPX(t1)
SETUP_GPX64(t0, t1)
+ move a0, v0
PTR_LA t9, safe_syscall_set_errno_tail
jr t9
1: pop %rbp
.cfi_def_cfa_offset 8
.cfi_restore rbp
+ mov %eax, %edi
jmp safe_syscall_set_errno_tail
.cfi_endproc
--- /dev/null
+TARGET_ARCH=arm
+TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml
cpu_list_remove(cpu);
}
+/*
+ * This can't go in hw/core/cpu.c because that file is compiled only
+ * once for both user-mode and system builds.
+ */
static Property cpu_common_props[] = {
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ /*
+ * Create a property for the user-only object, so users can
+ * adjust prctl(PR_SET_UNALIGN) from the command-line.
+ * Has no effect if the target does not support the feature.
+ */
+ DEFINE_PROP_BOOL("prctl-unalign-sigbus", CPUState,
+ prctl_unalign_sigbus, false),
+#else
/*
- * Create a memory property for softmmu CPU object,
- * so users can wire up its memory. (This can't go in hw/core/cpu.c
- * because that file is compiled only once for both user-mode
- * and system builds.) The default if no link is set up is to use
+ * Create a memory property for softmmu CPU object, so users can
+ * wire up its memory. The default if no link is set up is to use
* the system address space.
*/
DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
{
return print_insn_riscv(memaddr, info, rv64);
}
+
+int print_insn_riscv128(bfd_vma memaddr, struct disassemble_info *info)
+{
+ return print_insn_riscv(memaddr, info, rv128);
+}
Use ``-display sdl,window-close=...`` instead (i.e. with a minus instead of
an underscore between "window" and "close").
-``-no-quit`` (since 6.1)
-''''''''''''''''''''''''
-
-The ``-no-quit`` is a synonym for ``-display ...,window-close=off`` which
-should be used instead.
-
``-alt-grab`` and ``-display sdl,alt_grab=on`` (since 6.2)
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
QEMU 5.1 changes the default behaviour from ``-bios none`` to ``-bios default``
for the RISC-V ``virt`` machine and ``sifive_u`` machine.
+``-no-quit`` (removed in 7.0)
+'''''''''''''''''''''''''''''
+
+The ``-no-quit`` option was a synonym for ``-display ...,window-close=off``,
+which should be used instead.
+
+
QEMU Machine Protocol (QMP) commands
------------------------------------
vhost-user
vhost-user-gpu
vhost-vdpa
+ virtio-balloon-stats
--- /dev/null
+Virtio balloon memory statistics
+================================
+
+The virtio balloon driver supports guest memory statistics reporting. These
+statistics are available to QEMU users as QOM (QEMU Object Model) device
+properties via a polling mechanism.
+
+Before querying the available stats, clients first have to enable polling.
+This is done by writing a time interval value (in seconds) to the
+guest-stats-polling-interval property. This value can be:
+
+ > 0
+    enables polling at the specified interval. If polling is already
+    enabled, the polling time interval is changed to the new value
+
+ 0
+    disables polling. Previously polled statistics are still valid and
+ can be queried.
+
+Once polling is enabled, the virtio-balloon device in QEMU will start
+polling the guest's balloon driver for new stats at the specified time
+interval.
+
+To retrieve those stats, clients have to query the guest-stats property,
+which will return a dictionary containing:
+
+ * A key named 'stats', containing all available stats. If the guest
+ doesn't support a particular stat, or if it couldn't be retrieved,
+ its value will be -1. Currently, the following stats are supported:
+
+ - stat-swap-in
+ - stat-swap-out
+ - stat-major-faults
+ - stat-minor-faults
+ - stat-free-memory
+ - stat-total-memory
+ - stat-available-memory
+ - stat-disk-caches
+ - stat-htlb-pgalloc
+ - stat-htlb-pgfail
+
+ * A key named last-update, which contains the last stats update
+ timestamp in seconds. Since this timestamp is generated by the host,
+ a buggy guest can't influence its value. The value is 0 if the guest
+ has not updated the stats (yet).
+
+It's also important to note the following:
+
+ - Previously polled statistics remain available even if the polling is
+ later disabled
+
+ - As noted above, if a guest doesn't support a particular stat its value
+ will always be -1. However, it's also possible that a guest temporarily
+ couldn't update one or even all stats. If this happens, just wait for
+ the next update
+
+ - Polling can be enabled even if the guest doesn't have stats support
+ or the balloon driver wasn't loaded in the guest. If this is the case
+ and stats are queried, last-update will be 0.
+
+ - The polling timer is only re-armed when the guest responds to the
+ statistics request. This means that if a (buggy) guest doesn't ever
+ respond to the request the timer will never be re-armed, which has
+ the same effect as disabling polling
+
+Here are a few examples. QEMU is started with ``-device virtio-balloon``,
+which generates ``/machine/peripheral-anon/device[1]`` as the QOM path for
+the balloon device.
+
+Enable polling with 2 seconds interval::
+
+ { "execute": "qom-set",
+ "arguments": { "path": "/machine/peripheral-anon/device[1]",
+ "property": "guest-stats-polling-interval", "value": 2 } }
+
+ { "return": {} }
+
+Change polling to 10 seconds::
+
+ { "execute": "qom-set",
+ "arguments": { "path": "/machine/peripheral-anon/device[1]",
+ "property": "guest-stats-polling-interval", "value": 10 } }
+
+ { "return": {} }
+
+Get stats::
+
+ { "execute": "qom-get",
+ "arguments": { "path": "/machine/peripheral-anon/device[1]",
+ "property": "guest-stats" } }
+ {
+ "return": {
+ "stats": {
+ "stat-swap-out": 0,
+ "stat-free-memory": 844943360,
+ "stat-minor-faults": 219028,
+ "stat-major-faults": 235,
+ "stat-total-memory": 1044406272,
+ "stat-swap-in": 0
+ },
+ "last-update": 1358529861
+ }
+ }
+
+Disable polling::
+
+ { "execute": "qom-set",
+ "arguments": { "path": "/machine/peripheral-anon/device[1]",
+                   "property": "guest-stats-polling-interval", "value": 0 } }
+
+ { "return": {} }
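+
+For quick manual experiments, the same property can also be set from the HMP
+monitor (a sketch, assuming the monitor's ``qom-set`` command)::
+
+    (qemu) qom-set /machine/peripheral-anon/device[1] guest-stats-polling-interval 2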
# Author: Marc-André Lureau <marcandre.lureau@redhat.com>
"""dbus-doc is a Sphinx extension that provides documentation from D-Bus XML."""
+from docutils.parsers.rst import Directive
from sphinx.application import Sphinx
-from sphinx.util.docutils import SphinxDirective
from typing import Any, Dict
-class FakeDBusDocDirective(SphinxDirective):
+class FakeDBusDocDirective(Directive):
has_content = True
required_arguments = 1
   any of the listed names. If no *PATTERN* is given, all possible
   probes will be listed.
- For example, to list all probes available in the ``qemu-system-x86_64``
+ For example, to list all probes available in the |qemu_system|
binary:
- ::
+ .. parsed-literal::
- $ qemu-trace-stap list qemu-system-x86_64
+ $ qemu-trace-stap list |qemu_system|
To filter the list to only cover probes related to QEMU's cryptographic
subsystem, in a binary outside ``$PATH``
- ::
+ .. parsed-literal::
- $ qemu-trace-stap list /opt/qemu/4.0.0/bin/qemu-system-x86_64 'qcrypto*'
+ $ qemu-trace-stap list /opt/qemu/|version|/bin/|qemu_system| 'qcrypto*'
.. option:: run OPTIONS BINARY PATTERN...
Restrict the tracing session so that it only triggers for the process
identified by *PID*.
- For example, to monitor all processes executing ``qemu-system-x86_64``
+ For example, to monitor all processes executing |qemu_system|
as found on ``$PATH``, displaying all I/O related probes:
- ::
+ .. parsed-literal::
- $ qemu-trace-stap run qemu-system-x86_64 'qio*'
+ $ qemu-trace-stap run |qemu_system| 'qio*'
To monitor only the QEMU process with PID 1732
- ::
+ .. parsed-literal::
- $ qemu-trace-stap run --pid=1732 qemu-system-x86_64 'qio*'
+ $ qemu-trace-stap run --pid=1732 |qemu_system| 'qio*'
To monitor QEMU processes running an alternative binary outside of
``$PATH``, displaying verbose information about setup of the
tracing environment:
- ::
+ .. parsed-literal::
- $ qemu-trace-stap -v run /opt/qemu/4.0.0/qemu-system-x86_64 'qio*'
+ $ qemu-trace-stap -v run /opt/qemu/|version|/bin/|qemu_system| 'qio*'
See also
--------
+++ /dev/null
-virtio balloon memory statistics
-================================
-
-The virtio balloon driver supports guest memory statistics reporting. These
-statistics are available to QEMU users as QOM (QEMU Object Model) device
-properties via a polling mechanism.
-
-Before querying the available stats, clients first have to enable polling.
-This is done by writing a time interval value (in seconds) to the
-guest-stats-polling-interval property. This value can be:
-
- > 0 enables polling in the specified interval. If polling is already
- enabled, the polling time interval is changed to the new value
-
- 0 disables polling. Previous polled statistics are still valid and
- can be queried.
-
-Once polling is enabled, the virtio-balloon device in QEMU will start
-polling the guest's balloon driver for new stats in the specified time
-interval.
-
-To retrieve those stats, clients have to query the guest-stats property,
-which will return a dictionary containing:
-
- o A key named 'stats', containing all available stats. If the guest
- doesn't support a particular stat, or if it couldn't be retrieved,
- its value will be -1. Currently, the following stats are supported:
-
- - stat-swap-in
- - stat-swap-out
- - stat-major-faults
- - stat-minor-faults
- - stat-free-memory
- - stat-total-memory
- - stat-available-memory
- - stat-disk-caches
- - stat-htlb-pgalloc
- - stat-htlb-pgfail
-
- o A key named last-update, which contains the last stats update
- timestamp in seconds. Since this timestamp is generated by the host,
- a buggy guest can't influence its value. The value is 0 if the guest
- has not updated the stats (yet).
-
-It's also important to note the following:
-
- - Previously polled statistics remain available even if the polling is
- later disabled
-
- - As noted above, if a guest doesn't support a particular stat its value
- will always be -1. However, it's also possible that a guest temporarily
- couldn't update one or even all stats. If this happens, just wait for
- the next update
-
- - Polling can be enabled even if the guest doesn't have stats support
- or the balloon driver wasn't loaded in the guest. If this is the case
- and stats are queried, last-update will be 0.
-
- - The polling timer is only re-armed when the guest responds to the
- statistics request. This means that if a (buggy) guest doesn't ever
- respond to the request the timer will never be re-armed, which has
- the same effect as disabling polling
-
-Here are a few examples. QEMU is started with '-device virtio-balloon',
-which generates '/machine/peripheral-anon/device[1]' as the QOM path for
-the balloon device.
-
-Enable polling with 2 seconds interval:
-
-{ "execute": "qom-set",
- "arguments": { "path": "/machine/peripheral-anon/device[1]",
- "property": "guest-stats-polling-interval", "value": 2 } }
-
-{ "return": {} }
-
-Change polling to 10 seconds:
-
-{ "execute": "qom-set",
- "arguments": { "path": "/machine/peripheral-anon/device[1]",
- "property": "guest-stats-polling-interval", "value": 10 } }
-
-{ "return": {} }
-
-Get stats:
-
-{ "execute": "qom-get",
- "arguments": { "path": "/machine/peripheral-anon/device[1]",
- "property": "guest-stats" } }
-{
- "return": {
- "stats": {
- "stat-swap-out": 0,
- "stat-free-memory": 844943360,
- "stat-minor-faults": 219028,
- "stat-major-faults": 235,
- "stat-total-memory": 1044406272,
- "stat-swap-in": 0
- },
- "last-update": 1358529861
- }
-}
-
-Disable polling:
-
-{ "execute": "qom-set",
- "arguments": { "path": "/machine/peripheral-anon/device[1]",
- "property": "stats-polling-interval", "value": 0 } }
-
-{ "return": {} }
struct acpi_table_header *hdr = (void *)(u - sizeof(hdr->_length));
if (memcmp(hdr->sig, "SLIC", 4) == 0) {
- oem->id = hdr->oem_id;
- oem->table_id = hdr->oem_table_id;
+ oem->id = g_strndup(hdr->oem_id, 6);
+ oem->table_id = g_strndup(hdr->oem_table_id, 8);
return 0;
}
}
static void acpi_pcihp_disable_root_bus(void)
{
- static bool root_hp_disabled;
Object *host = acpi_get_i386_pci_host();
PCIBus *bus;
- if (root_hp_disabled) {
- return;
- }
-
bus = PCI_HOST_BRIDGE(host)->bus;
- if (bus) {
+ if (bus && qbus_is_hotpluggable(BUS(bus))) {
/* setting the hotplug handler to NULL makes the bus non-hotpluggable */
qbus_set_hotplug_handler(BUS(bus), NULL);
}
- root_hp_disabled = true;
+
return;
}
}
bus = acpi_pcihp_find_hotplug_bus(s, s->hotplug_select);
+ if (!bus) {
+ break;
+ }
QTAILQ_FOREACH_SAFE(kid, &bus->qbus.children, sibling, next) {
Object *o = OBJECT(kid->child);
PCIDevice *dev = PCI_DEVICE(o);
#include "sysemu/sysemu.h"
#define ASPEED_SOC_IOMEM_SIZE 0x00200000
+#define ASPEED_SOC_DPMCU_SIZE 0x00040000
static const hwaddr aspeed_soc_ast2600_memmap[] = {
[ASPEED_DEV_SRAM] = 0x10000000,
+ [ASPEED_DEV_DPMCU] = 0x18000000,
    /* 0x16000000 0x17FFFFFF : AHB BUS to LPC Bus bridge */
[ASPEED_DEV_IOMEM] = 0x1E600000,
[ASPEED_DEV_PWM] = 0x1E610000,
[ASPEED_DEV_SCU] = 0x1E6E2000,
[ASPEED_DEV_XDMA] = 0x1E6E7000,
[ASPEED_DEV_ADC] = 0x1E6E9000,
+ [ASPEED_DEV_DP] = 0x1E6EB000,
[ASPEED_DEV_VIDEO] = 0x1E700000,
[ASPEED_DEV_SDHCI] = 0x1E740000,
[ASPEED_DEV_EMMC] = 0x1E750000,
[ASPEED_DEV_ETH3] = 32,
[ASPEED_DEV_ETH4] = 33,
[ASPEED_DEV_KCS] = 138, /* 138 -> 142 */
+ [ASPEED_DEV_DP] = 62,
};
static qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int ctrl)
memory_region_add_subregion(get_system_memory(),
sc->memmap[ASPEED_DEV_SRAM], &s->sram);
+ /* DPMCU */
+ create_unimplemented_device("aspeed.dpmcu", sc->memmap[ASPEED_DEV_DPMCU],
+ ASPEED_SOC_DPMCU_SIZE);
+
/* SCU */
if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) {
return;
*/
}
+static void kudo_bmc_i2c_init(NPCM7xxState *soc)
+{
+ I2CSlave *i2c_mux;
+
+ i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 1), TYPE_PCA9548, 0x75);
+ i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 1), TYPE_PCA9548, 0x77);
+
+ i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 4), TYPE_PCA9548, 0x77);
+
+ at24c_eeprom_init(soc, 4, 0x50, 8192); /* mbfru */
+
+ i2c_mux = i2c_slave_create_simple(npcm7xx_i2c_get_bus(soc, 13),
+ TYPE_PCA9548, 0x77);
+
+ /* tmp105 is compatible with the lm75 */
+ i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 2), "tmp105", 0x48);
+ i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 3), "tmp105", 0x49);
+ i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 4), "tmp105", 0x48);
+ i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 5), "tmp105", 0x49);
+
+ at24c_eeprom_init(soc, 14, 0x55, 8192); /* bmcfru */
+
+ /* TODO: Add remaining i2c devices. */
+}
+
static void npcm750_evb_init(MachineState *machine)
{
NPCM7xxState *soc;
npcm7xx_connect_flash(&soc->fiu[1], 0, "mx66u51235f",
drive_get(IF_MTD, 3, 0));
+ kudo_bmc_i2c_init(soc);
+ sdhci_attach_drive(&soc->mmc.sdhci, 0);
npcm7xx_load_kernel(machine, soc);
}
Aml *dev = aml_device("TPM0");
aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101")));
+ aml_append(dev, aml_name_decl("_STR", aml_string("TPM 2.0 Device")));
aml_append(dev, aml_name_decl("_UID", aml_int(0)));
Aml *crs = aml_resource_template();
smbios_set_defaults("QEMU", product,
vmc->smbios_old_sys_ver ? "1.0" : mc->name, false,
- true, SMBIOS_ENTRY_POINT_30);
+ true, SMBIOS_ENTRY_POINT_TYPE_64);
smbios_get_tables(MACHINE(vms), NULL, 0,
&smbios_tables, &smbios_tables_len,
}
type_init(machvirt_machine_init);
+static void virt_machine_7_0_options(MachineClass *mc)
+{
+}
+DEFINE_VIRT_MACHINE_AS_LATEST(7, 0)
+
static void virt_machine_6_2_options(MachineClass *mc)
{
+ virt_machine_7_0_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
}
-DEFINE_VIRT_MACHINE_AS_LATEST(6, 2)
+DEFINE_VIRT_MACHINE(6, 2)
static void virt_machine_6_1_options(MachineClass *mc)
{
&local_err);
if (ret < 0) {
error_report_err(local_err);
- return -1;
+ return ret;
}
/* valid for resize only */
VHostUserBlk *s = VHOST_USER_BLK(vdev);
/* Turn on pre-defined features */
+ virtio_add_feature(&features, VIRTIO_BLK_F_SIZE_MAX);
virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
*errp = NULL;
}
ret = vhost_user_blk_realize_connect(s, errp);
- } while (ret == -EPROTO && retries--);
+ } while (ret < 0 && retries--);
if (ret < 0) {
goto virtio_err;
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-pci.h"
+GlobalProperty hw_compat_6_2[] = {};
+const size_t hw_compat_6_2_len = G_N_ELEMENTS(hw_compat_6_2);
+
GlobalProperty hw_compat_6_1[] = {
{ "vhost-user-vsock-device", "seqpacket", "off" },
{ "nvme-ns", "shared", "off" },
memory_region_init_ram(&s->mem_vram, OBJECT(dev), "macfb-vram",
MACFB_VRAM_SIZE, &error_abort);
+ memory_region_set_log(&s->mem_vram, true, DIRTY_MEMORY_VGA);
s->vram = memory_region_get_ram_ptr(&s->mem_vram);
s->vram_bit_mask = MACFB_VRAM_SIZE - 1;
- memory_region_set_coalescing(&s->mem_vram);
s->vbl_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, macfb_vbl_timer, s);
macfb_update_mode(s);
s->chan[ch].state = DMA_CHAN_STATE_IDLE;
}
-static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size)
+static uint64_t sifive_pdma_readq(SiFivePDMAState *s, int ch, hwaddr offset)
{
- SiFivePDMAState *s = opaque;
- int ch = SIFIVE_PDMA_CHAN_NO(offset);
uint64_t val = 0;
- if (ch >= SIFIVE_PDMA_CHANS) {
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
- __func__, ch);
- return 0;
+ offset &= 0xfff;
+ switch (offset) {
+ case DMA_NEXT_BYTES:
+ val = s->chan[ch].next_bytes;
+ break;
+ case DMA_NEXT_DST:
+ val = s->chan[ch].next_dst;
+ break;
+ case DMA_NEXT_SRC:
+ val = s->chan[ch].next_src;
+ break;
+ case DMA_EXEC_BYTES:
+ val = s->chan[ch].exec_bytes;
+ break;
+ case DMA_EXEC_DST:
+ val = s->chan[ch].exec_dst;
+ break;
+ case DMA_EXEC_SRC:
+ val = s->chan[ch].exec_src;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Unexpected 64-bit access to 0x%" HWADDR_PRIX "\n",
+ __func__, offset);
+ break;
}
+ return val;
+}
+
+static uint32_t sifive_pdma_readl(SiFivePDMAState *s, int ch, hwaddr offset)
+{
+ uint32_t val = 0;
+
offset &= 0xfff;
switch (offset) {
case DMA_CONTROL:
val = s->chan[ch].next_config;
break;
case DMA_NEXT_BYTES:
- val = s->chan[ch].next_bytes;
+ val = extract64(s->chan[ch].next_bytes, 0, 32);
+ break;
+ case DMA_NEXT_BYTES + 4:
+ val = extract64(s->chan[ch].next_bytes, 32, 32);
break;
case DMA_NEXT_DST:
- val = s->chan[ch].next_dst;
+ val = extract64(s->chan[ch].next_dst, 0, 32);
+ break;
+ case DMA_NEXT_DST + 4:
+ val = extract64(s->chan[ch].next_dst, 32, 32);
break;
case DMA_NEXT_SRC:
- val = s->chan[ch].next_src;
+ val = extract64(s->chan[ch].next_src, 0, 32);
+ break;
+ case DMA_NEXT_SRC + 4:
+ val = extract64(s->chan[ch].next_src, 32, 32);
break;
case DMA_EXEC_CONFIG:
val = s->chan[ch].exec_config;
break;
case DMA_EXEC_BYTES:
- val = s->chan[ch].exec_bytes;
+ val = extract64(s->chan[ch].exec_bytes, 0, 32);
+ break;
+ case DMA_EXEC_BYTES + 4:
+ val = extract64(s->chan[ch].exec_bytes, 32, 32);
break;
case DMA_EXEC_DST:
- val = s->chan[ch].exec_dst;
+ val = extract64(s->chan[ch].exec_dst, 0, 32);
+ break;
+ case DMA_EXEC_DST + 4:
+ val = extract64(s->chan[ch].exec_dst, 32, 32);
break;
case DMA_EXEC_SRC:
- val = s->chan[ch].exec_src;
+ val = extract64(s->chan[ch].exec_src, 0, 32);
+ break;
+ case DMA_EXEC_SRC + 4:
+ val = extract64(s->chan[ch].exec_src, 32, 32);
break;
default:
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Unexpected 32-bit access to 0x%" HWADDR_PRIX "\n",
__func__, offset);
break;
}
return val;
}
-static void sifive_pdma_write(void *opaque, hwaddr offset,
- uint64_t value, unsigned size)
+static uint64_t sifive_pdma_read(void *opaque, hwaddr offset, unsigned size)
{
SiFivePDMAState *s = opaque;
int ch = SIFIVE_PDMA_CHAN_NO(offset);
- bool claimed, run;
+ uint64_t val = 0;
if (ch >= SIFIVE_PDMA_CHANS) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
__func__, ch);
- return;
+ return 0;
+ }
+
+ switch (size) {
+ case 8:
+ val = sifive_pdma_readq(s, ch, offset);
+ break;
+ case 4:
+ val = sifive_pdma_readl(s, ch, offset);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid read size %u to PDMA\n",
+ __func__, size);
+ return 0;
}
+ return val;
+}
+
+static void sifive_pdma_writeq(SiFivePDMAState *s, int ch,
+ hwaddr offset, uint64_t value)
+{
+ offset &= 0xfff;
+ switch (offset) {
+ case DMA_NEXT_BYTES:
+ s->chan[ch].next_bytes = value;
+ break;
+ case DMA_NEXT_DST:
+ s->chan[ch].next_dst = value;
+ break;
+ case DMA_NEXT_SRC:
+ s->chan[ch].next_src = value;
+ break;
+ case DMA_EXEC_BYTES:
+ case DMA_EXEC_DST:
+ case DMA_EXEC_SRC:
+ /* these are read-only registers */
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Unexpected 64-bit access to 0x%" HWADDR_PRIX "\n",
+ __func__, offset);
+ break;
+ }
+}
+
+static void sifive_pdma_writel(SiFivePDMAState *s, int ch,
+ hwaddr offset, uint32_t value)
+{
+ bool claimed, run;
+
offset &= 0xfff;
switch (offset) {
case DMA_CONTROL:
s->chan[ch].next_config = value;
break;
case DMA_NEXT_BYTES:
- s->chan[ch].next_bytes = value;
+ s->chan[ch].next_bytes =
+ deposit64(s->chan[ch].next_bytes, 0, 32, value);
+ break;
+ case DMA_NEXT_BYTES + 4:
+ s->chan[ch].next_bytes =
+ deposit64(s->chan[ch].next_bytes, 32, 32, value);
break;
case DMA_NEXT_DST:
- s->chan[ch].next_dst = value;
+ s->chan[ch].next_dst = deposit64(s->chan[ch].next_dst, 0, 32, value);
+ break;
+ case DMA_NEXT_DST + 4:
+ s->chan[ch].next_dst = deposit64(s->chan[ch].next_dst, 32, 32, value);
break;
case DMA_NEXT_SRC:
- s->chan[ch].next_src = value;
+ s->chan[ch].next_src = deposit64(s->chan[ch].next_src, 0, 32, value);
+ break;
+ case DMA_NEXT_SRC + 4:
+ s->chan[ch].next_src = deposit64(s->chan[ch].next_src, 32, 32, value);
break;
case DMA_EXEC_CONFIG:
case DMA_EXEC_BYTES:
+ case DMA_EXEC_BYTES + 4:
case DMA_EXEC_DST:
+ case DMA_EXEC_DST + 4:
case DMA_EXEC_SRC:
+ case DMA_EXEC_SRC + 4:
/* these are read-only registers */
break;
default:
- qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Unexpected 32-bit access to 0x%" HWADDR_PRIX "\n",
__func__, offset);
break;
}
}
+static void sifive_pdma_write(void *opaque, hwaddr offset,
+ uint64_t value, unsigned size)
+{
+ SiFivePDMAState *s = opaque;
+ int ch = SIFIVE_PDMA_CHAN_NO(offset);
+
+ if (ch >= SIFIVE_PDMA_CHANS) {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n",
+ __func__, ch);
+ return;
+ }
+
+ switch (size) {
+ case 8:
+ sifive_pdma_writeq(s, ch, offset, value);
+ break;
+ case 4:
+ sifive_pdma_writel(s, ch, offset, (uint32_t) value);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid write size %u to PDMA\n",
+ __func__, size);
+ break;
+ }
+}
+
static const MemoryRegionOps sifive_pdma_ops = {
.read = sifive_pdma_read,
.write = sifive_pdma_write,
.impl = {
.min_access_size = 4,
.max_access_size = 8,
+ },
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
}
};
dev = aml_device("TPM");
aml_append(dev, aml_name_decl("_HID",
aml_string("MSFT0101")));
+ aml_append(dev,
+ aml_name_decl("_STR",
+ aml_string("TPM 2.0 Device")));
} else {
dev = aml_device("ISA.TPM");
aml_append(dev, aml_name_decl("_HID",
aml_eisaid("PNP0C31")));
}
+ aml_append(dev, aml_name_decl("_UID", aml_int(1)));
aml_append(dev, aml_name_decl("_STA", aml_int(0xF)));
crs = aml_resource_template();
if (TPM_IS_CRB(tpm)) {
dev = aml_device("TPM");
aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101")));
+ aml_append(dev, aml_name_decl("_STR",
+ aml_string("TPM 2.0 Device")));
crs = aml_resource_template();
aml_append(crs, aml_memory32_fixed(TPM_CRB_ADDR_BASE,
TPM_CRB_ADDR_SIZE, AML_READ_WRITE));
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(dev, aml_name_decl("_STA", aml_int(0xf)));
+ aml_append(dev, aml_name_decl("_UID", aml_int(1)));
tpm_build_ppi_acpi(tpm, dev);
/* Cleanup memory that's no longer used. */
g_array_free(table_offsets, true);
+ g_free(slic_oem.id);
+ g_free(slic_oem.table_id);
}
static void acpi_ram_update(MemoryRegion *mr, GArray *data)
* 1st-level translation or 2nd-level translation, it depends
* on PGTT setting.
*/
-static bool vtd_dev_pt_enabled(VTDAddressSpace *as)
+static bool vtd_dev_pt_enabled(IntelIOMMUState *s, VTDContextEntry *ce)
+{
+ VTDPASIDEntry pe;
+ int ret;
+
+ if (s->root_scalable) {
+ ret = vtd_ce_get_rid2pasid_entry(s, ce, &pe);
+ if (ret) {
+ error_report_once("%s: vtd_ce_get_rid2pasid_entry error: %"PRId32,
+ __func__, ret);
+ return false;
+ }
+ return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT);
+ }
+
+ return (vtd_ce_get_type(ce) == VTD_CONTEXT_TT_PASS_THROUGH);
+
+}
+
+static bool vtd_as_pt_enabled(VTDAddressSpace *as)
{
IntelIOMMUState *s;
VTDContextEntry ce;
- VTDPASIDEntry pe;
int ret;
assert(as);
return false;
}
- if (s->root_scalable) {
- ret = vtd_ce_get_rid2pasid_entry(s, &ce, &pe);
- if (ret) {
- error_report_once("%s: vtd_ce_get_rid2pasid_entry error: %"PRId32,
- __func__, ret);
- return false;
- }
- return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT);
- }
-
- return (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH);
+ return vtd_dev_pt_enabled(s, &ce);
}
/* Return whether the device is using IOMMU translation. */
assert(as);
- use_iommu = as->iommu_state->dmar_enabled && !vtd_dev_pt_enabled(as);
+ use_iommu = as->iommu_state->dmar_enabled && !vtd_as_pt_enabled(as);
trace_vtd_switch_address_space(pci_bus_num(as->bus),
VTD_PCI_SLOT(as->devfn),
* We don't need to translate for pass-through context entries.
* Also, let's ignore IOTLB caching as well for PT devices.
*/
- if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) {
+ if (vtd_dev_pt_enabled(s, &ce)) {
entry->iova = addr & VTD_PAGE_MASK_4K;
entry->translated_addr = entry->iova;
entry->addr_mask = ~VTD_PAGE_MASK_4K;
#include "hw/mem/nvdimm.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-common.h"
+#include "qapi/qapi-visit-machine.h"
#include "qapi/visitor.h"
#include "hw/core/cpu.h"
#include "hw/usb.h"
#include "trace.h"
#include CONFIG_DEVICES
+GlobalProperty pc_compat_6_2[] = {
+ { "virtio-mem", "unplugged-inaccessible", "off" },
+};
+const size_t pc_compat_6_2_len = G_N_ELEMENTS(pc_compat_6_2);
+
GlobalProperty pc_compat_6_1[] = {
{ TYPE_X86_CPU, "hv-version-id-build", "0x1bbc" },
{ TYPE_X86_CPU, "hv-version-id-major", "0x0006" },
pcms->default_bus_bypass_iommu = value;
}
+static void pc_machine_get_smbios_ep(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ PCMachineState *pcms = PC_MACHINE(obj);
+ SmbiosEntryPointType smbios_entry_point_type = pcms->smbios_entry_point_type;
+
+ visit_type_SmbiosEntryPointType(v, name, &smbios_entry_point_type, errp);
+}
+
+static void pc_machine_set_smbios_ep(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ PCMachineState *pcms = PC_MACHINE(obj);
+
+ visit_type_SmbiosEntryPointType(v, name, &pcms->smbios_entry_point_type, errp);
+}
+
static void pc_machine_get_max_ram_below_4g(Object *obj, Visitor *v,
const char *name, void *opaque,
Error **errp)
pcms->vmport = ON_OFF_AUTO_OFF;
#endif /* CONFIG_VMPORT */
pcms->max_ram_below_4g = 0; /* use default */
+ pcms->smbios_entry_point_type = SMBIOS_ENTRY_POINT_TYPE_32;
+
/* acpi build is enabled by default if machine supports it */
pcms->acpi_build_enabled = PC_MACHINE_GET_CLASS(pcms)->has_acpi_build;
pcms->smbus_enabled = true;
object_class_property_add_bool(oc, PC_MACHINE_SMBUS,
pc_machine_get_smbus, pc_machine_set_smbus);
+ object_class_property_set_description(oc, PC_MACHINE_SMBUS,
+ "Enable/disable system management bus");
object_class_property_add_bool(oc, PC_MACHINE_SATA,
pc_machine_get_sata, pc_machine_set_sata);
+ object_class_property_set_description(oc, PC_MACHINE_SATA,
+ "Enable/disable Serial ATA bus");
object_class_property_add_bool(oc, PC_MACHINE_PIT,
pc_machine_get_pit, pc_machine_set_pit);
+ object_class_property_set_description(oc, PC_MACHINE_PIT,
+ "Enable/disable Intel 8254 programmable interval timer emulation");
object_class_property_add_bool(oc, "hpet",
pc_machine_get_hpet, pc_machine_set_hpet);
+ object_class_property_set_description(oc, "hpet",
+ "Enable/disable high precision event timer emulation");
object_class_property_add_bool(oc, "default-bus-bypass-iommu",
pc_machine_get_default_bus_bypass_iommu,
NULL, NULL);
object_class_property_set_description(oc, PC_MACHINE_MAX_FW_SIZE,
"Maximum combined firmware size");
+
+ object_class_property_add(oc, PC_MACHINE_SMBIOS_EP, "str",
+ pc_machine_get_smbios_ep, pc_machine_set_smbios_ep,
+ NULL, NULL);
+ object_class_property_set_description(oc, PC_MACHINE_SMBIOS_EP,
+ "SMBIOS Entry Point type [32, 64]");
}
static const TypeInfo pc_machine_info = {
smbios_set_defaults("QEMU", "Standard PC (i440FX + PIIX, 1996)",
mc->name, pcmc->smbios_legacy_mode,
pcmc->smbios_uuid_encoded,
- SMBIOS_ENTRY_POINT_21);
+ pcms->smbios_entry_point_type);
}
/* allocate ram and load rom/bios */
machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE);
}
-static void pc_i440fx_6_2_machine_options(MachineClass *m)
+static void pc_i440fx_7_0_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_i440fx_machine_options(m);
pcmc->default_cpu_version = 1;
}
+DEFINE_I440FX_MACHINE(v7_0, "pc-i440fx-7.0", NULL,
+ pc_i440fx_7_0_machine_options);
+
+static void pc_i440fx_6_2_machine_options(MachineClass *m)
+{
+ pc_i440fx_7_0_machine_options(m);
+ m->alias = NULL;
+ m->is_default = false;
+ compat_props_add(m->compat_props, hw_compat_6_2, hw_compat_6_2_len);
+ compat_props_add(m->compat_props, pc_compat_6_2, pc_compat_6_2_len);
+}
+
DEFINE_I440FX_MACHINE(v6_2, "pc-i440fx-6.2", NULL,
pc_i440fx_6_2_machine_options);
smbios_set_defaults("QEMU", "Standard PC (Q35 + ICH9, 2009)",
mc->name, pcmc->smbios_legacy_mode,
pcmc->smbios_uuid_encoded,
- SMBIOS_ENTRY_POINT_21);
+ pcms->smbios_entry_point_type);
}
/* allocate ram and load rom/bios */
m->max_cpus = 288;
}
-static void pc_q35_6_2_machine_options(MachineClass *m)
+static void pc_q35_7_0_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_machine_options(m);
pcmc->default_cpu_version = 1;
}
+DEFINE_Q35_MACHINE(v7_0, "pc-q35-7.0", NULL,
+ pc_q35_7_0_machine_options);
+
+static void pc_q35_6_2_machine_options(MachineClass *m)
+{
+ pc_q35_7_0_machine_options(m);
+ m->alias = NULL;
+ compat_props_add(m->compat_props, hw_compat_6_2, hw_compat_6_2_len);
+ compat_props_add(m->compat_props, pc_compat_6_2, pc_compat_6_2_len);
+}
+
DEFINE_Q35_MACHINE(v6_2, "pc-q35-6.2", NULL,
pc_q35_6_2_machine_options);
uint64_t value;
bool valid_l2t;
uint32_t l2t_id;
- uint32_t max_l2_entries;
+ uint32_t num_l2_entries;
if (s->ct.indirect) {
l2t_id = icid / (s->ct.page_sz / L1TABLE_ENTRY_SIZE);
valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
if (valid_l2t) {
- max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
+ num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
l2t_addr = value & ((1ULL << 51) - 1);
*cte = address_space_ldq_le(as, l2t_addr +
- ((icid % max_l2_entries) * GITS_CTE_SIZE),
+ ((icid % num_l2_entries) * GITS_CTE_SIZE),
MEMTXATTRS_UNSPECIFIED, res);
}
}
MEMTXATTRS_UNSPECIFIED, res);
}
- return (*cte & TABLE_ENTRY_VALID_MASK) != 0;
+ return FIELD_EX64(*cte, CTE, VALID);
}
static bool update_ite(GICv3ITSState *s, uint32_t eventid, uint64_t dte,
uint64_t itt_addr;
MemTxResult res = MEMTX_OK;
- itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
+ itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
address_space_stq_le(as, itt_addr + (eventid * (sizeof(uint64_t) +
bool status = false;
IteEntry ite = {};
- itt_addr = (dte & GITS_DTE_ITTADDR_MASK) >> GITS_DTE_ITTADDR_SHIFT;
+ itt_addr = FIELD_EX64(dte, DTE, ITTADDR);
itt_addr <<= ITTADDR_SHIFT; /* 256 byte aligned */
ite.itel = address_space_ldq_le(as, itt_addr +
MEMTXATTRS_UNSPECIFIED, res);
if (*res == MEMTX_OK) {
- if (ite.itel & TABLE_ENTRY_VALID_MASK) {
- if ((ite.itel >> ITE_ENTRY_INTTYPE_SHIFT) &
- GITS_TYPE_PHYSICAL) {
- *pIntid = (ite.itel & ITE_ENTRY_INTID_MASK) >>
- ITE_ENTRY_INTID_SHIFT;
- *icid = ite.iteh & ITE_ENTRY_ICID_MASK;
+ if (FIELD_EX64(ite.itel, ITE_L, VALID)) {
+ int inttype = FIELD_EX64(ite.itel, ITE_L, INTTYPE);
+ if (inttype == ITE_INTTYPE_PHYSICAL) {
+ *pIntid = FIELD_EX64(ite.itel, ITE_L, INTID);
+ *icid = FIELD_EX32(ite.iteh, ITE_H, ICID);
status = true;
}
}
uint64_t value;
bool valid_l2t;
uint32_t l2t_id;
- uint32_t max_l2_entries;
+ uint32_t num_l2_entries;
if (s->dt.indirect) {
l2t_id = devid / (s->dt.page_sz / L1TABLE_ENTRY_SIZE);
valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
if (valid_l2t) {
- max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
+ num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
l2t_addr = value & ((1ULL << 51) - 1);
value = address_space_ldq_le(as, l2t_addr +
- ((devid % max_l2_entries) * GITS_DTE_SIZE),
+ ((devid % num_l2_entries) * GITS_DTE_SIZE),
MEMTXATTRS_UNSPECIFIED, res);
}
}
if (res != MEMTX_OK) {
return result;
}
- dte_valid = dte & TABLE_ENTRY_VALID_MASK;
+ dte_valid = FIELD_EX64(dte, DTE, VALID);
if (dte_valid) {
- max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
+ max_eventid = 1UL << (FIELD_EX64(dte, DTE, SIZE) + 1);
ite_valid = get_ite(s, eventid, dte, &icid, &pIntid, &res);
* In this implementation, in case of guest errors we ignore the
* command and move onto the next command in the queue.
*/
- if (devid > s->dt.maxids.max_devids) {
+ if (devid >= s->dt.num_ids) {
qemu_log_mask(LOG_GUEST_ERROR,
- "%s: invalid command attributes: devid %d>%d",
- __func__, devid, s->dt.maxids.max_devids);
+ "%s: invalid command attributes: devid %d>=%d",
+ __func__, devid, s->dt.num_ids);
} else if (!dte_valid || !ite_valid || !cte_valid) {
qemu_log_mask(LOG_GUEST_ERROR,
* Current implementation only supports rdbase == procnum
* Hence rdbase physical address is ignored
*/
- rdbase = (cte & GITS_CTE_RDBASE_PROCNUM_MASK) >> 1U;
+ rdbase = FIELD_EX64(cte, CTE, RDBASE);
- if (rdbase > s->gicv3->num_cpu) {
+ if (rdbase >= s->gicv3->num_cpu) {
return result;
}
MemTxResult res = MEMTX_OK;
uint16_t icid = 0;
uint64_t dte = 0;
- IteEntry ite;
- uint32_t int_spurious = INTID_SPURIOUS;
bool result = false;
devid = ((value & DEVID_MASK) >> DEVID_SHIFT);
eventid = (value & EVENTID_MASK);
- if (!ignore_pInt) {
+ if (ignore_pInt) {
+ pIntid = eventid;
+ } else {
pIntid = ((value & pINTID_MASK) >> pINTID_SHIFT);
}
if (res != MEMTX_OK) {
return result;
}
- dte_valid = dte & TABLE_ENTRY_VALID_MASK;
-
- max_eventid = (1UL << (((dte >> 1U) & SIZE_MASK) + 1));
-
- if (!ignore_pInt) {
- max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1;
- }
+ dte_valid = FIELD_EX64(dte, DTE, VALID);
+ max_eventid = 1UL << (FIELD_EX64(dte, DTE, SIZE) + 1);
+ max_Intid = (1ULL << (GICD_TYPER_IDBITS + 1)) - 1;
- if ((devid > s->dt.maxids.max_devids) || (icid > s->ct.maxids.max_collids)
+ if ((devid >= s->dt.num_ids) || (icid >= s->ct.num_ids)
|| !dte_valid || (eventid > max_eventid) ||
- (!ignore_pInt && (((pIntid < GICV3_LPI_INTID_START) ||
- (pIntid > max_Intid)) && (pIntid != INTID_SPURIOUS)))) {
+ (((pIntid < GICV3_LPI_INTID_START) || (pIntid > max_Intid)) &&
+ (pIntid != INTID_SPURIOUS))) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid command attributes "
"devid %d or icid %d or eventid %d or pIntid %d or"
*/
} else {
/* add ite entry to interrupt translation table */
- ite.itel = (dte_valid & TABLE_ENTRY_VALID_MASK) |
- (GITS_TYPE_PHYSICAL << ITE_ENTRY_INTTYPE_SHIFT);
-
- if (ignore_pInt) {
- ite.itel |= (eventid << ITE_ENTRY_INTID_SHIFT);
- } else {
- ite.itel |= (pIntid << ITE_ENTRY_INTID_SHIFT);
- }
- ite.itel |= (int_spurious << ITE_ENTRY_INTSP_SHIFT);
- ite.iteh = icid;
+ IteEntry ite = {};
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, VALID, dte_valid);
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, INTTYPE, ITE_INTTYPE_PHYSICAL);
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, INTID, pIntid);
+ ite.itel = FIELD_DP64(ite.itel, ITE_L, DOORBELL, INTID_SPURIOUS);
+ ite.iteh = FIELD_DP32(ite.iteh, ITE_H, ICID, icid);
result = update_ite(s, eventid, dte, ite);
}
uint64_t l2t_addr;
bool valid_l2t;
uint32_t l2t_id;
- uint32_t max_l2_entries;
+ uint32_t num_l2_entries;
uint64_t cte = 0;
MemTxResult res = MEMTX_OK;
if (valid) {
/* add mapping entry to collection table */
- cte = (valid & TABLE_ENTRY_VALID_MASK) | (rdbase << 1ULL);
+ cte = FIELD_DP64(cte, CTE, VALID, 1);
+ cte = FIELD_DP64(cte, CTE, RDBASE, rdbase);
}
/*
valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
if (valid_l2t) {
- max_l2_entries = s->ct.page_sz / s->ct.entry_sz;
+ num_l2_entries = s->ct.page_sz / s->ct.entry_sz;
l2t_addr = value & ((1ULL << 51) - 1);
address_space_stq_le(as, l2t_addr +
- ((icid % max_l2_entries) * GITS_CTE_SIZE),
+ ((icid % num_l2_entries) * GITS_CTE_SIZE),
cte, MEMTXATTRS_UNSPECIFIED, &res);
}
} else {
valid = (value & CMD_FIELD_VALID_MASK);
- if ((icid > s->ct.maxids.max_collids) || (rdbase > s->gicv3->num_cpu)) {
+ if ((icid >= s->ct.num_ids) || (rdbase >= s->gicv3->num_cpu)) {
qemu_log_mask(LOG_GUEST_ERROR,
"ITS MAPC: invalid collection table attributes "
"icid %d rdbase %" PRIu64 "\n", icid, rdbase);
uint64_t l2t_addr;
bool valid_l2t;
uint32_t l2t_id;
- uint32_t max_l2_entries;
+ uint32_t num_l2_entries;
uint64_t dte = 0;
MemTxResult res = MEMTX_OK;
if (s->dt.valid) {
if (valid) {
/* add mapping entry to device table */
- dte = (valid & TABLE_ENTRY_VALID_MASK) |
- ((size & SIZE_MASK) << 1U) |
- (itt_addr << GITS_DTE_ITTADDR_SHIFT);
+ dte = FIELD_DP64(dte, DTE, VALID, 1);
+ dte = FIELD_DP64(dte, DTE, SIZE, size);
+ dte = FIELD_DP64(dte, DTE, ITTADDR, itt_addr);
}
} else {
return true;
valid_l2t = (value & L2_TABLE_VALID_MASK) != 0;
if (valid_l2t) {
- max_l2_entries = s->dt.page_sz / s->dt.entry_sz;
+ num_l2_entries = s->dt.page_sz / s->dt.entry_sz;
l2t_addr = value & ((1ULL << 51) - 1);
address_space_stq_le(as, l2t_addr +
- ((devid % max_l2_entries) * GITS_DTE_SIZE),
+ ((devid % num_l2_entries) * GITS_DTE_SIZE),
dte, MEMTXATTRS_UNSPECIFIED, &res);
}
} else {
valid = (value & CMD_FIELD_VALID_MASK);
- if ((devid > s->dt.maxids.max_devids) ||
+ if ((devid >= s->dt.num_ids) ||
(size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS))) {
qemu_log_mask(LOG_GUEST_ERROR,
"ITS MAPD: invalid device table attributes "
uint8_t cmd;
int i;
- if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
return;
}
wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);
- if (wr_offset > s->cq.max_entries) {
+ if (wr_offset >= s->cq.num_entries) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid write offset "
"%d\n", __func__, wr_offset);
rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);
- if (rd_offset > s->cq.max_entries) {
+ if (rd_offset >= s->cq.num_entries) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid read offset "
"%d\n", __func__, rd_offset);
}
if (result) {
rd_offset++;
- rd_offset %= s->cq.max_entries;
+ rd_offset %= s->cq.num_entries;
s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
} else {
/*
uint64_t value;
for (int i = 0; i < 8; i++) {
+ TableDesc *td;
+ int idbits;
+
value = s->baser[i];
if (!value) {
type = FIELD_EX64(value, GITS_BASER, TYPE);
switch (type) {
-
case GITS_BASER_TYPE_DEVICE:
- memset(&s->dt, 0 , sizeof(s->dt));
- s->dt.valid = FIELD_EX64(value, GITS_BASER, VALID);
-
- if (!s->dt.valid) {
- return;
- }
-
- s->dt.page_sz = page_sz;
- s->dt.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
- s->dt.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);
-
- if (!s->dt.indirect) {
- s->dt.max_entries = (num_pages * page_sz) / s->dt.entry_sz;
- } else {
- s->dt.max_entries = (((num_pages * page_sz) /
- L1TABLE_ENTRY_SIZE) *
- (page_sz / s->dt.entry_sz));
- }
-
- s->dt.maxids.max_devids = (1UL << (FIELD_EX64(s->typer, GITS_TYPER,
- DEVBITS) + 1));
-
- s->dt.base_addr = baser_base_addr(value, page_sz);
-
+ td = &s->dt;
+ idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
break;
-
case GITS_BASER_TYPE_COLLECTION:
- memset(&s->ct, 0 , sizeof(s->ct));
- s->ct.valid = FIELD_EX64(value, GITS_BASER, VALID);
-
- /*
- * GITS_TYPER.HCC is 0 for this implementation
- * hence writes are discarded if ct.valid is 0
- */
- if (!s->ct.valid) {
- return;
- }
-
- s->ct.page_sz = page_sz;
- s->ct.indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
- s->ct.entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE);
-
- if (!s->ct.indirect) {
- s->ct.max_entries = (num_pages * page_sz) / s->ct.entry_sz;
- } else {
- s->ct.max_entries = (((num_pages * page_sz) /
- L1TABLE_ENTRY_SIZE) *
- (page_sz / s->ct.entry_sz));
- }
-
+ td = &s->ct;
if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
- s->ct.maxids.max_collids = (1UL << (FIELD_EX64(s->typer,
- GITS_TYPER, CIDBITS) + 1));
+ idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
} else {
/* 16-bit CollectionId supported when CIL == 0 */
- s->ct.maxids.max_collids = (1UL << 16);
+ idbits = 16;
}
-
- s->ct.base_addr = baser_base_addr(value, page_sz);
-
break;
-
default:
- break;
+ /*
+ * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
+ * ensures we will only see type values corresponding to
+ * the values set up in gicv3_its_reset().
+ */
+ g_assert_not_reached();
+ }
+
+ memset(td, 0, sizeof(*td));
+ td->valid = FIELD_EX64(value, GITS_BASER, VALID);
+ /*
+ * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
+ * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
+ * do not have a special case where the GITS_BASER<n>.Valid bit is 0
+ * for the register corresponding to the Collection table but we
+ * still have to process interrupts using non-memory-backed
+ * Collection table entries.)
+ */
+ if (!td->valid) {
+ continue;
+ }
+ td->page_sz = page_sz;
+ td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
+ td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
+ td->base_addr = baser_base_addr(value, page_sz);
+ if (!td->indirect) {
+ td->num_entries = (num_pages * page_sz) / td->entry_sz;
+ } else {
+ td->num_entries = (((num_pages * page_sz) /
+ L1TABLE_ENTRY_SIZE) *
+ (page_sz / td->entry_sz));
}
+ td->num_ids = 1ULL << idbits;
}
}
s->cq.valid = FIELD_EX64(value, GITS_CBASER, VALID);
if (s->cq.valid) {
- s->cq.max_entries = (num_pages * GITS_PAGE_SIZE_4K) /
+ s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
GITS_CMDQ_ENTRY_SIZE;
s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
switch (offset) {
case GITS_TRANSLATER:
- if (s->ctlr & ITS_CTLR_ENABLED) {
+ if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
devid = attrs.requester_id;
result = process_its_cmd(s, data, devid, NONE);
}
switch (offset) {
case GITS_CTLR:
if (value & R_GITS_CTLR_ENABLED_MASK) {
- s->ctlr |= ITS_CTLR_ENABLED;
+ s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
extract_table_params(s);
extract_cmdq_params(s);
s->creadr = 0;
process_cmdq(s);
} else {
- s->ctlr &= ~ITS_CTLR_ENABLED;
+ s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
}
break;
case GITS_CBASER:
* IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
* already enabled
*/
- if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
s->cbaser = deposit64(s->cbaser, 0, 32, value);
s->creadr = 0;
s->cwriter = s->creadr;
* IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
* already enabled
*/
- if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
s->cbaser = deposit64(s->cbaser, 32, 32, value);
s->creadr = 0;
s->cwriter = s->creadr;
* IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
* already enabled
*/
- if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
index = (offset - GITS_BASER) / 8;
if (offset & 7) {
* IMPDEF choice:- GITS_BASERn register becomes RO if ITS is
* already enabled
*/
- if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
index = (offset - GITS_BASER) / 8;
s->baser[index] &= GITS_BASER_RO_MASK;
s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
* IMPDEF choice:- GITS_CBASER register becomes RO if ITS is
* already enabled
*/
- if (!(s->ctlr & ITS_CTLR_ENABLED)) {
+ if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
s->cbaser = value;
s->creadr = 0;
s->cwriter = s->creadr;
"gicv3-its-sysmem");
/* set the ITS default features supported */
- s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL,
- GITS_TYPE_PHYSICAL);
+ s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
ITS_ITT_ENTRY_SIZE - 1);
s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
static void gicv3_its_post_load(GICv3ITSState *s)
{
- if (s->ctlr & ITS_CTLR_ENABLED) {
+ if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
extract_table_params(s);
extract_cmdq_params(s);
}
#define GITS_IDREGS 0xFFD0
-#define ITS_CTLR_ENABLED (1U) /* ITS Enabled */
-
#define GITS_BASER_RO_MASK (R_GITS_BASER_ENTRYSIZE_MASK | \
R_GITS_BASER_TYPE_MASK)
#define L2_TABLE_VALID_MASK CMD_FIELD_VALID_MASK
#define TABLE_ENTRY_VALID_MASK (1ULL << 0)
-/**
- * Default features advertised by this version of ITS
- */
-/* Physical LPIs supported */
-#define GITS_TYPE_PHYSICAL (1U << 0)
-
/*
* 12 bytes Interrupt translation Table Entry size
* as per Table 5.3 in GICv3 spec
* ITE Lower 8 Bytes
* Bits: | 49 ... 26 | 25 ... 2 | 1 | 0 |
- * Values: | 1023 | IntNum | IntType | Valid |
+ * Values: | Doorbell | IntNum | IntType | Valid |
* ITE Higher 4 Bytes
* Bits: | 31 ... 16 | 15 ...0 |
* Values: | vPEID | ICID |
+ * (When Doorbell is unused, as it always is in GICv3, it is 1023)
*/
#define ITS_ITT_ENTRY_SIZE 0xC
-#define ITE_ENTRY_INTTYPE_SHIFT 1
-#define ITE_ENTRY_INTID_SHIFT 2
-#define ITE_ENTRY_INTID_MASK MAKE_64BIT_MASK(2, 24)
-#define ITE_ENTRY_INTSP_SHIFT 26
-#define ITE_ENTRY_ICID_MASK MAKE_64BIT_MASK(0, 16)
+
+FIELD(ITE_L, VALID, 0, 1)
+FIELD(ITE_L, INTTYPE, 1, 1)
+FIELD(ITE_L, INTID, 2, 24)
+FIELD(ITE_L, DOORBELL, 26, 24)
+
+FIELD(ITE_H, ICID, 0, 16)
+FIELD(ITE_H, VPEID, 16, 16)
+
+/* Possible values for ITE_L INTTYPE */
+#define ITE_INTTYPE_VIRTUAL 0
+#define ITE_INTTYPE_PHYSICAL 1
/* 16 bits EventId */
#define ITS_IDBITS GICD_TYPER_IDBITS
* Valid = 1 bit,ITTAddr = 44 bits,Size = 5 bits
*/
#define GITS_DTE_SIZE (0x8ULL)
-#define GITS_DTE_ITTADDR_SHIFT 6
-#define GITS_DTE_ITTADDR_MASK MAKE_64BIT_MASK(GITS_DTE_ITTADDR_SHIFT, \
- ITTADDR_LENGTH)
+
+FIELD(DTE, VALID, 0, 1)
+FIELD(DTE, SIZE, 1, 5)
+FIELD(DTE, ITTADDR, 6, 44)
/*
* 8 bytes Collection Table Entry size
- * Valid = 1 bit,RDBase = 36 bits(considering max RDBASE)
+ * Valid = 1 bit, RDBase = 16 bits
*/
#define GITS_CTE_SIZE (0x8ULL)
-#define GITS_CTE_RDBASE_PROCNUM_MASK MAKE_64BIT_MASK(1, RDBASE_PROCNUM_LENGTH)
+FIELD(CTE, VALID, 0, 1)
+FIELD(CTE, RDBASE, 1, RDBASE_PROCNUM_LENGTH)
/* Special interrupt IDs */
#define INTID_SECURE 1020
#include "migration/vmstate.h"
#include "hw/irq.h"
-#define RISCV_DEBUG_PLIC 0
+static bool addr_between(uint32_t addr, uint32_t base, uint32_t num)
+{
+ return addr >= base && addr - base < num;
+}
static PLICMode char_to_mode(char c)
{
}
}
-static char mode_to_char(PLICMode m)
-{
- switch (m) {
- case PLICMode_U: return 'U';
- case PLICMode_S: return 'S';
- case PLICMode_H: return 'H';
- case PLICMode_M: return 'M';
- default: return '?';
- }
-}
-
-static void sifive_plic_print_state(SiFivePLICState *plic)
-{
- int i;
- int addrid;
-
- /* pending */
- qemu_log("pending : ");
- for (i = plic->bitfield_words - 1; i >= 0; i--) {
- qemu_log("%08x", plic->pending[i]);
- }
- qemu_log("\n");
-
- /* pending */
- qemu_log("claimed : ");
- for (i = plic->bitfield_words - 1; i >= 0; i--) {
- qemu_log("%08x", plic->claimed[i]);
- }
- qemu_log("\n");
-
- for (addrid = 0; addrid < plic->num_addrs; addrid++) {
- qemu_log("hart%d-%c enable: ",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode));
- for (i = plic->bitfield_words - 1; i >= 0; i--) {
- qemu_log("%08x", plic->enable[addrid * plic->bitfield_words + i]);
- }
- qemu_log("\n");
- }
-}
-
static uint32_t atomic_set_masked(uint32_t *a, uint32_t mask, uint32_t value)
{
uint32_t old, new, cmp = qatomic_read(a);
atomic_set_masked(&plic->claimed[irq >> 5], 1 << (irq & 31), -!!level);
}
-static int sifive_plic_irqs_pending(SiFivePLICState *plic, uint32_t addrid)
+static uint32_t sifive_plic_claimed(SiFivePLICState *plic, uint32_t addrid)
{
+ uint32_t max_irq = 0;
+ uint32_t max_prio = plic->target_priority[addrid];
int i, j;
+
for (i = 0; i < plic->bitfield_words; i++) {
uint32_t pending_enabled_not_claimed =
- (plic->pending[i] & ~plic->claimed[i]) &
- plic->enable[addrid * plic->bitfield_words + i];
+ (plic->pending[i] & ~plic->claimed[i]) &
+ plic->enable[addrid * plic->bitfield_words + i];
+
if (!pending_enabled_not_claimed) {
continue;
}
+
for (j = 0; j < 32; j++) {
int irq = (i << 5) + j;
uint32_t prio = plic->source_priority[irq];
int enabled = pending_enabled_not_claimed & (1 << j);
- if (enabled && prio > plic->target_priority[addrid]) {
- return 1;
+
+ if (enabled && prio > max_prio) {
+ max_irq = irq;
+ max_prio = prio;
}
}
}
- return 0;
+
+ return max_irq;
}
static void sifive_plic_update(SiFivePLICState *plic)
for (addrid = 0; addrid < plic->num_addrs; addrid++) {
uint32_t hartid = plic->addr_config[addrid].hartid;
PLICMode mode = plic->addr_config[addrid].mode;
- int level = sifive_plic_irqs_pending(plic, addrid);
+ bool level = !!sifive_plic_claimed(plic, addrid);
switch (mode) {
case PLICMode_M:
break;
}
}
-
- if (RISCV_DEBUG_PLIC) {
- sifive_plic_print_state(plic);
- }
-}
-
-static uint32_t sifive_plic_claim(SiFivePLICState *plic, uint32_t addrid)
-{
- int i, j;
- uint32_t max_irq = 0;
- uint32_t max_prio = plic->target_priority[addrid];
-
- for (i = 0; i < plic->bitfield_words; i++) {
- uint32_t pending_enabled_not_claimed =
- (plic->pending[i] & ~plic->claimed[i]) &
- plic->enable[addrid * plic->bitfield_words + i];
- if (!pending_enabled_not_claimed) {
- continue;
- }
- for (j = 0; j < 32; j++) {
- int irq = (i << 5) + j;
- uint32_t prio = plic->source_priority[irq];
- int enabled = pending_enabled_not_claimed & (1 << j);
- if (enabled && prio > max_prio) {
- max_irq = irq;
- max_prio = prio;
- }
- }
- }
-
- if (max_irq) {
- sifive_plic_set_pending(plic, max_irq, false);
- sifive_plic_set_claimed(plic, max_irq, true);
- }
- return max_irq;
}
static uint64_t sifive_plic_read(void *opaque, hwaddr addr, unsigned size)
{
SiFivePLICState *plic = opaque;
- /* writes must be 4 byte words */
- if ((addr & 0x3) != 0) {
- goto err;
- }
-
- if (addr >= plic->priority_base && /* 4 bytes per source */
- addr < plic->priority_base + (plic->num_sources << 2))
- {
+ if (addr_between(addr, plic->priority_base, plic->num_sources << 2)) {
uint32_t irq = ((addr - plic->priority_base) >> 2) + 1;
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: read priority: irq=%d priority=%d\n",
- irq, plic->source_priority[irq]);
- }
+
return plic->source_priority[irq];
- } else if (addr >= plic->pending_base && /* 1 bit per source */
- addr < plic->pending_base + (plic->num_sources >> 3))
- {
+ } else if (addr_between(addr, plic->pending_base, plic->num_sources >> 3)) {
uint32_t word = (addr - plic->pending_base) >> 2;
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: read pending: word=%d value=%d\n",
- word, plic->pending[word]);
- }
+
return plic->pending[word];
- } else if (addr >= plic->enable_base && /* 1 bit per source */
- addr < plic->enable_base + plic->num_addrs * plic->enable_stride)
- {
+ } else if (addr_between(addr, plic->enable_base,
+ plic->num_addrs * plic->enable_stride)) {
uint32_t addrid = (addr - plic->enable_base) / plic->enable_stride;
uint32_t wordid = (addr & (plic->enable_stride - 1)) >> 2;
+
if (wordid < plic->bitfield_words) {
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: read enable: hart%d-%c word=%d value=%x\n",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode), wordid,
- plic->enable[addrid * plic->bitfield_words + wordid]);
- }
return plic->enable[addrid * plic->bitfield_words + wordid];
}
- } else if (addr >= plic->context_base && /* 1 bit per source */
- addr < plic->context_base + plic->num_addrs * plic->context_stride)
- {
+ } else if (addr_between(addr, plic->context_base,
+ plic->num_addrs * plic->context_stride)) {
uint32_t addrid = (addr - plic->context_base) / plic->context_stride;
uint32_t contextid = (addr & (plic->context_stride - 1));
+
if (contextid == 0) {
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: read priority: hart%d-%c priority=%x\n",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode),
- plic->target_priority[addrid]);
- }
return plic->target_priority[addrid];
} else if (contextid == 4) {
- uint32_t value = sifive_plic_claim(plic, addrid);
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: read claim: hart%d-%c irq=%x\n",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode),
- value);
+ uint32_t max_irq = sifive_plic_claimed(plic, addrid);
+
+ if (max_irq) {
+ sifive_plic_set_pending(plic, max_irq, false);
+ sifive_plic_set_claimed(plic, max_irq, true);
}
+
sifive_plic_update(plic);
- return value;
+ return max_irq;
}
}
-err:
qemu_log_mask(LOG_GUEST_ERROR,
"%s: Invalid register read 0x%" HWADDR_PRIx "\n",
__func__, addr);
{
SiFivePLICState *plic = opaque;
- /* writes must be 4 byte words */
- if ((addr & 0x3) != 0) {
- goto err;
- }
-
- if (addr >= plic->priority_base && /* 4 bytes per source */
- addr < plic->priority_base + (plic->num_sources << 2))
- {
+ if (addr_between(addr, plic->priority_base, plic->num_sources << 2)) {
uint32_t irq = ((addr - plic->priority_base) >> 2) + 1;
+
plic->source_priority[irq] = value & 7;
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: write priority: irq=%d priority=%d\n",
- irq, plic->source_priority[irq]);
- }
sifive_plic_update(plic);
- return;
- } else if (addr >= plic->pending_base && /* 1 bit per source */
- addr < plic->pending_base + (plic->num_sources >> 3))
- {
+ } else if (addr_between(addr, plic->pending_base,
+ plic->num_sources >> 3)) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: invalid pending write: 0x%" HWADDR_PRIx "",
__func__, addr);
- return;
- } else if (addr >= plic->enable_base && /* 1 bit per source */
- addr < plic->enable_base + plic->num_addrs * plic->enable_stride)
- {
+ } else if (addr_between(addr, plic->enable_base,
+ plic->num_addrs * plic->enable_stride)) {
uint32_t addrid = (addr - plic->enable_base) / plic->enable_stride;
uint32_t wordid = (addr & (plic->enable_stride - 1)) >> 2;
+
if (wordid < plic->bitfield_words) {
plic->enable[addrid * plic->bitfield_words + wordid] = value;
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: write enable: hart%d-%c word=%d value=%x\n",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode), wordid,
- plic->enable[addrid * plic->bitfield_words + wordid]);
- }
- return;
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Invalid enable write 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
}
- } else if (addr >= plic->context_base && /* 4 bytes per reg */
- addr < plic->context_base + plic->num_addrs * plic->context_stride)
- {
+ } else if (addr_between(addr, plic->context_base,
+ plic->num_addrs * plic->context_stride)) {
uint32_t addrid = (addr - plic->context_base) / plic->context_stride;
uint32_t contextid = (addr & (plic->context_stride - 1));
+
if (contextid == 0) {
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: write priority: hart%d-%c priority=%x\n",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode),
- plic->target_priority[addrid]);
- }
if (value <= plic->num_priorities) {
plic->target_priority[addrid] = value;
sifive_plic_update(plic);
}
- return;
} else if (contextid == 4) {
- if (RISCV_DEBUG_PLIC) {
- qemu_log("plic: write claim: hart%d-%c irq=%x\n",
- plic->addr_config[addrid].hartid,
- mode_to_char(plic->addr_config[addrid].mode),
- (uint32_t)value);
- }
if (value < plic->num_sources) {
sifive_plic_set_claimed(plic, value, false);
sifive_plic_update(plic);
}
- return;
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Invalid context write 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
}
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Invalid register write 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
}
-
-err:
- qemu_log_mask(LOG_GUEST_ERROR,
- "%s: Invalid register write 0x%" HWADDR_PRIx "\n",
- __func__, addr);
}
static const MemoryRegionOps sifive_plic_ops = {
}
};
+static void sifive_plic_reset(DeviceState *dev)
+{
+ SiFivePLICState *s = SIFIVE_PLIC(dev);
+ int i;
+
+ memset(s->source_priority, 0, sizeof(uint32_t) * s->num_sources);
+ memset(s->target_priority, 0, sizeof(uint32_t) * s->num_addrs);
+ memset(s->pending, 0, sizeof(uint32_t) * s->bitfield_words);
+ memset(s->claimed, 0, sizeof(uint32_t) * s->bitfield_words);
+ memset(s->enable, 0, sizeof(uint32_t) * s->num_enables);
+
+ for (i = 0; i < s->num_harts; i++) {
+ qemu_set_irq(s->m_external_irqs[i], 0);
+ qemu_set_irq(s->s_external_irqs[i], 0);
+ }
+}
+
/*
* parse PLIC hart/mode address offset config
*
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->reset = sifive_plic_reset;
device_class_set_props(dc, sifive_plic_properties);
dc->realize = sifive_plic_realize;
dc->vmsd = &vmstate_sifive_plic;
/* Remove qtest_enabled() check once firmware files are in the tree */
if (!qtest_enabled()) {
- if (bios_size < 0 || bios_size > MACROM_SIZE) {
+ if (bios_size <= 0 || bios_size > MACROM_SIZE) {
error_report("could not load MacROM '%s'", bios_name);
exit(1);
}
- ptr = rom_ptr(MACROM_ADDR, MACROM_SIZE);
+ ptr = rom_ptr(MACROM_ADDR, bios_size);
+ assert(ptr != NULL);
stl_phys(cs->as, 0, ldl_p(ptr)); /* reset initial SP */
stl_phys(cs->as, 4,
MACROM_ADDR + ldl_p(ptr + 4)); /* reset initial PC */
} \
type_init(machvirt_machine_##major##_##minor##_init);
+static void virt_machine_7_0_options(MachineClass *mc)
+{
+}
+DEFINE_VIRT_MACHINE(7, 0, true)
+
static void virt_machine_6_2_options(MachineClass *mc)
{
+ virt_machine_7_0_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
}
-DEFINE_VIRT_MACHINE(6, 2, true)
+DEFINE_VIRT_MACHINE(6, 2, false)
static void virt_machine_6_1_options(MachineClass *mc)
{
memory_region_init(&phb->pci_mmio, OBJECT(phb), "pci-mmio",
PCI_MMIO_TOTAL_SIZE);
- pci->bus = pci_register_root_bus(dev, "root-bus",
+ pci->bus = pci_register_root_bus(dev,
+ dev->id ? dev->id : NULL,
pnv_phb3_set_irq, pnv_phb3_map_irq, phb,
&phb->pci_mmio, &phb->pci_io,
0, 4, TYPE_PNV_PHB3_ROOT_BUS);
memory_region_init(&phb->pci_mmio, OBJECT(phb), name,
PCI_MMIO_TOTAL_SIZE);
- pci->bus = pci_register_root_bus(dev, "root-bus",
+ pci->bus = pci_register_root_bus(dev, dev->id,
pnv_phb4_set_irq, pnv_phb4_map_irq, phb,
&phb->pci_mmio, &phb->pci_io,
0, 4, TYPE_PNV_PHB4_ROOT_BUS);
phb->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc, xsrc->nr_irqs);
}
-static void pnv_phb4_reset(DeviceState *dev)
-{
- PnvPHB4 *phb = PNV_PHB4(dev);
- PCIDevice *root_dev = PCI_DEVICE(&phb->root);
-
- /*
- * Configure PCI device id at reset using a property.
- */
- pci_config_set_vendor_id(root_dev->config, PCI_VENDOR_ID_IBM);
- pci_config_set_device_id(root_dev->config, phb->device_id);
-}
-
static const char *pnv_phb4_root_bus_path(PCIHostState *host_bridge,
PCIBus *rootbus)
{
DEFINE_PROP_UINT32("index", PnvPHB4, phb_id, 0),
DEFINE_PROP_UINT32("chip-id", PnvPHB4, chip_id, 0),
DEFINE_PROP_UINT64("version", PnvPHB4, version, 0),
- DEFINE_PROP_UINT16("device-id", PnvPHB4, device_id, 0),
DEFINE_PROP_LINK("stack", PnvPHB4, stack, TYPE_PNV_PHB4_PEC_STACK,
PnvPhb4PecStack *),
DEFINE_PROP_END_OF_LIST(),
device_class_set_props(dc, pnv_phb4_properties);
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->user_creatable = false;
- dc->reset = pnv_phb4_reset;
xfc->notify = pnv_phb4_xive_notify;
}
pecc->stk_compat = stk_compat;
pecc->stk_compat_size = sizeof(stk_compat);
pecc->version = PNV_PHB4_VERSION;
- pecc->device_id = PNV_PHB4_DEVICE_ID;
pecc->num_stacks = pnv_pec_num_stacks;
}
&error_fatal);
object_property_set_int(OBJECT(&stack->phb), "version", pecc->version,
&error_fatal);
- object_property_set_int(OBJECT(&stack->phb), "device-id", pecc->device_id,
- &error_fatal);
object_property_set_link(OBJECT(&stack->phb), "stack", OBJECT(stack),
&error_abort);
if (!sysbus_realize(SYS_BUS_DEVICE(&stack->phb), errp)) {
/* now do the real mapping */
if (r->addr != PCI_BAR_UNMAPPED) {
- trace_pci_update_mappings_del(d, pci_dev_bus_num(d),
+ trace_pci_update_mappings_del(d->name, pci_dev_bus_num(d),
PCI_SLOT(d->devfn),
PCI_FUNC(d->devfn),
i, r->addr, r->size);
}
r->addr = new_addr;
if (r->addr != PCI_BAR_UNMAPPED) {
- trace_pci_update_mappings_add(d, pci_dev_bus_num(d),
+ trace_pci_update_mappings_add(d->name, pci_dev_bus_num(d),
PCI_SLOT(d->devfn),
PCI_FUNC(d->devfn),
i, r->addr, r->size);
pci_change_irq_level(pci_dev, irq_num, change);
}
-static inline int pci_intx(PCIDevice *pci_dev)
-{
- return pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1;
-}
-
qemu_irq pci_allocate_irq(PCIDevice *pci_dev)
{
int intx = pci_intx(pci_dev);
return;
}
- trace_pci_cfg_write(pci_dev->name, PCI_SLOT(pci_dev->devfn),
+ trace_pci_cfg_write(pci_dev->name, pci_dev_bus_num(pci_dev),
+ PCI_SLOT(pci_dev->devfn),
PCI_FUNC(pci_dev->devfn), addr, val);
pci_dev->config_write(pci_dev, addr, val, MIN(len, limit - addr));
}
}
ret = pci_dev->config_read(pci_dev, addr, MIN(len, limit - addr));
- trace_pci_cfg_read(pci_dev->name, PCI_SLOT(pci_dev->devfn),
+ trace_pci_cfg_read(pci_dev->name, pci_dev_bus_num(pci_dev),
+ PCI_SLOT(pci_dev->devfn),
PCI_FUNC(pci_dev->devfn), addr, ret);
return ret;
uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
/* 6.2.4.1.2 Interrupt Generation */
if (!msix_enabled(dev) && !msi_enabled(dev)) {
- pci_set_irq(dev, !!(root_cmd & enabled_cmd));
+ if (pci_intx(dev) != -1) {
+ pci_set_irq(dev, !!(root_cmd & enabled_cmd));
+ }
return;
}
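
pci_intx() (made a static inline in pci.h elsewhere in this series) returns the INTx pin derived from PCI_INTERRUPT_PIN, or -1 when the function exposes no legacy interrupt pin, which is why the AER root-command path above now guards pci_set_irq(). A minimal sketch of the same guard, not taken from the series (the helper name is made up):

    /* Assert legacy INTx only when the function actually has a pin. */
    static void maybe_assert_intx(PCIDevice *dev)
    {
        if (pci_intx(dev) >= 0) {
            pci_irq_assert(dev);
        }
    }
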
# See docs/devel/tracing.rst for syntax documentation.
# pci.c
-pci_update_mappings_del(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64
-pci_update_mappings_add(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64
+pci_update_mappings_del(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64
+pci_update_mappings_add(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "%s %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64
# pci_host.c
-pci_cfg_read(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x -> 0x%x"
-pci_cfg_write(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x <- 0x%x"
+pci_cfg_read(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, unsigned offs, unsigned val) "%s %02x:%02x.%x @0x%x -> 0x%x"
+pci_cfg_write(const char *dev, uint32_t bus, uint32_t slot, uint32_t func, unsigned offs, unsigned val) "%s %02x:%02x.%x @0x%x <- 0x%x"
# msix.c
msix_write_config(char *name, bool enabled, bool masked) "dev %s enabled %d masked %d"
#include "qemu/osdep.h"
#include "qemu/module.h"
+#include "qemu/log.h"
#include "sysemu/runstate.h"
#include "cpu.h"
#include "hw/sysbus.h"
value = env->spr[SPR_E500_SVR];
break;
default:
- fprintf(stderr, "guts: Unknown register read: %x\n", (int)addr);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: Unknown register 0x%" HWADDR_PRIx "\n",
+ __func__, addr);
break;
}
}
break;
default:
- fprintf(stderr, "guts: Unknown register write: %x = %x\n",
- (int)addr, (unsigned)value);
+ qemu_log_mask(LOG_GUEST_ERROR, "%s: Unknown register 0x%" HWADDR_PRIx
+ " = 0x%" PRIx64 "\n", __func__, addr, value);
break;
}
}
k->chip_cfam_id = 0x120d304980000000ull; /* P8 Naples DD1.0 */
k->cores_mask = POWER8_CORE_MASK;
- k->num_phbs = 3;
+ k->num_phbs = 4;
k->core_pir = pnv_chip_core_pir_p8;
k->intc_create = pnv_chip_power8_intc_create;
k->intc_reset = pnv_chip_power8_intc_reset;
/* Fixed interval timer */
static void cpu_4xx_fit_cb (void *opaque)
{
- PowerPCCPU *cpu;
- CPUPPCState *env;
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
ppc_tb_t *tb_env;
ppc40x_timer_t *ppc40x_timer;
uint64_t now, next;
- env = opaque;
- cpu = env_archcpu(env);
tb_env = env->tb_env;
ppc40x_timer = tb_env->opaque;
now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
static void cpu_4xx_pit_cb (void *opaque)
{
- PowerPCCPU *cpu;
- CPUPPCState *env;
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
ppc_tb_t *tb_env;
ppc40x_timer_t *ppc40x_timer;
- env = opaque;
- cpu = env_archcpu(env);
tb_env = env->tb_env;
ppc40x_timer = tb_env->opaque;
env->spr[SPR_40x_TSR] |= 1 << 27;
/* Watchdog timer */
static void cpu_4xx_wdt_cb (void *opaque)
{
- PowerPCCPU *cpu;
- CPUPPCState *env;
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
ppc_tb_t *tb_env;
ppc40x_timer_t *ppc40x_timer;
uint64_t now, next;
- env = opaque;
- cpu = env_archcpu(env);
tb_env = env->tb_env;
ppc40x_timer = tb_env->opaque;
now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
return cpu_ppc_load_decr(env);
}
+void store_40x_tsr(CPUPPCState *env, target_ulong val)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+
+ trace_ppc40x_store_tsr(val);
+
+ env->spr[SPR_40x_TSR] &= ~(val & 0xFC000000);
+ if (val & 0x80000000) {
+ ppc_set_irq(cpu, PPC_INTERRUPT_PIT, 0);
+ }
+}
+
+void store_40x_tcr(CPUPPCState *env, target_ulong val)
+{
+ PowerPCCPU *cpu = env_archcpu(env);
+ ppc_tb_t *tb_env;
+
+ trace_ppc40x_store_tcr(val);
+
+ tb_env = env->tb_env;
+ env->spr[SPR_40x_TCR] = val & 0xFFC00000;
+ start_stop_pit(env, tb_env, 1);
+ cpu_4xx_wdt_cb(cpu);
+}
+
static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
{
CPUPPCState *env = opaque;
{
ppc_tb_t *tb_env;
ppc40x_timer_t *ppc40x_timer;
+ PowerPCCPU *cpu = env_archcpu(env);
+
+ trace_ppc40x_timers_init(freq);
tb_env = g_malloc0(sizeof(ppc_tb_t));
+ ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t));
+
env->tb_env = tb_env;
tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
- ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t));
tb_env->tb_freq = freq;
tb_env->decr_freq = freq;
tb_env->opaque = ppc40x_timer;
- trace_ppc40x_timers_init(freq);
- if (ppc40x_timer != NULL) {
- /* We use decr timer for PIT */
- tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env);
- ppc40x_timer->fit_timer =
- timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env);
- ppc40x_timer->wdt_timer =
- timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env);
- ppc40x_timer->decr_excp = decr_excp;
- }
+
+ /* We use decr timer for PIT */
+ tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, cpu);
+ ppc40x_timer->fit_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, cpu);
+ ppc40x_timer->wdt_timer =
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, cpu);
+ ppc40x_timer->decr_excp = decr_excp;
return &ppc_40x_set_tb_clk;
}
ppc4xx_pob_init(env);
/* OBP arbitrer */
ppc4xx_opba_init(0xef600600);
- /* Initialize timers */
- ppc_booke_timers_init(cpu, sysclk, 0);
/* Universal interrupt controller */
uicdev = qdev_new(TYPE_PPC_UIC);
uicsbd = SYS_BUS_DEVICE(uicdev);
#include "exec/address-spaces.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
-
-/*#define DEBUG_UIC*/
-
-#ifdef DEBUG_UIC
-# define LOG_UIC(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
-#else
-# define LOG_UIC(...) do { } while (0)
-#endif
+#include "trace.h"
static void ppc4xx_reset(void *opaque)
{
bcr = 0x000C0000;
break;
default:
- printf("%s: invalid RAM size " TARGET_FMT_plx "\n", __func__,
- ram_size);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid RAM size 0x%" HWADDR_PRIx "\n", __func__,
+ ram_size);
return 0x00000000;
}
bcr |= ram_base & 0xFF800000;
{
if (sdram->bcr[i] & 0x00000001) {
/* Unmap RAM */
-#ifdef DEBUG_SDRAM
- printf("%s: unmap RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n",
- __func__, sdram_base(sdram->bcr[i]), sdram_size(sdram->bcr[i]));
-#endif
+ trace_ppc4xx_sdram_unmap(sdram_base(sdram->bcr[i]),
+ sdram_size(sdram->bcr[i]));
memory_region_del_subregion(get_system_memory(),
&sdram->containers[i]);
memory_region_del_subregion(&sdram->containers[i],
}
sdram->bcr[i] = bcr & 0xFFDEE001;
if (enabled && (bcr & 0x00000001)) {
-#ifdef DEBUG_SDRAM
- printf("%s: Map RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n",
- __func__, sdram_base(bcr), sdram_size(bcr));
-#endif
+ trace_ppc4xx_sdram_map(sdram_base(bcr), sdram_size(bcr));
memory_region_init(&sdram->containers[i], NULL, "sdram-containers",
sdram_size(bcr));
memory_region_add_subregion(&sdram->containers[i], 0,
int i;
for (i = 0; i < sdram->nbanks; i++) {
-#ifdef DEBUG_SDRAM
- printf("%s: Unmap RAM area " TARGET_FMT_plx " " TARGET_FMT_lx "\n",
- __func__, sdram_base(sdram->bcr[i]), sdram_size(sdram->bcr[i]));
-#endif
+ trace_ppc4xx_sdram_unmap(sdram_base(sdram->bcr[i]),
+ sdram_size(sdram->bcr[i]));
memory_region_del_subregion(get_system_memory(),
&sdram->ram_memories[i]);
}
case 0x20: /* SDRAM_CFG */
val &= 0xFFE00000;
if (!(sdram->cfg & 0x80000000) && (val & 0x80000000)) {
-#ifdef DEBUG_SDRAM
- printf("%s: enable SDRAM controller\n", __func__);
-#endif
+ trace_ppc4xx_sdram_enable("enable");
/* validate all RAM mappings */
sdram_map_bcr(sdram);
sdram->status &= ~0x80000000;
} else if ((sdram->cfg & 0x80000000) && !(val & 0x80000000)) {
-#ifdef DEBUG_SDRAM
- printf("%s: disable SDRAM controller\n", __func__);
-#endif
+ trace_ppc4xx_sdram_enable("disable");
/* invalidate all RAM mappings */
sdram_unmap_bcr(sdram);
sdram->status |= 0x80000000;
* 4xx SoCs, such as the 440EP. */
#include "qemu/osdep.h"
+#include "qemu/log.h"
#include "hw/irq.h"
#include "hw/ppc/ppc.h"
#include "hw/ppc/ppc4xx.h"
break;
default:
- printf("%s: unhandled PCI internal register 0x%lx\n", __func__,
- (unsigned long)offset);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: unhandled PCI internal register 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
break;
}
}
break;
default:
- printf("%s: invalid PCI internal register 0x%lx\n", __func__,
- (unsigned long)offset);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: invalid PCI internal register 0x%" HWADDR_PRIx "\n",
+ __func__, offset);
value = 0;
}
} \
type_init(spapr_machine_register_##suffix)
+/*
+ * pseries-7.0
+ */
+static void spapr_machine_7_0_class_options(MachineClass *mc)
+{
+ /* Defaults for the latest behaviour inherited from the base class */
+}
+
+DEFINE_SPAPR_MACHINE(7_0, "7.0", true);
+
/*
* pseries-6.2
*/
static void spapr_machine_6_2_class_options(MachineClass *mc)
{
- /* Defaults for the latest behaviour inherited from the base class */
+ spapr_machine_7_0_class_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
}
-DEFINE_SPAPR_MACHINE(6_2, "6.2", true);
+DEFINE_SPAPR_MACHINE(6_2, "6.2", false);
/*
* pseries-6.1
ppc4xx_pit(uint32_t ar, uint32_t ir, uint64_t tcr, uint64_t tsr, uint64_t reload) "ar %d ir %d TCR 0x%" PRIx64 " TSR 0x%" PRIx64 " PIT 0x%016" PRIx64
ppc4xx_wdt(uint64_t tcr, uint64_t tsr) "TCR 0x%" PRIx64 " TSR 0x%" PRIx64
ppc40x_store_pit(uint64_t value) "val 0x%" PRIx64
+ppc40x_store_tcr(uint64_t value) "val 0x%" PRIx64
+ppc40x_store_tsr(uint64_t value) "val 0x%" PRIx64
ppc40x_set_tb_clk(uint32_t value) "new frequency %" PRIu32
ppc40x_timers_init(uint32_t value) "frequency %" PRIu32
ppc405ep_clocks_compute(const char *param, uint32_t param2, uint32_t val) "%s 0x%1" PRIx32 " %d"
ppc405ep_clocks_setup(const char *trace) "%s"
+
+# ppc4xx_devs.c
+ppc4xx_sdram_enable(const char *trace) "%s SDRAM controller"
+ppc4xx_sdram_unmap(uint64_t addr, uint64_t size) "Unmap RAM area 0x%" PRIx64 " size 0x%" PRIx64
+ppc4xx_sdram_map(uint64_t addr, uint64_t size) "Map RAM area 0x%" PRIx64 " size 0x%" PRIx64
/* Initialize SoC */
object_initialize_child(OBJECT(machine), "soc", &s->soc,
TYPE_MICROCHIP_PFSOC);
- qdev_realize(DEVICE(&s->soc), NULL, &error_abort);
+ qdev_realize(DEVICE(&s->soc), NULL, &error_fatal);
/* Split RAM into low and high regions using aliases to machine->ram */
mem_low_size = memmap[MICROCHIP_PFSOC_DRAM_LO].size;
/* Initialize SoC */
object_initialize_child(OBJECT(machine), "soc", &s->soc,
TYPE_RISCV_IBEX_SOC);
- qdev_realize(DEVICE(&s->soc), NULL, &error_abort);
+ qdev_realize(DEVICE(&s->soc), NULL, &error_fatal);
memory_region_add_subregion(sys_mem,
memmap[IBEX_DEV_RAM].base, machine->ram);
/* Initialize SoC */
object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_RISCV_E_SOC);
- qdev_realize(DEVICE(&s->soc), NULL, &error_abort);
+ qdev_realize(DEVICE(&s->soc), NULL, &error_fatal);
/* Data Tightly Integrated Memory */
memory_region_add_subregion(sys_mem,
&error_abort);
object_property_set_str(OBJECT(&s->soc), "cpu-type", machine->cpu_type,
&error_abort);
- qdev_realize(DEVICE(&s->soc), NULL, &error_abort);
+ qdev_realize(DEVICE(&s->soc), NULL, &error_fatal);
/* register RAM */
memory_region_add_subregion(system_memory, memmap[SIFIVE_U_DEV_DRAM].base,
} \
type_init(ccw_machine_register_##suffix)
+static void ccw_machine_7_0_instance_options(MachineState *machine)
+{
+}
+
+static void ccw_machine_7_0_class_options(MachineClass *mc)
+{
+}
+DEFINE_CCW_MACHINE(7_0, "7.0", true);
+
static void ccw_machine_6_2_instance_options(MachineState *machine)
{
+ ccw_machine_7_0_instance_options(machine);
}
static void ccw_machine_6_2_class_options(MachineClass *mc)
{
+ ccw_machine_7_0_class_options(mc);
+ compat_props_add(mc->compat_props, hw_compat_6_2, hw_compat_6_2_len);
}
-DEFINE_CCW_MACHINE(6_2, "6.2", true);
+DEFINE_CCW_MACHINE(6_2, "6.2", false);
static void ccw_machine_6_1_instance_options(MachineState *machine)
{
Error *err = NULL;
int vhostfd = -1;
int ret;
+ struct vhost_virtqueue *vqs = NULL;
if (!vs->conf.wwpn) {
error_setg(errp, "vhost-scsi: missing wwpn");
}
vsc->dev.nvqs = VHOST_SCSI_VQ_NUM_FIXED + vs->conf.num_queues;
- vsc->dev.vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs);
+ vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs);
+ vsc->dev.vqs = vqs;
vsc->dev.vq_index = 0;
vsc->dev.backend_features = 0;
ret = vhost_dev_init(&vsc->dev, (void *)(uintptr_t)vhostfd,
VHOST_BACKEND_TYPE_KERNEL, 0, errp);
if (ret < 0) {
+ /*
+ * vhost_dev_init() calls vhost_dev_cleanup() on error, which closes
+ * vhostfd; don't close it again here.
+ */
+ vhostfd = -1;
goto free_vqs;
}
return;
free_vqs:
- g_free(vsc->dev.vqs);
+ g_free(vqs);
if (!vsc->migratable) {
migrate_del_blocker(vsc->migration_blocker);
}
error_free(vsc->migration_blocker);
virtio_scsi_common_unrealize(dev);
close_fd:
- close(vhostfd);
+ if (vhostfd >= 0) {
+ close(vhostfd);
+ }
return;
}
size_t smbios_tables_len;
unsigned smbios_table_max;
unsigned smbios_table_cnt;
-static SmbiosEntryPointType smbios_ep_type = SMBIOS_ENTRY_POINT_21;
+static SmbiosEntryPointType smbios_ep_type = SMBIOS_ENTRY_POINT_TYPE_32;
static SmbiosEntryPoint ep;
exit(1);
}
- if (smbios_ep_type == SMBIOS_ENTRY_POINT_21 &&
+ if (smbios_ep_type == SMBIOS_ENTRY_POINT_TYPE_32 &&
smbios_tables_len > SMBIOS_21_MAX_TABLES_LEN) {
error_report("SMBIOS 2.1 table length %zu exceeds %d",
smbios_tables_len, SMBIOS_21_MAX_TABLES_LEN);
static void smbios_entry_point_setup(void)
{
switch (smbios_ep_type) {
- case SMBIOS_ENTRY_POINT_21:
+ case SMBIOS_ENTRY_POINT_TYPE_32:
memcpy(ep.ep21.anchor_string, "_SM_", 4);
memcpy(ep.ep21.intermediate_anchor_string, "_DMI_", 5);
ep.ep21.length = sizeof(struct smbios_21_entry_point);
ep.ep21.structure_table_address = cpu_to_le32(0);
break;
- case SMBIOS_ENTRY_POINT_30:
+ case SMBIOS_ENTRY_POINT_TYPE_64:
memcpy(ep.ep30.anchor_string, "_SM3_", 5);
ep.ep30.length = sizeof(struct smbios_30_entry_point);
ep.ep30.entry_point_revision = 1;
assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);
- return close(fd);
+ return close(fd) < 0 ? -errno : 0;
}
static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
&s, NULL, NULL)) {
uint64_t val = g_ascii_strtoull(s, NULL, 10);
- if (!((val == G_MAXUINT64 || !val) && errno)) {
+ if (val < INT_MAX && val > 0) {
g_free(s);
return val;
}
r = qemu_chr_fe_read_all(chr, p, size);
if (r != size) {
+ int saved_errno = errno;
error_report("Failed to read msg header. Read %d instead of %d."
" Original request %d.", r, size, msg->hdr.request);
- return -1;
+ return r < 0 ? -saved_errno : -EIO;
}
/* validate received flags */
error_report("Failed to read msg header."
" Flags 0x%x instead of 0x%x.", msg->hdr.flags,
VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
- return -1;
+ return -EPROTO;
}
return 0;
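
From here on the vhost-user helpers stop returning a bare -1 and instead return 0 or a negative errno, so callers can report the precise cause. A minimal sketch of that convention using plain libc calls, not code from the series:

    #include <errno.h>
    #include <unistd.h>

    /* Return 0 on success or a negative errno; a short read carries no
     * errno, so it is mapped to -EIO, as in the msg-header path above. */
    static int read_exact(int fd, void *buf, size_t len)
    {
        ssize_t r = read(fd, buf, len);

        if (r < 0) {
            return -errno;
        }
        return (size_t)r == len ? 0 : -EIO;
    }
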
uint8_t *p = (uint8_t *) msg;
int r, size;
- if (vhost_user_read_header(dev, msg) < 0) {
- data->ret = -1;
+ r = vhost_user_read_header(dev, msg);
+ if (r < 0) {
+ data->ret = r;
goto end;
}
error_report("Failed to read msg header."
" Size %d exceeds the maximum %zu.", msg->hdr.size,
VHOST_USER_PAYLOAD_SIZE);
- data->ret = -1;
+ data->ret = -EPROTO;
goto end;
}
size = msg->hdr.size;
r = qemu_chr_fe_read_all(chr, p, size);
if (r != size) {
+ int saved_errno = errno;
error_report("Failed to read msg payload."
" Read %d instead of %d.", r, msg->hdr.size);
- data->ret = -1;
+ data->ret = r < 0 ? -saved_errno : -EIO;
goto end;
}
}
static int process_message_reply(struct vhost_dev *dev,
const VhostUserMsg *msg)
{
+ int ret;
VhostUserMsg msg_reply;
if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
return 0;
}
- if (vhost_user_read(dev, &msg_reply) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg_reply);
+ if (ret < 0) {
+ return ret;
}
if (msg_reply.hdr.request != msg->hdr.request) {
error_report("Received unexpected msg type. "
"Expected %d received %d",
msg->hdr.request, msg_reply.hdr.request);
- return -1;
+ return -EPROTO;
}
- return msg_reply.payload.u64 ? -1 : 0;
+ return msg_reply.payload.u64 ? -EIO : 0;
}
static bool vhost_user_one_time_request(VhostUserRequest request)
if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
error_report("Failed to set msg fds.");
- return -1;
+ return -EINVAL;
}
ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
if (ret != size) {
+ int saved_errno = errno;
error_report("Failed to write msg."
" Wrote %d instead of %d.", ret, size);
- return -1;
+ return ret < 0 ? -saved_errno : -EIO;
}
return 0;
size_t fd_num = 0;
bool shmfd = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_LOG_SHMFD);
+ int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_SET_LOG_BASE,
.hdr.flags = VHOST_USER_VERSION,
fds[fd_num++] = log->fd;
}
- if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, fds, fd_num);
+ if (ret < 0) {
+ return ret;
}
if (shmfd) {
msg.hdr.size = 0;
- if (vhost_user_read(dev, &msg) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
+ return ret;
}
if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
error_report("Received unexpected msg type. "
"Expected %d received %d",
VHOST_USER_SET_LOG_BASE, msg.hdr.request);
- return -1;
+ return -EPROTO;
}
}
u->region_rb[i] = mr->ram_block;
} else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
error_report("Failed preparing vhost-user memory table msg");
- return -1;
+ return -ENOBUFS;
}
vhost_user_fill_msg_region(®ion_buffer, reg, offset);
msg->payload.memory.regions[*fd_num] = region_buffer;
if (!*fd_num) {
error_report("Failed initializing vhost-user memory map, "
"consider using -object memory-backend-file share=on");
- return -1;
+ return -EINVAL;
}
msg->hdr.size = sizeof(msg->payload.memory.nregions);
msg->hdr.size += sizeof(msg->payload.memory.padding);
msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);
- return 1;
+ return 0;
}
static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
vhost_user_fill_msg_region(®ion_buffer, shadow_reg, 0);
msg->payload.mem_reg.region = region_buffer;
- if (vhost_user_write(dev, msg, &fd, 1) < 0) {
- return -1;
+ ret = vhost_user_write(dev, msg, &fd, 1);
+ if (ret < 0) {
+ return ret;
}
if (reply_supported) {
vhost_user_fill_msg_region(®ion_buffer, reg, offset);
msg->payload.mem_reg.region = region_buffer;
- if (vhost_user_write(dev, msg, &fd, 1) < 0) {
- return -1;
+ ret = vhost_user_write(dev, msg, &fd, 1);
+ if (ret < 0) {
+ return ret;
}
if (track_ramblocks) {
uint64_t reply_gpa;
- if (vhost_user_read(dev, &msg_reply) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg_reply);
+ if (ret < 0) {
+ return ret;
}
reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;
"Expected %d received %d", __func__,
VHOST_USER_ADD_MEM_REG,
msg_reply.hdr.request);
- return -1;
+ return -EPROTO;
}
/*
error_report("%s: Unexpected size for postcopy reply "
"%d vs %d", __func__, msg_reply.hdr.size,
msg->hdr.size);
- return -1;
+ return -EPROTO;
}
/* Get the postcopy client base from the backend's reply. */
"Got guest physical address %" PRIX64 ", expected "
"%" PRIX64, __func__, reply_gpa,
dev->mem->regions[reg_idx].guest_phys_addr);
- return -1;
+ return -EPROTO;
}
} else if (reply_supported) {
ret = process_message_reply(dev, msg);
struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
int nr_add_reg, nr_rem_reg;
+ int ret;
msg->hdr.size = sizeof(msg->payload.mem_reg);
scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
shadow_pcb, track_ramblocks);
- if (nr_rem_reg && send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
- reply_supported) < 0)
- {
- goto err;
+ if (nr_rem_reg) {
+ ret = send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
+ reply_supported);
+ if (ret < 0) {
+ goto err;
+ }
}
- if (nr_add_reg && send_add_regions(dev, add_reg, nr_add_reg, msg,
- shadow_pcb, reply_supported, track_ramblocks) < 0)
- {
- goto err;
+ if (nr_add_reg) {
+ ret = send_add_regions(dev, add_reg, nr_add_reg, msg, shadow_pcb,
+ reply_supported, track_ramblocks);
+ if (ret < 0) {
+ goto err;
+ }
}
if (track_ramblocks) {
msg->hdr.size = sizeof(msg->payload.u64);
msg->payload.u64 = 0; /* OK */
- if (vhost_user_write(dev, msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
}
sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
}
- return -1;
+ return ret;
}
static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
size_t fd_num = 0;
VhostUserMsg msg_reply;
int region_i, msg_i;
+ int ret;
VhostUserMsg msg = {
.hdr.flags = VHOST_USER_VERSION,
}
if (config_mem_slots) {
- if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
- true) < 0) {
- return -1;
+ ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, true);
+ if (ret < 0) {
+ return ret;
}
} else {
- if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
- true) < 0) {
- return -1;
+ ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
+ true);
+ if (ret < 0) {
+ return ret;
}
- if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, fds, fd_num);
+ if (ret < 0) {
+ return ret;
}
- if (vhost_user_read(dev, &msg_reply) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg_reply);
+ if (ret < 0) {
+ return ret;
}
if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
error_report("%s: Received unexpected msg type."
"Expected %d received %d", __func__,
VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
- return -1;
+ return -EPROTO;
}
/*
error_report("%s: Unexpected size for postcopy reply "
"%d vs %d", __func__, msg_reply.hdr.size,
msg.hdr.size);
- return -1;
+ return -EPROTO;
}
memset(u->postcopy_client_bases, 0,
error_report("%s: postcopy reply not fully consumed "
"%d vs %zd",
__func__, msg_i, fd_num);
- return -1;
+ return -EIO;
}
/*
/* TODO: Use this for failure cases as well with a bad value. */
msg.hdr.size = sizeof(msg.payload.u64);
msg.payload.u64 = 0; /* OK */
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
}
bool config_mem_slots =
virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);
+ int ret;
if (do_postcopy) {
/*
}
if (config_mem_slots) {
- if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
- false) < 0) {
- return -1;
+ ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, false);
+ if (ret < 0) {
+ return ret;
}
} else {
- if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
- false) < 0) {
- return -1;
+ ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
+ false);
+ if (ret < 0) {
+ return ret;
}
- if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
- return -1;
+
+ ret = vhost_user_write(dev, &msg, fds, fd_num);
+ if (ret < 0) {
+ return ret;
}
if (reply_supported) {
if (!cross_endian) {
error_report("vhost-user trying to send unhandled ioctl");
- return -1;
+ return -ENOTSUP;
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
- }
-
- return 0;
+ return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_set_vring(struct vhost_dev *dev,
.hdr.size = sizeof(msg.payload.state),
};
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
- }
-
- return 0;
+ return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_set_vring_num(struct vhost_dev *dev,
int i;
if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
- return -1;
+ return -EINVAL;
}
for (i = 0; i < dev->nvqs; ++i) {
+ int ret;
struct vhost_vring_state state = {
.index = dev->vq_index + i,
.num = enable,
};
- vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
+ ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
+ if (ret < 0) {
+ /*
+ * Restoring the previous state is likely infeasible, as is
+ * proceeding regardless of the error, so just bail out and hope
+ * for device-level recovery.
+ */
+ return ret;
+ }
}
return 0;
static int vhost_user_get_vring_base(struct vhost_dev *dev,
struct vhost_vring_state *ring)
{
+ int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_GET_VRING_BASE,
.hdr.flags = VHOST_USER_VERSION,
vhost_user_host_notifier_remove(dev, ring->index);
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
- if (vhost_user_read(dev, &msg) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
+ return ret;
}
if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
error_report("Received unexpected msg type. Expected %d received %d",
VHOST_USER_GET_VRING_BASE, msg.hdr.request);
- return -1;
+ return -EPROTO;
}
if (msg.hdr.size != sizeof(msg.payload.state)) {
error_report("Received bad msg size.");
- return -1;
+ return -EPROTO;
}
*ring = msg.payload.state;
msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
}
- if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
- return -1;
- }
-
- return 0;
+ return vhost_user_write(dev, &msg, fds, fd_num);
}
static int vhost_user_set_vring_kick(struct vhost_dev *dev,
static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
+ int ret;
VhostUserMsg msg = {
.hdr.request = request,
.hdr.flags = VHOST_USER_VERSION,
return 0;
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
- if (vhost_user_read(dev, &msg) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
+ return ret;
}
if (msg.hdr.request != request) {
error_report("Received unexpected msg type. Expected %d received %d",
request, msg.hdr.request);
- return -1;
+ return -EPROTO;
}
if (msg.hdr.size != sizeof(msg.payload.u64)) {
error_report("Received bad msg size.");
- return -1;
+ return -EPROTO;
}
*u64 = msg.payload.u64;
static int vhost_user_set_vring_addr(struct vhost_dev *dev,
struct vhost_vring_addr *addr)
{
+ int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_SET_VRING_ADDR,
.hdr.flags = VHOST_USER_VERSION,
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
if (wait_for_reply) {
.payload.u64 = u64,
.hdr.size = sizeof(msg.payload.u64),
};
+ int ret;
if (wait_for_reply) {
bool reply_supported = virtio_has_feature(dev->protocol_features,
}
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
if (wait_for_reply) {
.hdr.flags = VHOST_USER_VERSION,
};
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -EPROTO;
- }
-
- return 0;
+ return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_get_max_memslots(struct vhost_dev *dev,
? VHOST_USER_RESET_DEVICE
: VHOST_USER_RESET_OWNER;
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
- }
-
- return 0;
+ return vhost_user_write(dev, &msg, NULL, 0);
}
static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
{
- int ret = -1;
-
- if (!dev->config_ops) {
- return -1;
+ if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
+ return -ENOSYS;
}
- if (dev->config_ops->vhost_dev_config_notifier) {
- ret = dev->config_ops->vhost_dev_config_notifier(dev);
- }
-
- return ret;
+ return dev->config_ops->vhost_dev_config_notifier(dev);
}
static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
if (!virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
- return -1;
+ return -EINVAL;
}
n = &user->notifier[queue_idx];
/* Sanity check. */
if (area->size != page_size) {
- return -1;
+ return -EINVAL;
}
addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
fd, area->offset);
if (addr == MAP_FAILED) {
- return -1;
+ return -EFAULT;
}
name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
object_unparent(OBJECT(&n->mr));
munmap(addr, page_size);
- return -1;
+ return -ENXIO;
}
n->addr = addr;
}
if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
+ int saved_errno = errno;
error_report("socketpair() failed");
- return -1;
+ return -saved_errno;
}
ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err));
if (!ioc) {
error_report_err(local_err);
- return -1;
+ return -ECONNREFUSED;
}
u->slave_ioc = ioc;
slave_update_read_handler(dev, NULL);
struct vhost_user *u = dev->opaque;
CharBackend *chr = u->user->chr;
int ufd;
+ int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_POSTCOPY_ADVISE,
.hdr.flags = VHOST_USER_VERSION,
};
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
error_setg(errp, "Failed to send postcopy_advise to vhost");
- return -1;
+ return ret;
}
- if (vhost_user_read(dev, &msg) < 0) {
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
error_setg(errp, "Failed to get postcopy_advise reply from vhost");
- return -1;
+ return ret;
}
if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
error_setg(errp, "Unexpected msg type. Expected %d received %d",
VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
- return -1;
+ return -EPROTO;
}
if (msg.hdr.size) {
error_setg(errp, "Received bad msg size.");
- return -1;
+ return -EPROTO;
}
ufd = qemu_chr_fe_get_msgfd(chr);
if (ufd < 0) {
error_setg(errp, "%s: Failed to get ufd", __func__);
- return -1;
+ return -EIO;
}
qemu_set_nonblock(ufd);
return 0;
#else
error_setg(errp, "Postcopy not supported on non-Linux systems");
- return -1;
+ return -ENOSYS;
#endif
}
.hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
};
u->postcopy_listen = true;
+
trace_vhost_user_postcopy_listen();
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
error_setg(errp, "Failed to send postcopy_listen to vhost");
- return -1;
+ return ret;
}
ret = process_message_reply(dev, &msg);
struct vhost_user *u = dev->opaque;
trace_vhost_user_postcopy_end_entry();
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
error_setg(errp, "Failed to send postcopy_end to vhost");
- return -1;
+ return ret;
}
ret = process_message_reply(dev, &msg);
return vhost_user_write(dev, &msg, NULL, 0);
}
- return -1;
+ return -ENOTSUP;
}
static bool vhost_user_can_merge(struct vhost_dev *dev,
VhostUserMsg msg;
bool reply_supported = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_REPLY_ACK);
+ int ret;
if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
return 0;
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
/* If reply_ack supported, slave has to ack specified MTU is valid */
static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
struct vhost_iotlb_msg *imsg)
{
+ int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_IOTLB_MSG,
.hdr.size = sizeof(msg.payload.iotlb),
.payload.iotlb = *imsg,
};
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -EFAULT;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
return process_message_reply(dev, &msg);
static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
uint32_t config_len, Error **errp)
{
+ int ret;
VhostUserMsg msg = {
.hdr.request = VHOST_USER_GET_CONFIG,
.hdr.flags = VHOST_USER_VERSION,
msg.payload.config.offset = 0;
msg.payload.config.size = config_len;
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- error_setg_errno(errp, EPROTO, "vhost_get_config failed");
- return -EPROTO;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "vhost_get_config failed");
+ return ret;
}
- if (vhost_user_read(dev, &msg) < 0) {
- error_setg_errno(errp, EPROTO, "vhost_get_config failed");
- return -EPROTO;
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret, "vhost_get_config failed");
+ return ret;
}
if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
error_setg(errp,
"Received unexpected msg type. Expected %d received %d",
VHOST_USER_GET_CONFIG, msg.hdr.request);
- return -EINVAL;
+ return -EPROTO;
}
if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
error_setg(errp, "Received bad msg size.");
- return -EINVAL;
+ return -EPROTO;
}
memcpy(config, msg.payload.config.region, config_len);
static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
uint32_t offset, uint32_t size, uint32_t flags)
{
+ int ret;
uint8_t *p;
bool reply_supported = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_REPLY_ACK);
if (!virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_CONFIG)) {
- return -1;
+ return -ENOTSUP;
}
if (reply_supported) {
}
if (size > VHOST_USER_MAX_CONFIG_SIZE) {
- return -1;
+ return -EINVAL;
}
msg.payload.config.offset = offset,
p = msg.payload.config.region;
memcpy(p, data, size);
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
if (reply_supported) {
void *session_info,
uint64_t *session_id)
{
+ int ret;
bool crypto_session = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
CryptoDevBackendSymSessionInfo *sess_info = session_info;
if (!crypto_session) {
error_report("vhost-user trying to send unhandled ioctl");
- return -1;
+ return -ENOTSUP;
}
memcpy(&msg.payload.session.session_setup_data, sess_info,
memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
sess_info->auth_key_len);
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- error_report("vhost_user_write() return -1, create session failed");
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ error_report("vhost_user_write() return %d, create session failed",
+ ret);
+ return ret;
}
- if (vhost_user_read(dev, &msg) < 0) {
- error_report("vhost_user_read() return -1, create session failed");
- return -1;
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
+ error_report("vhost_user_read() return %d, create session failed",
+ ret);
+ return ret;
}
if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
error_report("Received unexpected msg type. Expected %d received %d",
VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
- return -1;
+ return -EPROTO;
}
if (msg.hdr.size != sizeof(msg.payload.session)) {
error_report("Received bad msg size.");
- return -1;
+ return -EPROTO;
}
if (msg.payload.session.session_id < 0) {
error_report("Bad session id: %" PRId64 "",
msg.payload.session.session_id);
- return -1;
+ return -EINVAL;
}
*session_id = msg.payload.session.session_id;
static int
vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
{
+ int ret;
bool crypto_session = virtio_has_feature(dev->protocol_features,
VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
VhostUserMsg msg = {
if (!crypto_session) {
error_report("vhost-user trying to send unhandled ioctl");
- return -1;
+ return -ENOTSUP;
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- error_report("vhost_user_write() return -1, close session failed");
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ error_report("vhost_user_write() return %d, close session failed",
+ ret);
+ return ret;
}
return 0;
{
void *addr;
int fd;
+ int ret;
struct vhost_user *u = dev->opaque;
CharBackend *chr = u->user->chr;
VhostUserMsg msg = {
return 0;
}
- if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
- return -1;
+ ret = vhost_user_write(dev, &msg, NULL, 0);
+ if (ret < 0) {
+ return ret;
}
- if (vhost_user_read(dev, &msg) < 0) {
- return -1;
+ ret = vhost_user_read(dev, &msg);
+ if (ret < 0) {
+ return ret;
}
if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
error_report("Received unexpected msg type. "
"Expected %d received %d",
VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
- return -1;
+ return -EPROTO;
}
if (msg.hdr.size != sizeof(msg.payload.inflight)) {
error_report("Received bad msg size.");
- return -1;
+ return -EPROTO;
}
if (!msg.payload.inflight.mmap_size) {
fd = qemu_chr_fe_get_msgfd(chr);
if (fd < 0) {
error_report("Failed to get mem fd");
- return -1;
+ return -EIO;
}
addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
if (addr == MAP_FAILED) {
error_report("Failed to mmap mem fd");
close(fd);
- return -1;
+ return -EFAULT;
}
inflight->addr = addr;
return 0;
}
- if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) {
- return -1;
- }
-
- return 0;
+ return vhost_user_write(dev, &msg, &inflight->fd, 1);
}
bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
return ret < 0 ? -errno : ret;
}
-static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
+static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
uint8_t s;
+ int ret;
trace_vhost_vdpa_add_status(dev, status);
- if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
- return;
+ ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
+ if (ret < 0) {
+ return ret;
}
s |= status;
- vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
+ ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (!(s & status)) {
+ return -EIO;
+ }
+
+ return 0;
}
static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
}
}
if (mem->padding) {
- return -1;
+ return -EINVAL;
}
return 0;
trace_vhost_vdpa_set_features(dev, features);
ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
- uint8_t status = 0;
if (ret) {
return ret;
}
- vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
- vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);
- return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
+ return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
}
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
}
if (started) {
- uint8_t status = 0;
memory_listener_register(&v->listener, &address_space_memory);
- vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
- vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);
-
- return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
+ return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
} else {
vhost_vdpa_reset_device(dev);
vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
ret = vhost_dev_init(&vvc->vhost_dev, (void *)(uintptr_t)vhostfd,
VHOST_BACKEND_TYPE_KERNEL, 0, errp);
if (ret < 0) {
+ /*
+ * vhostfd is closed by vhost_dev_cleanup, which is called
+ * by vhost_dev_init on initialization error.
+ */
goto err_virtio;
}
return;
err_vhost_dev:
- vhost_dev_cleanup(&vvc->vhost_dev);
/* vhost_dev_cleanup() closes the vhostfd passed to vhost_dev_init() */
- vhostfd = -1;
+ vhost_dev_cleanup(&vvc->vhost_dev);
err_virtio:
vhost_vsock_common_unrealize(vdev);
- if (vhostfd >= 0) {
- close(vhostfd);
- }
- return;
}
static void vhost_vsock_device_unrealize(DeviceState *dev)
#define _VHOST_DEBUG 1
#ifdef _VHOST_DEBUG
-#define VHOST_OPS_DEBUG(fmt, ...) \
- do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
- strerror(errno), errno); } while (0)
+#define VHOST_OPS_DEBUG(retval, fmt, ...) \
+ do { \
+ error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
+ strerror(-retval), -retval); \
+ } while (0)
#else
-#define VHOST_OPS_DEBUG(fmt, ...) \
+#define VHOST_OPS_DEBUG(retval, fmt, ...) \
do { } while (0)
#endif
releasing the current log, to ensure no logging is lost */
r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_log_base failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
}
vhost_log_put(dev, true);
if (!dev->log_enabled) {
r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_mem_table failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
}
goto out;
}
}
r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_mem_table failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
}
/* To log less, can only decrease log size after table update. */
if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
if (dev->vhost_ops->vhost_vq_get_addr) {
r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_vq_get_addr failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_vq_get_addr failed");
+ return r;
}
} else {
addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_vring_addr failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_addr failed");
}
- return 0;
+ return r;
}
static int vhost_dev_set_features(struct vhost_dev *dev,
}
r = dev->vhost_ops->vhost_set_features(dev, features);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_features failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_features failed");
goto out;
}
if (dev->vhost_ops->vhost_set_backend_cap) {
r = dev->vhost_ops->vhost_set_backend_cap(dev);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_backend_cap failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_backend_cap failed");
goto out;
}
}
out:
- return r < 0 ? -errno : 0;
+ return r;
}
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
bool is_big_endian,
int vhost_vq_index)
{
+ int r;
struct vhost_vring_state s = {
.index = vhost_vq_index,
.num = is_big_endian
};
- if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) {
- return 0;
- }
-
- VHOST_OPS_DEBUG("vhost_set_vring_endian failed");
- if (errno == ENOTTY) {
- error_report("vhost does not support cross-endian");
- return -ENOSYS;
+ r = dev->vhost_ops->vhost_set_vring_endian(dev, &s);
+ if (r < 0) {
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_endian failed");
}
-
- return -errno;
+ return r;
}
static int vhost_memory_region_lookup(struct vhost_dev *hdev,
vq->num = state.num = virtio_queue_get_num(vdev, idx);
r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_num failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_num failed");
+ return r;
}
state.num = virtio_queue_get_last_avail_idx(vdev, idx);
r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_base failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_base failed");
+ return r;
}
if (vhost_needs_vring_endian(vdev)) {
virtio_is_big_endian(vdev),
vhost_vq_index);
if (r) {
- return -errno;
+ return r;
}
}
r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
if (r < 0) {
- r = -errno;
goto fail_alloc;
}
file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_kick failed");
- r = -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed");
goto fail_kick;
}
r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r);
+ VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r);
/* Connection to the backend is broken, so let's sync internal
* last avail idx to the device used idx.
*/
r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_busyloop_timeout failed");
return r;
}
file.fd = event_notifier_get_fd(&vq->masked_notifier);
r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_vring_call failed");
- r = -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed");
goto fail_call;
}
file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_vring_call failed");
+ VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed");
}
}
}
error_setg(errp, "vhost_get_config not implemented");
- return -ENOTSUP;
+ return -ENOSYS;
}
int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
size, flags);
}
- return -1;
+ return -ENOSYS;
}
void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
if (err) {
error_report_err(err);
- return -1;
+ return -ENOMEM;
}
vhost_dev_free_inflight(inflight);
}
if (inflight->size != size) {
- if (vhost_dev_resize_inflight(inflight, size)) {
- return -1;
+ int ret = vhost_dev_resize_inflight(inflight, size);
+ if (ret < 0) {
+ return ret;
}
}
inflight->queue_size = qemu_get_be16(f);
r = vhost_dev_set_features(hdev, hdev->log_enabled);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_dev_prepare_inflight failed");
+ VHOST_OPS_DEBUG(r, "vhost_dev_prepare_inflight failed");
return r;
}
if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) {
r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight);
if (r) {
- VHOST_OPS_DEBUG("vhost_set_inflight_fd failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_inflight_fd failed");
+ return r;
}
}
if (dev->vhost_ops->vhost_get_inflight_fd) {
r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight);
if (r) {
- VHOST_OPS_DEBUG("vhost_get_inflight_fd failed");
- return -errno;
+ VHOST_OPS_DEBUG(r, "vhost_get_inflight_fd failed");
+ return r;
}
}
r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_mem_table failed");
- r = -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
goto fail_mem;
}
for (i = 0; i < hdev->nvqs; ++i) {
hdev->log_size ? log_base : 0,
hdev->log);
if (r < 0) {
- VHOST_OPS_DEBUG("vhost_set_log_base failed");
- r = -errno;
+ VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
goto fail_log;
}
}
return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
}
- return -1;
+ return -ENOSYS;
}
#include CONFIG_DEVICES
#include "trace.h"
+/*
+ * We only had legacy x86 guests that did not support
+ * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE. Other targets don't have legacy guests.
+ */
+#if defined(TARGET_X86_64) || defined(TARGET_I386)
+#define VIRTIO_MEM_HAS_LEGACY_GUESTS
+#endif
+
/*
* Let's not allow blocks smaller than 1 MiB, for example, to keep the tracking
* bitmap small.
return MAX(page_size, VIRTIO_MEM_MIN_BLOCK_SIZE);
}
+#if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS)
+static bool virtio_mem_has_shared_zeropage(RAMBlock *rb)
+{
+ /*
+ * We only have a guaranteed shared zeropage on ordinary MAP_PRIVATE
+ * anonymous RAM. In any other case, reading unplugged *can* populate a
+ * fresh page, consuming actual memory.
+ */
+ return !qemu_ram_is_shared(rb) && rb->fd < 0 &&
+ qemu_ram_pagesize(rb) == qemu_real_host_page_size;
+}
+#endif /* VIRTIO_MEM_HAS_LEGACY_GUESTS */
+
/*
* Size the usable region bigger than the requested size if possible. Esp.
* Linux guests will only add (aligned) memory blocks in case they fully
return -EBUSY;
}
virtio_mem_notify_unplug(vmem, offset, size);
- } else if (virtio_mem_notify_plug(vmem, offset, size)) {
- /* Could be a mapping attempt resulted in memory getting populated. */
- ram_block_discard_range(vmem->memdev->mr.ram_block, offset, size);
- return -EBUSY;
+ } else {
+ int ret = 0;
+
+ if (vmem->prealloc) {
+ void *area = memory_region_get_ram_ptr(&vmem->memdev->mr) + offset;
+ int fd = memory_region_get_fd(&vmem->memdev->mr);
+ Error *local_err = NULL;
+
+ os_mem_prealloc(fd, area, size, 1, &local_err);
+ if (local_err) {
+ static bool warned;
+
+ /*
+ * Warn only once; we don't want to fill the log with these
+ * warnings.
+ */
+ if (!warned) {
+ warn_report_err(local_err);
+ warned = true;
+ } else {
+ error_free(local_err);
+ }
+ ret = -EBUSY;
+ }
+ }
+ if (!ret) {
+ ret = virtio_mem_notify_plug(vmem, offset, size);
+ }
+
+ if (ret) {
+ /* Preallocation or a plug notifier failed; memory may have been populated. */
+ ram_block_discard_range(vmem->memdev->mr.ram_block, offset, size);
+ return -EBUSY;
+ }
}
virtio_mem_set_bitmap(vmem, start_gpa, size, plug);
return 0;
Error **errp)
{
MachineState *ms = MACHINE(qdev_get_machine());
+ VirtIOMEM *vmem = VIRTIO_MEM(vdev);
if (ms->numa_state) {
#if defined(CONFIG_ACPI)
virtio_add_feature(&features, VIRTIO_MEM_F_ACPI_PXM);
#endif
}
+ assert(vmem->unplugged_inaccessible != ON_OFF_AUTO_AUTO);
+ if (vmem->unplugged_inaccessible == ON_OFF_AUTO_ON) {
+ virtio_add_feature(&features, VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE);
+ }
return features;
}
+static int virtio_mem_validate_features(VirtIODevice *vdev)
+{
+ if (virtio_host_has_feature(vdev, VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE) &&
+ !virtio_vdev_has_feature(vdev, VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE)) {
+ return -EFAULT;
+ }
+ return 0;
+}
+
static void virtio_mem_system_reset(void *opaque)
{
VirtIOMEM *vmem = VIRTIO_MEM(opaque);
rb = vmem->memdev->mr.ram_block;
page_size = qemu_ram_pagesize(rb);
+#if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS)
+ switch (vmem->unplugged_inaccessible) {
+ case ON_OFF_AUTO_AUTO:
+ if (virtio_mem_has_shared_zeropage(rb)) {
+ vmem->unplugged_inaccessible = ON_OFF_AUTO_OFF;
+ } else {
+ vmem->unplugged_inaccessible = ON_OFF_AUTO_ON;
+ }
+ break;
+ case ON_OFF_AUTO_OFF:
+ if (!virtio_mem_has_shared_zeropage(rb)) {
+ warn_report("'%s' property set to 'off' with a memdev that does"
+ " not support the shared zeropage.",
+ VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP);
+ }
+ break;
+ default:
+ break;
+ }
+#else /* VIRTIO_MEM_HAS_LEGACY_GUESTS */
+ vmem->unplugged_inaccessible = ON_OFF_AUTO_ON;
+#endif /* VIRTIO_MEM_HAS_LEGACY_GUESTS */
+
/*
* If the block size wasn't configured by the user, use a sane default. This
* allows using hugetlbfs backends of any page size without manual
warn_report("'%s' property is smaller than the default block size (%"
PRIx64 " MiB)", VIRTIO_MEM_BLOCK_SIZE_PROP,
virtio_mem_default_block_size(rb) / MiB);
- } else if (!QEMU_IS_ALIGNED(vmem->requested_size, vmem->block_size)) {
+ }
+ if (!QEMU_IS_ALIGNED(vmem->requested_size, vmem->block_size)) {
error_setg(errp, "'%s' property has to be multiples of '%s' (0x%" PRIx64
")", VIRTIO_MEM_REQUESTED_SIZE_PROP,
VIRTIO_MEM_BLOCK_SIZE_PROP, vmem->block_size);
static Property virtio_mem_properties[] = {
DEFINE_PROP_UINT64(VIRTIO_MEM_ADDR_PROP, VirtIOMEM, addr, 0),
DEFINE_PROP_UINT32(VIRTIO_MEM_NODE_PROP, VirtIOMEM, node, 0),
+ DEFINE_PROP_BOOL(VIRTIO_MEM_PREALLOC_PROP, VirtIOMEM, prealloc, false),
DEFINE_PROP_LINK(VIRTIO_MEM_MEMDEV_PROP, VirtIOMEM, memdev,
TYPE_MEMORY_BACKEND, HostMemoryBackend *),
+#if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS)
+ DEFINE_PROP_ON_OFF_AUTO(VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP, VirtIOMEM,
+ unplugged_inaccessible, ON_OFF_AUTO_AUTO),
+#endif
DEFINE_PROP_END_OF_LIST(),
};
vdc->unrealize = virtio_mem_device_unrealize;
vdc->get_config = virtio_mem_get_config;
vdc->get_features = virtio_mem_get_features;
+ vdc->validate_features = virtio_mem_validate_features;
vdc->vmsd = &vmstate_virtio_mem_device;
vmc->fill_device_info = virtio_mem_fill_device_info;
if (vq->used_idx >= vq->vring.num) {
vq->used_idx -= vq->vring.num;
vq->used_wrap_counter ^= 1;
+ vq->signalled_used_valid = false;
}
}
int print_insn_xtensa (bfd_vma, disassemble_info*);
int print_insn_riscv32 (bfd_vma, disassemble_info*);
int print_insn_riscv64 (bfd_vma, disassemble_info*);
+int print_insn_riscv128 (bfd_vma, disassemble_info*);
int print_insn_rx(bfd_vma, disassemble_info *);
int print_insn_hexagon(bfd_vma, disassemble_info *);
MO_UB = MO_8,
MO_UW = MO_16,
MO_UL = MO_32,
+ MO_UQ = MO_64,
+ MO_UO = MO_128,
MO_SB = MO_SIGN | MO_8,
MO_SW = MO_SIGN | MO_16,
MO_SL = MO_SIGN | MO_32,
- MO_Q = MO_64,
+ MO_SQ = MO_SIGN | MO_64,
+ MO_SO = MO_SIGN | MO_128,
MO_LEUW = MO_LE | MO_UW,
MO_LEUL = MO_LE | MO_UL,
+ MO_LEUQ = MO_LE | MO_UQ,
MO_LESW = MO_LE | MO_SW,
MO_LESL = MO_LE | MO_SL,
- MO_LEQ = MO_LE | MO_Q,
+ MO_LESQ = MO_LE | MO_SQ,
MO_BEUW = MO_BE | MO_UW,
MO_BEUL = MO_BE | MO_UL,
+ MO_BEUQ = MO_BE | MO_UQ,
MO_BESW = MO_BE | MO_SW,
MO_BESL = MO_BE | MO_SL,
- MO_BEQ = MO_BE | MO_Q,
+ MO_BESQ = MO_BE | MO_SQ,
#ifdef NEED_CPU_H
MO_TEUW = MO_TE | MO_UW,
MO_TEUL = MO_TE | MO_UL,
+ MO_TEUQ = MO_TE | MO_UQ,
+ MO_TEUO = MO_TE | MO_UO,
MO_TESW = MO_TE | MO_SW,
MO_TESL = MO_TE | MO_SL,
- MO_TEQ = MO_TE | MO_Q,
+ MO_TESQ = MO_TE | MO_SQ,
#endif
MO_SSIZE = MO_SIZE | MO_SIGN,
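
The MemOp rename makes the 64-bit constants explicitly unsigned (MO_UQ and the MO_*EUQ variants replace MO_Q/MO_*EQ) and adds signed and 128-bit forms. A hedged sketch of a frontend using the renamed constant with the existing tcg_gen_qemu_ld_i64() API (the wrapper name is illustrative):

    /* Load a 64-bit little-endian guest value; before this series the
     * MemOp argument was spelled MO_LEQ. */
    static void gen_ld64_le(TCGv_i64 dest, TCGv addr, int mem_index)
    {
        tcg_gen_qemu_ld_i64(dest, addr, mem_index, MO_LEUQ);
    }
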
ASPEED_DEV_EMMC,
ASPEED_DEV_KCS,
ASPEED_DEV_HACE,
+ ASPEED_DEV_DPMCU,
+ ASPEED_DEV_DP,
};
#endif /* ASPEED_SOC_H */
} \
type_init(machine_initfn##_register_types)
+extern GlobalProperty hw_compat_6_2[];
+extern const size_t hw_compat_6_2_len;
+
extern GlobalProperty hw_compat_6_1[];
extern const size_t hw_compat_6_1_len;
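
hw_compat_6_2 is consumed by the frozen 6.2 machine types above (arm virt, pseries, s390 ccw). A sketch of how the array is expected to be defined in hw/core/machine.c; it starts out empty, and the sample entry is only an assumption about the kind of override later patches might append:

    GlobalProperty hw_compat_6_2[] = {
        /* e.g. { "virtio-mem", "unplugged-inaccessible", "off" }, */
    };
    const size_t hw_compat_6_2_len = G_N_ELEMENTS(hw_compat_6_2);
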
bool ignore_memory_transaction_failures;
+ /* Used for user-only emulation of prctl(PR_SET_UNALIGN). */
+ bool prctl_unalign_sigbus;
+
struct hax_vcpu_state *hax_vcpu;
struct hvf_vcpu_state *hvf;
#ifndef QEMU_SMBIOS_H
#define QEMU_SMBIOS_H
+#include "qapi/qapi-types-machine.h"
+
/*
* SMBIOS Support
*
uint64_t length;
};
-/*
- * SMBIOS spec defined tables
- */
-typedef enum SmbiosEntryPointType {
- SMBIOS_ENTRY_POINT_21,
- SMBIOS_ENTRY_POINT_30,
-} SmbiosEntryPointType;
-
/* SMBIOS Entry Point
* There are two types of entry points defined in the SMBIOS specification
* (see below). BIOS must place the entry point(s) at a 16-byte-aligned
#include "hw/hotplug.h"
#include "qom/object.h"
#include "hw/i386/sgx-epc.h"
+#include "hw/firmware/smbios.h"
#define HPET_INTCAP "hpet-intcap"
/* Configuration options: */
uint64_t max_ram_below_4g;
OnOffAuto vmport;
+ SmbiosEntryPointType smbios_entry_point_type;
bool acpi_build_enabled;
bool smbus_enabled;
#define PC_MACHINE_SATA "sata"
#define PC_MACHINE_PIT "pit"
#define PC_MACHINE_MAX_FW_SIZE "max-fw-size"
+#define PC_MACHINE_SMBIOS_EP "smbios-entry-point-type"
+
/**
* PCMachineClass:
*
/* sgx.c */
void pc_machine_init_sgx_epc(PCMachineState *pcms);
+extern GlobalProperty pc_compat_6_2[];
+extern const size_t pc_compat_6_2_len;
+
extern GlobalProperty pc_compat_6_1[];
extern const size_t pc_compat_6_1_len;
bool indirect;
uint16_t entry_sz;
uint32_t page_sz;
- uint32_t max_entries;
- union {
- uint32_t max_devids;
- uint32_t max_collids;
- } maxids;
+ uint32_t num_entries;
+ uint32_t num_ids;
uint64_t base_addr;
} TableDesc;
typedef struct {
bool valid;
- uint32_t max_entries;
+ uint32_t num_entries;
uint64_t base_addr;
} CmdQDesc;
uint32_t phb_id;
uint64_t version;
- uint16_t device_id;
char bus_path[8];
const char *stk_compat;
int stk_compat_size;
uint64_t version;
- uint64_t device_id;
const uint32_t *num_stacks;
};
qemu_irq pci_allocate_irq(PCIDevice *pci_dev);
void pci_set_irq(PCIDevice *pci_dev, int level);
+static inline int pci_intx(PCIDevice *pci_dev)
+{
+ return pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1;
+}
+
static inline void pci_irq_assert(PCIDevice *pci_dev)
{
pci_set_irq(pci_dev, 1);
#include "hw/block/flash.h"
#include "qom/object.h"
-#define VIRT_CPUS_MAX 8
+#define VIRT_CPUS_MAX 32
#define VIRT_SOCKETS_MAX 8
#define TYPE_RISCV_VIRT_MACHINE MACHINE_TYPE_NAME("virt")
#define VIRTIO_MEM_REQUESTED_SIZE_PROP "requested-size"
#define VIRTIO_MEM_BLOCK_SIZE_PROP "block-size"
#define VIRTIO_MEM_ADDR_PROP "memaddr"
+#define VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP "unplugged-inaccessible"
+#define VIRTIO_MEM_PREALLOC_PROP "prealloc"
struct VirtIOMEM {
VirtIODevice parent_obj;
/* block size and alignment */
uint64_t block_size;
+ /*
+ * Whether we indicate VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE to the guest.
+ * For !x86 targets (which have no legacy guests) this is always "on",
+ * so the feature is always indicated there.
+ */
+ OnOffAuto unplugged_inaccessible;
+
+ /* whether to prealloc memory when plugging new blocks */
+ bool prealloc;
+
/* notifiers to notify when "size" changes */
NotifierList size_change_notifiers;
#endif
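
Both additions are ordinary qdev properties, so they can be set before realize like any other. A minimal sketch under that assumption (the helper name is hypothetical; on the command line the equivalent would be prealloc=on,unplugged-inaccessible=off):

    /* Enable preallocation on plug; qdev_prop_set_bit() applies because
     * VIRTIO_MEM_PREALLOC_PROP is a DEFINE_PROP_BOOL property. */
    static void tune_virtio_mem(DeviceState *dev)
    {
        qdev_prop_set_bit(dev, VIRTIO_MEM_PREALLOC_PROP, true);
    }
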
}
+static inline Int128 int128_divu(Int128 a, Int128 b)
+{
+ return (__uint128_t)a / (__uint128_t)b;
+}
+
+static inline Int128 int128_remu(Int128 a, Int128 b)
+{
+ return (__uint128_t)a % (__uint128_t)b;
+}
+
+static inline Int128 int128_divs(Int128 a, Int128 b)
+{
+ return a / b;
+}
+
+static inline Int128 int128_rems(Int128 a, Int128 b)
+{
+ return a % b;
+}
+
#else /* !CONFIG_INT128 */
typedef struct Int128 Int128;
return int128_make128(bswap64(a.hi), bswap64(a.lo));
}
+Int128 int128_divu(Int128, Int128);
+Int128 int128_remu(Int128, Int128);
+Int128 int128_divs(Int128, Int128);
+Int128 int128_rems(Int128, Int128);
+
#endif /* CONFIG_INT128 */
static inline void bswap128s(Int128 *s)
*s = bswap128(*s);
}
+#define UINT128_MAX int128_make128(~0LL, ~0LL)
+
#endif /* INT128_H */
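
Outside the header, the new unsigned helpers pair naturally with the existing int128_make64()/int128_get64() accessors. A minimal sketch, not part of the patch, assuming a hypothetical caller that splits a byte count into 4 KiB pages:

#include "qemu/int128.h"

/* Hypothetical helper, for illustration only: both the CONFIG_INT128 and
 * the fallback build expose the same int128_divu()/int128_remu() API. */
static uint64_t bytes_to_4k_pages(Int128 bytes)
{
    Int128 pages = int128_divu(bytes, int128_make64(4096));
    return int128_get64(pages);
}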
#else
#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED
#endif
+#ifdef MADV_POPULATE_WRITE
+#define QEMU_MADV_POPULATE_WRITE MADV_POPULATE_WRITE
+#else
+#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID
+#endif
#elif defined(CONFIG_POSIX_MADVISE)
#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_REMOVE QEMU_MADV_DONTNEED
+#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID
#else /* no-op */
#define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID
#define QEMU_MADV_REMOVE QEMU_MADV_INVALID
+#define QEMU_MADV_POPULATE_WRITE QEMU_MADV_INVALID
#endif
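
Illustration only, not from the patch: with this mapping a caller can ask qemu_madvise() for write-prefaulting and simply treat failure as "host does not support it", since on such hosts QEMU_MADV_POPULATE_WRITE resolves to QEMU_MADV_INVALID.

#include "qemu/osdep.h"

/* Hypothetical sketch: prefault a writable range, returning false when
 * MADV_POPULATE_WRITE is unavailable and qemu_madvise() fails. */
static bool prefault_writable(void *addr, size_t len)
{
    return qemu_madvise(addr, len, QEMU_MADV_POPULATE_WRITE) == 0;
}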
* explicitly triggered (VIRTIO_MEM_REQ_UNPLUG).
*
* There are no guarantees what will happen if unplugged memory is
- * read/written. Such memory should, in general, not be touched. E.g.,
- * even writing might succeed, but the values will simply be discarded at
- * random points in time.
+ * read/written. In general, unplugged memory should not be touched, because
+ * the resulting action is undefined. There is one exception: without
+ * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, unplugged memory inside the usable
+ * region can be read, to simplify creation of memory dumps.
*
* It can happen that the device cannot process a request, because it is
* busy. The device driver has to retry later.
/* node_id is an ACPI PXM and is valid */
#define VIRTIO_MEM_F_ACPI_PXM 0
+/* unplugged memory must not be accessed */
+#define VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE 1
/* --- virtio-mem: guest -> host requests --- */
void qemu_add_exit_notifier(Notifier *notify);
void qemu_remove_exit_notifier(Notifier *notify);
-void qemu_run_machine_init_done_notifiers(void);
void qemu_add_machine_init_done_notifier(Notifier *notify);
void qemu_remove_machine_init_done_notifier(Notifier *notify);
static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
{
- tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEQ);
+ tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEUQ);
}
static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
{
- tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEQ);
+ tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEUQ);
}
void tcg_gen_atomic_cmpxchg_i32(TCGv_i32, TCGv, TCGv_i32, TCGv_i32,
--- /dev/null
+/*
+ * AArch64 specific prctl functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef AARCH64_TARGET_PRCTL_H
+#define AARCH64_TARGET_PRCTL_H
+
+static abi_long do_prctl_get_vl(CPUArchState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ return ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
+ }
+ return -TARGET_EINVAL;
+}
+#define do_prctl_get_vl do_prctl_get_vl
+
+static abi_long do_prctl_set_vl(CPUArchState *env, abi_long arg2)
+{
+ /*
+ * We cannot support either PR_SVE_SET_VL_ONEXEC or PR_SVE_VL_INHERIT.
+ * Note the kernel definition of sve_vl_valid allows for VQ=512,
+ * i.e. VL=8192, even though the current architectural maximum is VQ=16.
+ */
+ if (cpu_isar_feature(aa64_sve, env_archcpu(env))
+ && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
+ ARMCPU *cpu = env_archcpu(env);
+ uint32_t vq, old_vq;
+
+ old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
+ vq = MAX(arg2 / 16, 1);
+ vq = MIN(vq, cpu->sve_max_vq);
+
+ if (vq < old_vq) {
+ aarch64_sve_narrow_vq(env, vq);
+ }
+ env->vfp.zcr_el[1] = vq - 1;
+ arm_rebuild_hflags(env);
+ return vq * 16;
+ }
+ return -TARGET_EINVAL;
+}
+#define do_prctl_set_vl do_prctl_set_vl
+
+static abi_long do_prctl_reset_keys(CPUArchState *env, abi_long arg2)
+{
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
+ int all = (PR_PAC_APIAKEY | PR_PAC_APIBKEY |
+ PR_PAC_APDAKEY | PR_PAC_APDBKEY | PR_PAC_APGAKEY);
+ int ret = 0;
+ Error *err = NULL;
+
+ if (arg2 == 0) {
+ arg2 = all;
+ } else if (arg2 & ~all) {
+ return -TARGET_EINVAL;
+ }
+ if (arg2 & PR_PAC_APIAKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apia,
+ sizeof(ARMPACKey), &err);
+ }
+ if (arg2 & PR_PAC_APIBKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apib,
+ sizeof(ARMPACKey), &err);
+ }
+ if (arg2 & PR_PAC_APDAKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apda,
+ sizeof(ARMPACKey), &err);
+ }
+ if (arg2 & PR_PAC_APDBKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apdb,
+ sizeof(ARMPACKey), &err);
+ }
+ if (arg2 & PR_PAC_APGAKEY) {
+ ret |= qemu_guest_getrandom(&env->keys.apga,
+ sizeof(ARMPACKey), &err);
+ }
+ if (ret != 0) {
+ /*
+ * Some unknown failure in the crypto. The best
+ * we can do is log it and fail the syscall.
+ * The real syscall cannot fail this way.
+ */
+ qemu_log_mask(LOG_UNIMP, "PR_PAC_RESET_KEYS: Crypto failure: %s",
+ error_get_pretty(err));
+ error_free(err);
+ return -TARGET_EIO;
+ }
+ return 0;
+ }
+ return -TARGET_EINVAL;
+}
+#define do_prctl_reset_keys do_prctl_reset_keys
+
+static abi_long do_prctl_set_tagged_addr_ctrl(CPUArchState *env, abi_long arg2)
+{
+ abi_ulong valid_mask = PR_TAGGED_ADDR_ENABLE;
+ ARMCPU *cpu = env_archcpu(env);
+
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ valid_mask |= PR_MTE_TCF_MASK;
+ valid_mask |= PR_MTE_TAG_MASK;
+ }
+
+ if (arg2 & ~valid_mask) {
+ return -TARGET_EINVAL;
+ }
+ env->tagged_addr_enable = arg2 & PR_TAGGED_ADDR_ENABLE;
+
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ switch (arg2 & PR_MTE_TCF_MASK) {
+ case PR_MTE_TCF_NONE:
+ case PR_MTE_TCF_SYNC:
+ case PR_MTE_TCF_ASYNC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
+ * Note that the syscall values are consistent with hw.
+ */
+ env->cp15.sctlr_el[1] =
+ deposit64(env->cp15.sctlr_el[1], 38, 2, arg2 >> PR_MTE_TCF_SHIFT);
+
+ /*
+ * Write PR_MTE_TAG to GCR_EL1[Exclude].
+ * Note that the syscall uses an include mask,
+ * and hardware uses an exclude mask -- invert.
+ */
+ env->cp15.gcr_el1 =
+ deposit64(env->cp15.gcr_el1, 0, 16, ~arg2 >> PR_MTE_TAG_SHIFT);
+ arm_rebuild_hflags(env);
+ }
+ return 0;
+}
+#define do_prctl_set_tagged_addr_ctrl do_prctl_set_tagged_addr_ctrl
+
+static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ abi_long ret = 0;
+
+ if (env->tagged_addr_enable) {
+ ret |= PR_TAGGED_ADDR_ENABLE;
+ }
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ /* See do_prctl_set_tagged_addr_ctrl. */
+ ret |= extract64(env->cp15.sctlr_el[1], 38, 2) << PR_MTE_TCF_SHIFT;
+ ret = deposit64(ret, PR_MTE_TAG_SHIFT, 16, ~env->cp15.gcr_el1);
+ }
+ return ret;
+}
+#define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl
+
+#endif /* AARCH64_TARGET_PRCTL_H */
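
A guest-side sketch (not part of the patch) of the interface these helpers emulate; the PR_SVE_* constants get local fallback definitions in case the guest libc headers predate them:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SVE_GET_VL
#define PR_SVE_SET_VL 50
#define PR_SVE_GET_VL 51
#endif

int main(void)
{
    long vl = prctl(PR_SVE_GET_VL, 0, 0, 0, 0);
    if (vl >= 0) {
        /* The low 16 bits carry the vector length in bytes. */
        printf("SVE VL: %ld bytes\n", vl & 0xffff);
    } else {
        printf("SVE not supported\n");
    }
    return 0;
}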
#ifndef AARCH64_TARGET_SIGNAL_H
#define AARCH64_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
#endif
#define UNAME_MINIMUM_RELEASE "3.8.0"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
-#define TARGET_PR_SVE_SET_VL 50
-#define TARGET_PR_SVE_GET_VL 51
-
-#define TARGET_PR_PAC_RESET_KEYS 54
-# define TARGET_PR_PAC_APIAKEY (1 << 0)
-# define TARGET_PR_PAC_APIBKEY (1 << 1)
-# define TARGET_PR_PAC_APDAKEY (1 << 2)
-# define TARGET_PR_PAC_APDBKEY (1 << 3)
-# define TARGET_PR_PAC_APGAKEY (1 << 4)
-
-#define TARGET_PR_SET_TAGGED_ADDR_CTRL 55
-#define TARGET_PR_GET_TAGGED_ADDR_CTRL 56
-# define TARGET_PR_TAGGED_ADDR_ENABLE (1UL << 0)
-/* MTE tag check fault modes */
-# define TARGET_PR_MTE_TCF_SHIFT 1
-# define TARGET_PR_MTE_TCF_NONE (0UL << TARGET_PR_MTE_TCF_SHIFT)
-# define TARGET_PR_MTE_TCF_SYNC (1UL << TARGET_PR_MTE_TCF_SHIFT)
-# define TARGET_PR_MTE_TCF_ASYNC (2UL << TARGET_PR_MTE_TCF_SHIFT)
-# define TARGET_PR_MTE_TCF_MASK (3UL << TARGET_PR_MTE_TCF_SHIFT)
-/* MTE tag inclusion mask */
-# define TARGET_PR_MTE_TAG_SHIFT 3
-# define TARGET_PR_MTE_TAG_MASK (0xffffUL << TARGET_PR_MTE_TAG_SHIFT)
-
#endif /* AARCH64_TARGET_SYSCALL_H */
--- /dev/null
+#include "../generic/target_prctl_unalign.h"
#define TARGET_SA_SIGINFO 0x00000040
#define TARGET_MINSIGSTKSZ 4096
-#define TARGET_SIGSTKSZ 16384
/* From <asm/gentrap.h>. */
#define TARGET_GEN_INTOVF -1 /* integer overflow */
#define TARGET_UAC_NOPRINT 1
#define TARGET_UAC_NOFIX 2
#define TARGET_UAC_SIGBUS 4
-#define TARGET_MINSIGSTKSZ 4096
#define TARGET_MCL_CURRENT 0x2000
#define TARGET_MCL_FUTURE 0x4000
#define TARGET_MCL_ONFAULT 0x8000
--- /dev/null
+/* No special prctl support required. */
#ifndef ARM_TARGET_SIGNAL_H
#define ARM_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
--- /dev/null
+/* No special prctl support required. */
#ifndef CRIS_TARGET_SIGNAL_H
#define CRIS_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
};
#define TARGET_CLONE_BACKWARDS2
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
/* The commpage only exists for 32 bit kernels */
-#define ARM_COMMPAGE (intptr_t)0xffff0f00u
+#define HI_COMMPAGE (intptr_t)0xffff0f00u
static bool init_guest_commpage(void)
{
- void *want = g2h_untagged(ARM_COMMPAGE & -qemu_host_page_size);
+ void *want = g2h_untagged(HI_COMMPAGE & -qemu_host_page_size);
void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
regs->estatus = 0x3;
}
+#define LO_COMMPAGE TARGET_PAGE_SIZE
+
+static bool init_guest_commpage(void)
+{
+ static const uint8_t kuser_page[4 + 2 * 64] = {
+ /* __kuser_helper_version */
+ [0x00] = 0x02, 0x00, 0x00, 0x00,
+
+ /* __kuser_cmpxchg */
+ [0x04] = 0x3a, 0x6c, 0x3b, 0x00, /* trap 16 */
+ 0x3a, 0x28, 0x00, 0xf8, /* ret */
+
+ /* __kuser_sigtramp */
+ [0x44] = 0xc4, 0x22, 0x80, 0x00, /* movi r2, __NR_rt_sigreturn */
+ 0x3a, 0x68, 0x3b, 0x00, /* trap 0 */
+ };
+
+ void *want = g2h_untagged(LO_COMMPAGE & -qemu_host_page_size);
+ void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
+
+ if (addr == MAP_FAILED) {
+ perror("Allocating guest commpage");
+ exit(EXIT_FAILURE);
+ }
+ if (addr != want) {
+ return false;
+ }
+
+ memcpy(addr, kuser_page, sizeof(kuser_page));
+
+ if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
+ perror("Protecting guest commpage");
+ exit(EXIT_FAILURE);
+ }
+
+ page_set_flags(LO_COMMPAGE, LO_COMMPAGE + TARGET_PAGE_SIZE,
+ PAGE_READ | PAGE_EXEC | PAGE_VALID);
+ return true;
+}
+
#define ELF_EXEC_PAGESIZE 4096
#define USE_ELF_CORE_DUMP
return sp;
}
-#ifndef ARM_COMMPAGE
-#define ARM_COMMPAGE 0
+#if defined(HI_COMMPAGE)
+#define LO_COMMPAGE 0
+#elif defined(LO_COMMPAGE)
+#define HI_COMMPAGE 0
+#else
+#define HI_COMMPAGE 0
+#define LO_COMMPAGE 0
#define init_guest_commpage() true
#endif
}
loaddr &= -align;
- if (ARM_COMMPAGE) {
+ if (HI_COMMPAGE) {
/*
* Extend the allocation to include the commpage.
* For a 64-bit host, this is just 4GiB; for a 32-bit host we
if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) {
hiaddr = (uintptr_t) 4 << 30;
} else {
- offset = -(ARM_COMMPAGE & -align);
+ offset = -(HI_COMMPAGE & -align);
}
+ } else if (LO_COMMPAGE) {
+ loaddr = MIN(loaddr, LO_COMMPAGE & -align);
}
addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset);
if (addr == -1) {
/*
- * If ARM_COMMPAGE, there *might* be a non-consecutive allocation
+ * If HI_COMMPAGE, there *might* be a non-consecutive allocation
* that can satisfy both. But as the normal arm32 link base address
* is ~32k, and we extend down to include the commpage, making the
* overhead only ~96k, this is unlikely.
* All we need is a commpage that satisfies align.
* If we do not need a commpage, leave guest_base == 0.
*/
- if (ARM_COMMPAGE) {
+ if (HI_COMMPAGE) {
uintptr_t addr, commpage;
/* 64-bit hosts should have used reserved_va. */
* By putting the commpage at the first hole, that puts guest_base
* just above that, and maximises the positive guest addresses.
*/
- commpage = ARM_COMMPAGE & -align;
+ commpage = HI_COMMPAGE & -align;
addr = pgb_find_hole(commpage, -commpage, align, 0);
assert(addr != -1);
guest_base = addr;
QEMU_IFLA_PROP_LIST,
QEMU_IFLA_ALT_IFNAME,
QEMU_IFLA_PERM_ADDRESS,
+ QEMU_IFLA_PROTO_DOWN_REASON,
+ QEMU_IFLA_PARENT_DEV_NAME,
+ QEMU_IFLA_PARENT_DEV_BUS_NAME,
QEMU___IFLA_MAX
};
QEMU_IFLA_BRPORT_BACKUP_PORT,
QEMU_IFLA_BRPORT_MRP_RING_OPEN,
QEMU_IFLA_BRPORT_MRP_IN_OPEN,
+ QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
+ QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
QEMU___IFLA_BRPORT_MAX
};
QEMU___RTA_MAX
};
+enum {
+ QEMU_IFLA_VF_STATS_RX_PACKETS,
+ QEMU_IFLA_VF_STATS_TX_PACKETS,
+ QEMU_IFLA_VF_STATS_RX_BYTES,
+ QEMU_IFLA_VF_STATS_TX_BYTES,
+ QEMU_IFLA_VF_STATS_BROADCAST,
+ QEMU_IFLA_VF_STATS_MULTICAST,
+ QEMU_IFLA_VF_STATS_PAD,
+ QEMU_IFLA_VF_STATS_RX_DROPPED,
+ QEMU_IFLA_VF_STATS_TX_DROPPED,
+ QEMU__IFLA_VF_STATS_MAX,
+};
+
+enum {
+ QEMU_IFLA_VF_UNSPEC,
+ QEMU_IFLA_VF_MAC,
+ QEMU_IFLA_VF_VLAN,
+ QEMU_IFLA_VF_TX_RATE,
+ QEMU_IFLA_VF_SPOOFCHK,
+ QEMU_IFLA_VF_LINK_STATE,
+ QEMU_IFLA_VF_RATE,
+ QEMU_IFLA_VF_RSS_QUERY_EN,
+ QEMU_IFLA_VF_STATS,
+ QEMU_IFLA_VF_TRUST,
+ QEMU_IFLA_VF_IB_NODE_GUID,
+ QEMU_IFLA_VF_IB_PORT_GUID,
+ QEMU_IFLA_VF_VLAN_LIST,
+ QEMU_IFLA_VF_BROADCAST,
+ QEMU__IFLA_VF_MAX,
+};
+
TargetFdTrans **target_fd_trans;
QemuMutex target_fd_trans_lock;
unsigned int target_fd_max;
    /* uint32_t */
case QEMU_IFLA_BRPORT_COST:
case QEMU_IFLA_BRPORT_BACKUP_PORT:
+ case QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT:
+ case QEMU_IFLA_BRPORT_MCAST_EHT_HOSTS_CNT:
u32 = NLA_DATA(nlattr);
*u32 = tswap32(*u32);
break;
return 0;
}
+static abi_long host_to_target_data_vlan_list_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ struct ifla_vf_vlan_info *vlan_info;
+
+ switch (nlattr->nla_type) {
+ /* struct ifla_vf_vlan_info */
+ case IFLA_VF_VLAN_INFO:
+ vlan_info = NLA_DATA(nlattr);
+ vlan_info->vf = tswap32(vlan_info->vf);
+ vlan_info->vlan = tswap32(vlan_info->vlan);
+ vlan_info->qos = tswap32(vlan_info->qos);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown host VLAN LIST type: %d\n",
+ nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long host_to_target_data_vf_stats_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ uint64_t *u64;
+
+ switch (nlattr->nla_type) {
+ /* uint64_t */
+ case QEMU_IFLA_VF_STATS_RX_PACKETS:
+ case QEMU_IFLA_VF_STATS_TX_PACKETS:
+ case QEMU_IFLA_VF_STATS_RX_BYTES:
+ case QEMU_IFLA_VF_STATS_TX_BYTES:
+ case QEMU_IFLA_VF_STATS_BROADCAST:
+ case QEMU_IFLA_VF_STATS_MULTICAST:
+ case QEMU_IFLA_VF_STATS_PAD:
+ case QEMU_IFLA_VF_STATS_RX_DROPPED:
+ case QEMU_IFLA_VF_STATS_TX_DROPPED:
+ u64 = NLA_DATA(nlattr);
+ *u64 = tswap64(*u64);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown host VF STATS type: %d\n",
+ nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
+static abi_long host_to_target_data_vfinfo_nlattr(struct nlattr *nlattr,
+ void *context)
+{
+ struct ifla_vf_mac *mac;
+ struct ifla_vf_vlan *vlan;
+ struct ifla_vf_vlan_info *vlan_info;
+ struct ifla_vf_spoofchk *spoofchk;
+ struct ifla_vf_rate *rate;
+ struct ifla_vf_link_state *link_state;
+ struct ifla_vf_rss_query_en *rss_query_en;
+ struct ifla_vf_trust *trust;
+ struct ifla_vf_guid *guid;
+
+ switch (nlattr->nla_type) {
+ /* struct ifla_vf_mac */
+ case QEMU_IFLA_VF_MAC:
+ mac = NLA_DATA(nlattr);
+ mac->vf = tswap32(mac->vf);
+ break;
+ /* struct ifla_vf_broadcast */
+ case QEMU_IFLA_VF_BROADCAST:
+ break;
+    /* struct ifla_vf_vlan */
+ case QEMU_IFLA_VF_VLAN:
+ vlan = NLA_DATA(nlattr);
+ vlan->vf = tswap32(vlan->vf);
+ vlan->vlan = tswap32(vlan->vlan);
+ vlan->qos = tswap32(vlan->qos);
+ break;
+ /* struct ifla_vf_vlan_info */
+ case QEMU_IFLA_VF_TX_RATE:
+ vlan_info = NLA_DATA(nlattr);
+ vlan_info->vf = tswap32(vlan_info->vf);
+ vlan_info->vlan = tswap32(vlan_info->vlan);
+ vlan_info->qos = tswap32(vlan_info->qos);
+ break;
+ /* struct ifla_vf_spoofchk */
+ case QEMU_IFLA_VF_SPOOFCHK:
+ spoofchk = NLA_DATA(nlattr);
+ spoofchk->vf = tswap32(spoofchk->vf);
+ spoofchk->setting = tswap32(spoofchk->setting);
+ break;
+ /* struct ifla_vf_rate */
+ case QEMU_IFLA_VF_RATE:
+ rate = NLA_DATA(nlattr);
+ rate->vf = tswap32(rate->vf);
+ rate->min_tx_rate = tswap32(rate->min_tx_rate);
+ rate->max_tx_rate = tswap32(rate->max_tx_rate);
+ break;
+ /* struct ifla_vf_link_state */
+ case QEMU_IFLA_VF_LINK_STATE:
+ link_state = NLA_DATA(nlattr);
+ link_state->vf = tswap32(link_state->vf);
+ link_state->link_state = tswap32(link_state->link_state);
+ break;
+ /* struct ifla_vf_rss_query_en */
+ case QEMU_IFLA_VF_RSS_QUERY_EN:
+ rss_query_en = NLA_DATA(nlattr);
+ rss_query_en->vf = tswap32(rss_query_en->vf);
+ rss_query_en->setting = tswap32(rss_query_en->setting);
+ break;
+ /* struct ifla_vf_trust */
+ case QEMU_IFLA_VF_TRUST:
+ trust = NLA_DATA(nlattr);
+ trust->vf = tswap32(trust->vf);
+ trust->setting = tswap32(trust->setting);
+ break;
+ /* struct ifla_vf_guid */
+ case QEMU_IFLA_VF_IB_NODE_GUID:
+ case QEMU_IFLA_VF_IB_PORT_GUID:
+ guid = NLA_DATA(nlattr);
+ guid->vf = tswap32(guid->vf);
+        guid->guid = tswap64(guid->guid);
+ break;
+ /* nested */
+ case QEMU_IFLA_VF_VLAN_LIST:
+ return host_to_target_for_each_nlattr(RTA_DATA(nlattr), nlattr->nla_len,
+ NULL,
+ host_to_target_data_vlan_list_nlattr);
+ case QEMU_IFLA_VF_STATS:
+ return host_to_target_for_each_nlattr(RTA_DATA(nlattr), nlattr->nla_len,
+ NULL,
+ host_to_target_data_vf_stats_nlattr);
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unknown host VFINFO type: %d\n",
+ nlattr->nla_type);
+ break;
+ }
+ return 0;
+}
+
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
uint32_t *u32;
case QEMU_IFLA_ADDRESS:
case QEMU_IFLA_BROADCAST:
case QEMU_IFLA_PERM_ADDRESS:
+ case QEMU_IFLA_PHYS_PORT_ID:
/* string */
case QEMU_IFLA_IFNAME:
case QEMU_IFLA_QDISC:
+ case QEMU_IFLA_PARENT_DEV_NAME:
+ case QEMU_IFLA_PARENT_DEV_BUS_NAME:
break;
    /* uint8_t */
case QEMU_IFLA_OPERSTATE:
return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
NULL,
host_to_target_data_xdp_nlattr);
+ case QEMU_IFLA_VFINFO_LIST:
+ return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
+ NULL,
+ host_to_target_data_vfinfo_nlattr);
default:
qemu_log_mask(LOG_UNIMP, "Unknown host QEMU_IFLA type: %d\n",
rtattr->rta_type);
#define TARGET_SIG_UNBLOCK 1 /* for unblocking signals */
#define TARGET_SIG_SETMASK 2 /* for setting the signal mask */
+/* this struct defines a stack used during syscall handling */
+typedef struct target_sigaltstack {
+ abi_ulong ss_sp;
+ abi_int ss_flags;
+ abi_ulong ss_size;
+} target_stack_t;
+
+/*
+ * sigaltstack controls
+ */
+#define TARGET_SS_ONSTACK 1
+#define TARGET_SS_DISABLE 2
+
+#define TARGET_MINSIGSTKSZ 2048
+
/* bit-flags */
#define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */
/* mask for all SS_xxx flags */
--- /dev/null
+/*
+ * Generic prctl unalign functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef GENERIC_TARGET_PRCTL_UNALIGN_H
+#define GENERIC_TARGET_PRCTL_UNALIGN_H
+
+static abi_long do_prctl_get_unalign(CPUArchState *env, target_long arg2)
+{
+ CPUState *cs = env_cpu(env);
+ uint32_t res = PR_UNALIGN_NOPRINT;
+ if (cs->prctl_unalign_sigbus) {
+ res |= PR_UNALIGN_SIGBUS;
+ }
+ return put_user_u32(res, arg2);
+}
+#define do_prctl_get_unalign do_prctl_get_unalign
+
+static abi_long do_prctl_set_unalign(CPUArchState *env, target_long arg2)
+{
+ env_cpu(env)->prctl_unalign_sigbus = arg2 & PR_UNALIGN_SIGBUS;
+ return 0;
+}
+#define do_prctl_set_unalign do_prctl_set_unalign
+
+#endif /* GENERIC_TARGET_PRCTL_UNALIGN_H */
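
Guest-side sketch, illustrative only, of the control pair implemented above: PR_SET_UNALIGN selects whether an unaligned access is fixed up or raises SIGBUS, and PR_GET_UNALIGN reads the current setting back through a pointer.

#include <sys/prctl.h>

/* Hypothetical guest helper: make unaligned accesses fault with SIGBUS. */
static int unaligned_accesses_sigbus(void)
{
    unsigned int cur;

    if (prctl(PR_GET_UNALIGN, &cur, 0, 0, 0) < 0) {
        return -1;
    }
    return prctl(PR_SET_UNALIGN, cur | PR_UNALIGN_SIGBUS, 0, 0, 0);
}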
--- /dev/null
+/* No special prctl support required. */
#ifndef HEXAGON_TARGET_SIGNAL_H
#define HEXAGON_TARGET_SIGNAL_H
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
}
break;
case 0b001110: /* indexed, atomic, bounds-checking memory operations */
- uint32_t sel = (insn >> 15) & 0b11111111111;
-
- switch (sel) {
+ switch ((insn >> 15) & 0b11111111111) {
case 0b00000100000: /* stx.b */
case 0b00000101000: /* stx.h */
case 0b00000110000: /* stx.w */
--- /dev/null
+#include "../generic/target_prctl_unalign.h"
#define TARGET_SA_NOCLDWAIT 0x00000080
#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
/* bit-flags */
#define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */
#define UNAME_MACHINE "parisc"
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
--- /dev/null
+/* No special prctl support required. */
#ifndef I386_TARGET_SIGNAL_H
#define I386_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
--- /dev/null
+/* No special prctl support required. */
#ifndef M68K_TARGET_SIGNAL_H
#define M68K_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
#define UNAME_MACHINE "m68k"
#define UNAME_MINIMUM_RELEASE "2.6.32"
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
--- /dev/null
+/* No special prctl support required. */
#ifndef MICROBLAZE_TARGET_SIGNAL_H
#define MICROBLAZE_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
};
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
--- /dev/null
+/*
+ * MIPS specific prctl functions for linux-user
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef MIPS_TARGET_PRCTL_H
+#define MIPS_TARGET_PRCTL_H
+
+static abi_long do_prctl_get_fp_mode(CPUArchState *env)
+{
+ abi_long ret = 0;
+
+ if (env->CP0_Status & (1 << CP0St_FR)) {
+ ret |= PR_FP_MODE_FR;
+ }
+ if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
+ ret |= PR_FP_MODE_FRE;
+ }
+ return ret;
+}
+#define do_prctl_get_fp_mode do_prctl_get_fp_mode
+
+static abi_long do_prctl_set_fp_mode(CPUArchState *env, abi_long arg2)
+{
+ bool old_fr = env->CP0_Status & (1 << CP0St_FR);
+ bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
+ bool new_fr = arg2 & PR_FP_MODE_FR;
+ bool new_fre = arg2 & PR_FP_MODE_FRE;
+ const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
+
+ /* If nothing to change, return right away, successfully. */
+ if (old_fr == new_fr && old_fre == new_fre) {
+ return 0;
+ }
+ /* Check the value is valid */
+ if (arg2 & ~known_bits) {
+ return -TARGET_EOPNOTSUPP;
+ }
+ /* Setting FRE without FR is not supported. */
+ if (new_fre && !new_fr) {
+ return -TARGET_EOPNOTSUPP;
+ }
+ if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
+ /* FR1 is not supported */
+ return -TARGET_EOPNOTSUPP;
+ }
+ if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
+ && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
+ /* cannot set FR=0 */
+ return -TARGET_EOPNOTSUPP;
+ }
+ if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
+ /* Cannot set FRE=1 */
+ return -TARGET_EOPNOTSUPP;
+ }
+
+ int i;
+ fpr_t *fpr = env->active_fpu.fpr;
+ for (i = 0; i < 32 ; i += 2) {
+ if (!old_fr && new_fr) {
+ fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
+ } else if (old_fr && !new_fr) {
+ fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
+ }
+ }
+
+ if (new_fr) {
+ env->CP0_Status |= (1 << CP0St_FR);
+ env->hflags |= MIPS_HFLAG_F64;
+ } else {
+ env->CP0_Status &= ~(1 << CP0St_FR);
+ env->hflags &= ~MIPS_HFLAG_F64;
+ }
+ if (new_fre) {
+ env->CP0_Config5 |= (1 << CP0C5_FRE);
+ if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
+ env->hflags |= MIPS_HFLAG_FRE;
+ }
+ } else {
+ env->CP0_Config5 &= ~(1 << CP0C5_FRE);
+ env->hflags &= ~MIPS_HFLAG_FRE;
+ }
+
+ return 0;
+}
+#define do_prctl_set_fp_mode do_prctl_set_fp_mode
+
+#endif /* MIPS_TARGET_PRCTL_H */
#define TARGET_SA_RESTORER 0x04000000 /* Only for O32 */
#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
#if defined(TARGET_ABI_MIPSO32)
/* compare linux/arch/mips/kernel/signal.c:setup_frame() */
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
return 0x40000;
}
-/* MIPS-specific prctl() options */
-#define TARGET_PR_SET_FP_MODE 45
-#define TARGET_PR_GET_FP_MODE 46
-#define TARGET_PR_FP_MODE_FR (1 << 0)
-#define TARGET_PR_FP_MODE_FRE (1 << 1)
-
#endif /* MIPS_TARGET_SYSCALL_H */
--- /dev/null
+#include "../mips/target_prctl.h"
#define TARGET_SA_RESETHAND 0x80000000
#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
/* bit-flags */
#define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
return 0x40000;
}
-/* MIPS-specific prctl() options */
-#define TARGET_PR_SET_FP_MODE 45
-#define TARGET_PR_GET_FP_MODE 46
-#define TARGET_PR_FP_MODE_FR (1 << 0)
-#define TARGET_PR_FP_MODE_FRE (1 << 1)
-
#endif /* MIPS64_TARGET_SYSCALL_H */
void cpu_loop(CPUNios2State *env)
{
CPUState *cs = env_cpu(env);
- Nios2CPU *cpu = NIOS2_CPU(cs);
target_siginfo_t info;
int trapnr, ret;
case EXCP_INTERRUPT:
/* just indicate that signals should be handled asap */
break;
+
case EXCP_TRAP:
- if (env->regs[R_AT] == 0) {
- abi_long ret;
+ switch (env->error_code) {
+ case 0:
qemu_log_mask(CPU_LOG_INT, "\nSyscall\n");
ret = do_syscall(env, env->regs[2],
env->regs[2] = abs(ret);
/* Return value is 0..4096 */
- env->regs[7] = (ret > 0xfffffffffffff000ULL);
- env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
- env->regs[CR_STATUS] &= ~0x3;
- env->regs[R_EA] = env->regs[R_PC] + 4;
+ env->regs[7] = ret > 0xfffff000u;
env->regs[R_PC] += 4;
break;
- } else {
- qemu_log_mask(CPU_LOG_INT, "\nTrap\n");
- env->regs[CR_ESTATUS] = env->regs[CR_STATUS];
- env->regs[CR_STATUS] &= ~0x3;
- env->regs[R_EA] = env->regs[R_PC] + 4;
- env->regs[R_PC] = cpu->exception_addr;
+ case 1:
+ qemu_log_mask(CPU_LOG_INT, "\nTrap 1\n");
+ force_sig_fault(TARGET_SIGUSR1, 0, env->regs[R_PC]);
+ break;
+ case 2:
+ qemu_log_mask(CPU_LOG_INT, "\nTrap 2\n");
+ force_sig_fault(TARGET_SIGUSR2, 0, env->regs[R_PC]);
+ break;
+ case 31:
+ qemu_log_mask(CPU_LOG_INT, "\nTrap 31\n");
+ force_sig_fault(TARGET_SIGTRAP, TARGET_TRAP_BRKPT, env->regs[R_PC]);
+ break;
+ default:
+ qemu_log_mask(CPU_LOG_INT, "\nTrap %d\n", env->error_code);
+ force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLTRP,
+ env->regs[R_PC]);
+ break;
+
+ case 16: /* QEMU specific, for __kuser_cmpxchg */
+ {
+ abi_ptr g = env->regs[4];
+ uint32_t *h, n, o;
- info.si_signo = TARGET_SIGTRAP;
- info.si_errno = 0;
- info.si_code = TARGET_TRAP_BRKPT;
- queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ if (g & 0x3) {
+ force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, g);
+ break;
+ }
+ ret = page_get_flags(g);
+ if (!(ret & PAGE_VALID)) {
+ force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, g);
+ break;
+ }
+ if (!(ret & PAGE_READ) || !(ret & PAGE_WRITE)) {
+ force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_ACCERR, g);
+ break;
+ }
+ h = g2h(cs, g);
+ o = env->regs[5];
+ n = env->regs[6];
+ env->regs[2] = qatomic_cmpxchg(h, o, n) - o;
+ env->regs[R_PC] += 4;
+ }
break;
}
+ break;
+
case EXCP_DEBUG:
info.si_signo = TARGET_SIGTRAP;
info.si_errno = 0;
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
break;
case 0xaa:
- switch (env->regs[R_PC]) {
- /*case 0x1000:*/ /* TODO:__kuser_helper_version */
- case 0x1004: /* __kuser_cmpxchg */
- start_exclusive();
- if (env->regs[4] & 0x3) {
- goto kuser_fail;
- }
- ret = get_user_u32(env->regs[2], env->regs[4]);
- if (ret) {
- end_exclusive();
- goto kuser_fail;
- }
- env->regs[2] -= env->regs[5];
- if (env->regs[2] == 0) {
- put_user_u32(env->regs[6], env->regs[4]);
- }
- end_exclusive();
- env->regs[R_PC] = env->regs[R_RA];
- break;
- /*case 0x1040:*/ /* TODO:__kuser_sigtramp */
- default:
- ;
-kuser_fail:
+ {
info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
/* TODO: check env->error_code */
env->regs[R_SP] = regs->sp;
env->regs[R_GP] = regs->gp;
env->regs[CR_ESTATUS] = regs->estatus;
- env->regs[R_EA] = regs->ea;
- /* TODO: unsigned long orig_r7; */
-
- /* Emulate eret when starting thread. */
env->regs[R_PC] = regs->ea;
+ /* TODO: unsigned long orig_r7; */
}
struct target_ucontext uc;
};
-static int rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
+static void rt_setup_ucontext(struct target_ucontext *uc, CPUNios2State *env)
{
unsigned long *gregs = uc->tuc_mcontext.gregs;
__put_user(env->regs[R_RA], &gregs[23]);
__put_user(env->regs[R_FP], &gregs[24]);
__put_user(env->regs[R_GP], &gregs[25]);
- __put_user(env->regs[R_EA], &gregs[27]);
+ __put_user(env->regs[R_PC], &gregs[27]);
__put_user(env->regs[R_SP], &gregs[28]);
-
- return 0;
}
static int rt_restore_ucontext(CPUNios2State *env, struct target_ucontext *uc,
__get_user(env->regs[R_GP], &gregs[25]);
/* Not really necessary no user settable bits */
__get_user(temp, &gregs[26]);
- __get_user(env->regs[R_EA], &gregs[27]);
+ __get_user(env->regs[R_PC], &gregs[27]);
__get_user(env->regs[R_RA], &gregs[23]);
__get_user(env->regs[R_SP], &gregs[28]);
return 0;
}
-static void *get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
- size_t frame_size)
+static abi_ptr get_sigframe(struct target_sigaction *ka, CPUNios2State *env,
+ size_t frame_size)
{
unsigned long usp;
usp = target_sigsp(get_sp_from_cpustate(env), ka);
/* Verify, is it 32 or 64 bit aligned */
- return (void *)((usp - frame_size) & -8UL);
+ return (usp - frame_size) & -8;
}
void setup_rt_frame(int sig, struct target_sigaction *ka,
CPUNios2State *env)
{
struct target_rt_sigframe *frame;
- int i, err = 0;
+ abi_ptr frame_addr;
+ int i;
- frame = get_sigframe(ka, env, sizeof(*frame));
-
- if (ka->sa_flags & SA_SIGINFO) {
- tswap_siginfo(&frame->info, info);
+ frame_addr = get_sigframe(ka, env, sizeof(*frame));
+ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
+ force_sigsegv(sig);
+ return;
}
+ tswap_siginfo(&frame->info, info);
+
/* Create the ucontext. */
__put_user(0, &frame->uc.tuc_flags);
__put_user(0, &frame->uc.tuc_link);
target_save_altstack(&frame->uc.tuc_stack, env);
- err |= rt_setup_ucontext(&frame->uc, env);
+ rt_setup_ucontext(&frame->uc, env);
for (i = 0; i < TARGET_NSIG_WORDS; i++) {
- __put_user((abi_ulong)set->sig[i],
- (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
- }
-
- if (err) {
- goto give_sigsegv;
+ __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
}
/* Set up to return from userspace; jump to fixed address sigreturn
env->regs[R_RA] = (unsigned long) (0x1044);
/* Set up registers for signal handler */
- env->regs[R_SP] = (unsigned long) frame;
- env->regs[4] = (unsigned long) sig;
- env->regs[5] = (unsigned long) &frame->info;
- env->regs[6] = (unsigned long) &frame->uc;
- env->regs[R_EA] = (unsigned long) ka->_sa_handler;
- return;
-
-give_sigsegv:
- if (sig == TARGET_SIGSEGV) {
- ka->_sa_handler = TARGET_SIG_DFL;
- }
- force_sigsegv(sig);
- return;
+ env->regs[R_SP] = frame_addr;
+ env->regs[4] = sig;
+ env->regs[5] = frame_addr + offsetof(struct target_rt_sigframe, info);
+ env->regs[6] = frame_addr + offsetof(struct target_rt_sigframe, uc);
+ env->regs[R_PC] = ka->_sa_handler;
+
+ unlock_user_struct(frame, frame_addr, 1);
}
long do_sigreturn(CPUNios2State *env)
}
target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
- do_sigprocmask(SIG_SETMASK, &set, NULL);
+ set_sigmask(&set);
if (rt_restore_ucontext(env, &frame->uc, &rval)) {
goto badframe;
--- /dev/null
+/* No special prctl support required. */
#ifndef NIOS2_TARGET_SIGNAL_H
#define NIOS2_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/* sigaltstack controls */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
/* Nios2 uses a fixed address on the kuser page for sigreturn. */
unsigned long orig_r7;
};
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
--- /dev/null
+/* No special prctl support required. */
#ifndef OPENRISC_TARGET_SIGNAL_H
#define OPENRISC_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_long ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-/* sigaltstack controls */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_SA_NOCLDSTOP 0x00000001
-#define TARGET_SA_NOCLDWAIT 0x00000002
-#define TARGET_SA_SIGINFO 0x00000004
-#define TARGET_SA_ONSTACK 0x08000000
-#define TARGET_SA_RESTART 0x10000000
-#define TARGET_SA_NODEFER 0x40000000
-#define TARGET_SA_RESETHAND 0x80000000
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
#define UNAME_MACHINE "openrisc"
#define UNAME_MINIMUM_RELEASE "2.6.32"
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
--- /dev/null
+/* No special prctl support required. */
#ifndef PPC_TARGET_SIGNAL_H
#define PPC_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#if !defined(TARGET_PPC64)
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 0x2000
#define TARGET_MCL_FUTURE 0x4000
#define TARGET_MCL_ONFAULT 0x8000
--- /dev/null
+/* No special prctl support required. */
#ifndef RISCV_TARGET_SIGNAL_H
#define RISCV_TARGET_SIGNAL_H
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
#define UNAME_MINIMUM_RELEASE "4.15.0"
#endif
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
--- /dev/null
+/* No special prctl support required. */
#ifndef S390X_TARGET_SIGNAL_H
#define S390X_TARGET_SIGNAL_H
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
#define UNAME_MINIMUM_RELEASE "2.6.32"
#define TARGET_CLONE_BACKWARDS2
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
--- /dev/null
+#include "../generic/target_prctl_unalign.h"
#ifndef SH4_TARGET_SIGNAL_H
#define SH4_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SETUP_FRAME
#define UNAME_MACHINE "sh4"
#define UNAME_MINIMUM_RELEASE "2.6.32"
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
return 0;
}
-#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
* caller is assumed to have called block_signals() already.
*/
ts->signal_mask = *set;
}
-#endif
/* sigaltstack management */
case TARGET_SIGCHLD:
tinfo->_sifields._sigchld._pid = info->si_pid;
tinfo->_sifields._sigchld._uid = info->si_uid;
- tinfo->_sifields._sigchld._status = info->si_status;
+        if (si_code == CLD_EXITED) {
+            tinfo->_sifields._sigchld._status = info->si_status;
+        } else {
+            tinfo->_sifields._sigchld._status
+                = host_to_target_signal(info->si_status & 0x7f)
+                | (info->si_status & ~0x7f);
+        }
tinfo->_sifields._sigchld._utime = info->si_utime;
tinfo->_sifields._sigchld._stime = info->si_stime;
si_type = QEMU_SI_CHLD;
--- /dev/null
+/* No special prctl support required. */
#define TARGET_ARCH_HAS_KA_RESTORER 1
#define TARGET_MINSIGSTKSZ 4096
-#define TARGET_SIGSTKSZ 16384
#ifdef TARGET_ABI32
#define TARGET_ARCH_HAS_SETUP_FRAME
* and copy_thread().
*/
#define TARGET_CLONE_BACKWARDS
-#define TARGET_MINSIGSTKSZ 4096
#define TARGET_MCL_CURRENT 0x2000
#define TARGET_MCL_FUTURE 0x4000
#define TARGET_MCL_ONFAULT 0x8000
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
unsigned long *, user_mask_ptr);
+/* sched_attr is not defined in glibc */
+struct sched_attr {
+ uint32_t size;
+ uint32_t sched_policy;
+ uint64_t sched_flags;
+ int32_t sched_nice;
+ uint32_t sched_priority;
+ uint64_t sched_runtime;
+ uint64_t sched_deadline;
+ uint64_t sched_period;
+ uint32_t sched_util_min;
+ uint32_t sched_util_max;
+};
+#define __NR_sys_sched_getattr __NR_sched_getattr
+_syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
+ unsigned int, size, unsigned int, flags);
+#define __NR_sys_sched_setattr __NR_sched_setattr
+_syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
+ unsigned int, flags);
+#define __NR_sys_sched_getscheduler __NR_sched_getscheduler
+_syscall1(int, sys_sched_getscheduler, pid_t, pid);
+#define __NR_sys_sched_setscheduler __NR_sched_setscheduler
+_syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
+ const struct sched_param *, param);
+#define __NR_sys_sched_getparam __NR_sched_getparam
+_syscall2(int, sys_sched_getparam, pid_t, pid,
+ struct sched_param *, param);
+#define __NR_sys_sched_setparam __NR_sched_setparam
+_syscall2(int, sys_sched_setparam, pid_t, pid,
+ const struct sched_param *, param);
#define __NR_sys_getcpu __NR_getcpu
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
return strerror(target_to_host_errno(err));
}
+static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
+{
+ int i;
+ uint8_t b;
+ if (usize <= ksize) {
+ return 1;
+ }
+ for (i = ksize; i < usize; i++) {
+ if (get_user_u8(b, addr + i)) {
+ return -TARGET_EFAULT;
+ }
+ if (b != 0) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
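The return-value contract mirrors the kernel helper of the same name; a worked example with hypothetical sizes:

/*
 * Illustration only: QEMU knows a 48-byte struct, the guest claims 64 bytes.
 *
 *   check_zeroed_user(addr, 48, 48) -> 1   (no tail to check)
 *   check_zeroed_user(addr, 48, 64) -> 1   if guest bytes 48..63 are all zero
 *                                      0   if any of them is non-zero
 *                                      -TARGET_EFAULT if the tail is unreadable
 */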
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
* We can't fit all the extents into the fixed size buffer.
* Allocate one that is large enough and use it instead.
*/
- host_ifconf = malloc(outbufsz);
+ host_ifconf = g_try_malloc(outbufsz);
if (!host_ifconf) {
return -TARGET_ENOMEM;
}
}
if (free_buf) {
- free(host_ifconf);
+ g_free(host_ifconf);
}
return ret;
return ret;
}
#endif /* defined(TARGET_ABI32) */
-
#endif /* defined(TARGET_I386) */
+/*
+ * These constants are generic. Supply any that are missing from the host.
+ */
+#ifndef PR_SET_NAME
+# define PR_SET_NAME 15
+# define PR_GET_NAME 16
+#endif
+#ifndef PR_SET_FP_MODE
+# define PR_SET_FP_MODE 45
+# define PR_GET_FP_MODE 46
+# define PR_FP_MODE_FR (1 << 0)
+# define PR_FP_MODE_FRE (1 << 1)
+#endif
+#ifndef PR_SVE_SET_VL
+# define PR_SVE_SET_VL 50
+# define PR_SVE_GET_VL 51
+# define PR_SVE_VL_LEN_MASK 0xffff
+# define PR_SVE_VL_INHERIT (1 << 17)
+#endif
+#ifndef PR_PAC_RESET_KEYS
+# define PR_PAC_RESET_KEYS 54
+# define PR_PAC_APIAKEY (1 << 0)
+# define PR_PAC_APIBKEY (1 << 1)
+# define PR_PAC_APDAKEY (1 << 2)
+# define PR_PAC_APDBKEY (1 << 3)
+# define PR_PAC_APGAKEY (1 << 4)
+#endif
+#ifndef PR_SET_TAGGED_ADDR_CTRL
+# define PR_SET_TAGGED_ADDR_CTRL 55
+# define PR_GET_TAGGED_ADDR_CTRL 56
+# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+#endif
+#ifndef PR_MTE_TCF_SHIFT
+# define PR_MTE_TCF_SHIFT 1
+# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
+# define PR_MTE_TAG_SHIFT 3
+# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
+#endif
+#ifndef PR_SET_IO_FLUSHER
+# define PR_SET_IO_FLUSHER 57
+# define PR_GET_IO_FLUSHER 58
+#endif
+#ifndef PR_SET_SYSCALL_USER_DISPATCH
+# define PR_SET_SYSCALL_USER_DISPATCH 59
+#endif
+
+#include "target_prctl.h"
+
+static abi_long do_prctl_inval0(CPUArchState *env)
+{
+ return -TARGET_EINVAL;
+}
+
+static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
+{
+ return -TARGET_EINVAL;
+}
+
+#ifndef do_prctl_get_fp_mode
+#define do_prctl_get_fp_mode do_prctl_inval0
+#endif
+#ifndef do_prctl_set_fp_mode
+#define do_prctl_set_fp_mode do_prctl_inval1
+#endif
+#ifndef do_prctl_get_vl
+#define do_prctl_get_vl do_prctl_inval0
+#endif
+#ifndef do_prctl_set_vl
+#define do_prctl_set_vl do_prctl_inval1
+#endif
+#ifndef do_prctl_reset_keys
+#define do_prctl_reset_keys do_prctl_inval1
+#endif
+#ifndef do_prctl_set_tagged_addr_ctrl
+#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
+#endif
+#ifndef do_prctl_get_tagged_addr_ctrl
+#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
+#endif
+#ifndef do_prctl_get_unalign
+#define do_prctl_get_unalign do_prctl_inval1
+#endif
+#ifndef do_prctl_set_unalign
+#define do_prctl_set_unalign do_prctl_inval1
+#endif
+
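The fallbacks above only bind when a target's target_prctl.h did not provide its own hook; a sketch of the pattern, using a made-up target directory:

/*
 * Illustrative only -- "foo" is not a real target.  linux-user/foo/
 * target_prctl.h defines the hooks it supports and marks each with a
 * self-referential #define, exactly as the aarch64 and mips headers above do:
 *
 *     static abi_long do_prctl_get_fp_mode(CPUArchState *env) { ... }
 *     #define do_prctl_get_fp_mode do_prctl_get_fp_mode
 *
 * Any hook the target leaves undefined falls back to do_prctl_inval0 or
 * do_prctl_inval1 and therefore returns -TARGET_EINVAL.
 */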
+static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
+ abi_long arg3, abi_long arg4, abi_long arg5)
+{
+ abi_long ret;
+
+ switch (option) {
+ case PR_GET_PDEATHSIG:
+ {
+ int deathsig;
+ ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
+ arg3, arg4, arg5));
+ if (!is_error(ret) && arg2 && put_user_s32(deathsig, arg2)) {
+ return -TARGET_EFAULT;
+ }
+ return ret;
+ }
+ case PR_GET_NAME:
+ {
+ void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
+ if (!name) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
+ arg3, arg4, arg5));
+ unlock_user(name, arg2, 16);
+ return ret;
+ }
+ case PR_SET_NAME:
+ {
+ void *name = lock_user(VERIFY_READ, arg2, 16, 1);
+ if (!name) {
+ return -TARGET_EFAULT;
+ }
+ ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
+ arg3, arg4, arg5));
+ unlock_user(name, arg2, 0);
+ return ret;
+ }
+ case PR_GET_FP_MODE:
+ return do_prctl_get_fp_mode(env);
+ case PR_SET_FP_MODE:
+ return do_prctl_set_fp_mode(env, arg2);
+ case PR_SVE_GET_VL:
+ return do_prctl_get_vl(env);
+ case PR_SVE_SET_VL:
+ return do_prctl_set_vl(env, arg2);
+ case PR_PAC_RESET_KEYS:
+ if (arg3 || arg4 || arg5) {
+ return -TARGET_EINVAL;
+ }
+ return do_prctl_reset_keys(env, arg2);
+ case PR_SET_TAGGED_ADDR_CTRL:
+ if (arg3 || arg4 || arg5) {
+ return -TARGET_EINVAL;
+ }
+ return do_prctl_set_tagged_addr_ctrl(env, arg2);
+ case PR_GET_TAGGED_ADDR_CTRL:
+ if (arg2 || arg3 || arg4 || arg5) {
+ return -TARGET_EINVAL;
+ }
+ return do_prctl_get_tagged_addr_ctrl(env);
+
+ case PR_GET_UNALIGN:
+ return do_prctl_get_unalign(env, arg2);
+ case PR_SET_UNALIGN:
+ return do_prctl_set_unalign(env, arg2);
+
+ case PR_GET_DUMPABLE:
+ case PR_SET_DUMPABLE:
+ case PR_GET_KEEPCAPS:
+ case PR_SET_KEEPCAPS:
+ case PR_GET_TIMING:
+ case PR_SET_TIMING:
+ case PR_GET_TIMERSLACK:
+ case PR_SET_TIMERSLACK:
+ case PR_MCE_KILL:
+ case PR_MCE_KILL_GET:
+ case PR_GET_NO_NEW_PRIVS:
+ case PR_SET_NO_NEW_PRIVS:
+ case PR_GET_IO_FLUSHER:
+ case PR_SET_IO_FLUSHER:
+ /* Some prctl options have no pointer arguments and we can pass on. */
+ return get_errno(prctl(option, arg2, arg3, arg4, arg5));
+
+ case PR_GET_CHILD_SUBREAPER:
+ case PR_SET_CHILD_SUBREAPER:
+ case PR_GET_SPECULATION_CTRL:
+ case PR_SET_SPECULATION_CTRL:
+ case PR_GET_TID_ADDRESS:
+ /* TODO */
+ return -TARGET_EINVAL;
+
+ case PR_GET_FPEXC:
+ case PR_SET_FPEXC:
+ /* Was used for SPE on PowerPC. */
+ return -TARGET_EINVAL;
+
+ case PR_GET_ENDIAN:
+ case PR_SET_ENDIAN:
+ case PR_GET_FPEMU:
+ case PR_SET_FPEMU:
+ case PR_SET_MM:
+ case PR_GET_SECCOMP:
+ case PR_SET_SECCOMP:
+ case PR_SET_SYSCALL_USER_DISPATCH:
+ case PR_GET_THP_DISABLE:
+ case PR_SET_THP_DISABLE:
+ case PR_GET_TSC:
+ case PR_SET_TSC:
+ /* Disable to prevent the target disabling stuff we need. */
+ return -TARGET_EINVAL;
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
+ option);
+ return -TARGET_EINVAL;
+ }
+}
+
#define NEW_STACK_SIZE 0x40000
(flags & PAGE_READ) ? 'r' : '-',
(flags & PAGE_WRITE_ORG) ? 'w' : '-',
(flags & PAGE_EXEC) ? 'x' : '-',
- e->is_priv ? 'p' : '-',
+ e->is_priv ? 'p' : 's',
(uint64_t) e->offset, e->dev, e->inode);
if (path) {
dprintf(fd, "%*s%s\n", 73 - count, "", path);
return ret;
case TARGET_NR_sched_setparam:
{
- struct sched_param *target_schp;
+ struct target_sched_param *target_schp;
struct sched_param schp;
if (arg2 == 0) {
return -TARGET_EINVAL;
}
- if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
+ if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
return -TARGET_EFAULT;
+ }
schp.sched_priority = tswap32(target_schp->sched_priority);
unlock_user_struct(target_schp, arg2, 0);
- return get_errno(sched_setparam(arg1, &schp));
+ return get_errno(sys_sched_setparam(arg1, &schp));
}
case TARGET_NR_sched_getparam:
{
- struct sched_param *target_schp;
+ struct target_sched_param *target_schp;
struct sched_param schp;
if (arg2 == 0) {
return -TARGET_EINVAL;
}
- ret = get_errno(sched_getparam(arg1, &schp));
+ ret = get_errno(sys_sched_getparam(arg1, &schp));
if (!is_error(ret)) {
- if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
+ if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
return -TARGET_EFAULT;
+ }
target_schp->sched_priority = tswap32(schp.sched_priority);
unlock_user_struct(target_schp, arg2, 1);
}
return ret;
case TARGET_NR_sched_setscheduler:
{
- struct sched_param *target_schp;
+ struct target_sched_param *target_schp;
struct sched_param schp;
if (arg3 == 0) {
return -TARGET_EINVAL;
}
- if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
+ if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
return -TARGET_EFAULT;
+ }
schp.sched_priority = tswap32(target_schp->sched_priority);
unlock_user_struct(target_schp, arg3, 0);
- return get_errno(sched_setscheduler(arg1, arg2, &schp));
+ return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
}
case TARGET_NR_sched_getscheduler:
- return get_errno(sched_getscheduler(arg1));
+ return get_errno(sys_sched_getscheduler(arg1));
+ case TARGET_NR_sched_getattr:
+ {
+ struct target_sched_attr *target_scha;
+ struct sched_attr scha;
+ if (arg2 == 0) {
+ return -TARGET_EINVAL;
+ }
+ if (arg3 > sizeof(scha)) {
+ arg3 = sizeof(scha);
+ }
+ ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
+ if (!is_error(ret)) {
+ target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
+ if (!target_scha) {
+ return -TARGET_EFAULT;
+ }
+ target_scha->size = tswap32(scha.size);
+ target_scha->sched_policy = tswap32(scha.sched_policy);
+ target_scha->sched_flags = tswap64(scha.sched_flags);
+ target_scha->sched_nice = tswap32(scha.sched_nice);
+ target_scha->sched_priority = tswap32(scha.sched_priority);
+ target_scha->sched_runtime = tswap64(scha.sched_runtime);
+ target_scha->sched_deadline = tswap64(scha.sched_deadline);
+ target_scha->sched_period = tswap64(scha.sched_period);
+ if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
+ target_scha->sched_util_min = tswap32(scha.sched_util_min);
+ target_scha->sched_util_max = tswap32(scha.sched_util_max);
+ }
+ unlock_user(target_scha, arg2, arg3);
+ }
+ return ret;
+ }
+ case TARGET_NR_sched_setattr:
+ {
+ struct target_sched_attr *target_scha;
+ struct sched_attr scha;
+ uint32_t size;
+ int zeroed;
+ if (arg2 == 0) {
+ return -TARGET_EINVAL;
+ }
+ if (get_user_u32(size, arg2)) {
+ return -TARGET_EFAULT;
+ }
+ if (!size) {
+ size = offsetof(struct target_sched_attr, sched_util_min);
+ }
+ if (size < offsetof(struct target_sched_attr, sched_util_min)) {
+ if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
+ return -TARGET_EFAULT;
+ }
+ return -TARGET_E2BIG;
+ }
+
+ zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
+ if (zeroed < 0) {
+ return zeroed;
+ } else if (zeroed == 0) {
+ if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
+ return -TARGET_EFAULT;
+ }
+ return -TARGET_E2BIG;
+ }
+ if (size > sizeof(struct target_sched_attr)) {
+ size = sizeof(struct target_sched_attr);
+ }
+
+ target_scha = lock_user(VERIFY_READ, arg2, size, 1);
+ if (!target_scha) {
+ return -TARGET_EFAULT;
+ }
+ scha.size = size;
+ scha.sched_policy = tswap32(target_scha->sched_policy);
+ scha.sched_flags = tswap64(target_scha->sched_flags);
+ scha.sched_nice = tswap32(target_scha->sched_nice);
+ scha.sched_priority = tswap32(target_scha->sched_priority);
+ scha.sched_runtime = tswap64(target_scha->sched_runtime);
+ scha.sched_deadline = tswap64(target_scha->sched_deadline);
+ scha.sched_period = tswap64(target_scha->sched_period);
+ if (size > offsetof(struct target_sched_attr, sched_util_min)) {
+ scha.sched_util_min = tswap32(target_scha->sched_util_min);
+ scha.sched_util_max = tswap32(target_scha->sched_util_max);
+ }
+ unlock_user(target_scha, arg2, 0);
+ return get_errno(sys_sched_setattr(arg1, &scha, arg3));
+ }
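
Guest-side sketch (not from the patch) of what now reaches this code path: sched_setattr() has no glibc wrapper, so guests invoke it through syscall(), assuming the libc headers expose SYS_sched_setattr.

#define _GNU_SOURCE
#include <sched.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Hypothetical example: request SCHED_FIFO at a given priority.  The layout
 * matches the first eight fields of the struct sched_attr declared for the
 * host wrappers above; the size field tells the kernel how much was passed. */
struct example_sched_attr {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t  sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
};

static int set_fifo(int prio)
{
    struct example_sched_attr attr;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.sched_policy = SCHED_FIFO;
    attr.sched_priority = prio;
    return syscall(SYS_sched_setattr, 0, &attr, 0);
}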
case TARGET_NR_sched_yield:
return get_errno(sched_yield());
case TARGET_NR_sched_get_priority_max:
return ret;
#endif
case TARGET_NR_prctl:
- switch (arg1) {
- case PR_GET_PDEATHSIG:
- {
- int deathsig;
- ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
- if (!is_error(ret) && arg2
- && put_user_s32(deathsig, arg2)) {
- return -TARGET_EFAULT;
- }
- return ret;
- }
-#ifdef PR_GET_NAME
- case PR_GET_NAME:
- {
- void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
- if (!name) {
- return -TARGET_EFAULT;
- }
- ret = get_errno(prctl(arg1, (unsigned long)name,
- arg3, arg4, arg5));
- unlock_user(name, arg2, 16);
- return ret;
- }
- case PR_SET_NAME:
- {
- void *name = lock_user(VERIFY_READ, arg2, 16, 1);
- if (!name) {
- return -TARGET_EFAULT;
- }
- ret = get_errno(prctl(arg1, (unsigned long)name,
- arg3, arg4, arg5));
- unlock_user(name, arg2, 0);
- return ret;
- }
-#endif
-#ifdef TARGET_MIPS
- case TARGET_PR_GET_FP_MODE:
- {
- CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
- ret = 0;
- if (env->CP0_Status & (1 << CP0St_FR)) {
- ret |= TARGET_PR_FP_MODE_FR;
- }
- if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
- ret |= TARGET_PR_FP_MODE_FRE;
- }
- return ret;
- }
- case TARGET_PR_SET_FP_MODE:
- {
- CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
- bool old_fr = env->CP0_Status & (1 << CP0St_FR);
- bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
- bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
- bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
-
- const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
- TARGET_PR_FP_MODE_FRE;
-
- /* If nothing to change, return right away, successfully. */
- if (old_fr == new_fr && old_fre == new_fre) {
- return 0;
- }
- /* Check the value is valid */
- if (arg2 & ~known_bits) {
- return -TARGET_EOPNOTSUPP;
- }
- /* Setting FRE without FR is not supported. */
- if (new_fre && !new_fr) {
- return -TARGET_EOPNOTSUPP;
- }
- if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
- /* FR1 is not supported */
- return -TARGET_EOPNOTSUPP;
- }
- if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
- && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
- /* cannot set FR=0 */
- return -TARGET_EOPNOTSUPP;
- }
- if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
- /* Cannot set FRE=1 */
- return -TARGET_EOPNOTSUPP;
- }
-
- int i;
- fpr_t *fpr = env->active_fpu.fpr;
- for (i = 0; i < 32 ; i += 2) {
- if (!old_fr && new_fr) {
- fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
- } else if (old_fr && !new_fr) {
- fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
- }
- }
-
- if (new_fr) {
- env->CP0_Status |= (1 << CP0St_FR);
- env->hflags |= MIPS_HFLAG_F64;
- } else {
- env->CP0_Status &= ~(1 << CP0St_FR);
- env->hflags &= ~MIPS_HFLAG_F64;
- }
- if (new_fre) {
- env->CP0_Config5 |= (1 << CP0C5_FRE);
- if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
- env->hflags |= MIPS_HFLAG_FRE;
- }
- } else {
- env->CP0_Config5 &= ~(1 << CP0C5_FRE);
- env->hflags &= ~MIPS_HFLAG_FRE;
- }
-
- return 0;
- }
-#endif /* MIPS */
-#ifdef TARGET_AARCH64
- case TARGET_PR_SVE_SET_VL:
- /*
- * We cannot support either PR_SVE_SET_VL_ONEXEC or
- * PR_SVE_VL_INHERIT. Note the kernel definition
- * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
- * even though the current architectural maximum is VQ=16.
- */
- ret = -TARGET_EINVAL;
- if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
- && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
- CPUARMState *env = cpu_env;
- ARMCPU *cpu = env_archcpu(env);
- uint32_t vq, old_vq;
-
- old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
- vq = MAX(arg2 / 16, 1);
- vq = MIN(vq, cpu->sve_max_vq);
-
- if (vq < old_vq) {
- aarch64_sve_narrow_vq(env, vq);
- }
- env->vfp.zcr_el[1] = vq - 1;
- arm_rebuild_hflags(env);
- ret = vq * 16;
- }
- return ret;
- case TARGET_PR_SVE_GET_VL:
- ret = -TARGET_EINVAL;
- {
- ARMCPU *cpu = env_archcpu(cpu_env);
- if (cpu_isar_feature(aa64_sve, cpu)) {
- ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
- }
- }
- return ret;
- case TARGET_PR_PAC_RESET_KEYS:
- {
- CPUARMState *env = cpu_env;
- ARMCPU *cpu = env_archcpu(env);
-
- if (arg3 || arg4 || arg5) {
- return -TARGET_EINVAL;
- }
- if (cpu_isar_feature(aa64_pauth, cpu)) {
- int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
- TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
- TARGET_PR_PAC_APGAKEY);
- int ret = 0;
- Error *err = NULL;
-
- if (arg2 == 0) {
- arg2 = all;
- } else if (arg2 & ~all) {
- return -TARGET_EINVAL;
- }
- if (arg2 & TARGET_PR_PAC_APIAKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apia,
- sizeof(ARMPACKey), &err);
- }
- if (arg2 & TARGET_PR_PAC_APIBKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apib,
- sizeof(ARMPACKey), &err);
- }
- if (arg2 & TARGET_PR_PAC_APDAKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apda,
- sizeof(ARMPACKey), &err);
- }
- if (arg2 & TARGET_PR_PAC_APDBKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apdb,
- sizeof(ARMPACKey), &err);
- }
- if (arg2 & TARGET_PR_PAC_APGAKEY) {
- ret |= qemu_guest_getrandom(&env->keys.apga,
- sizeof(ARMPACKey), &err);
- }
- if (ret != 0) {
- /*
- * Some unknown failure in the crypto. The best
- * we can do is log it and fail the syscall.
- * The real syscall cannot fail this way.
- */
- qemu_log_mask(LOG_UNIMP,
- "PR_PAC_RESET_KEYS: Crypto failure: %s",
- error_get_pretty(err));
- error_free(err);
- return -TARGET_EIO;
- }
- return 0;
- }
- }
- return -TARGET_EINVAL;
- case TARGET_PR_SET_TAGGED_ADDR_CTRL:
- {
- abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
- CPUARMState *env = cpu_env;
- ARMCPU *cpu = env_archcpu(env);
-
- if (cpu_isar_feature(aa64_mte, cpu)) {
- valid_mask |= TARGET_PR_MTE_TCF_MASK;
- valid_mask |= TARGET_PR_MTE_TAG_MASK;
- }
-
- if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
- return -TARGET_EINVAL;
- }
- env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
-
- if (cpu_isar_feature(aa64_mte, cpu)) {
- switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
- case TARGET_PR_MTE_TCF_NONE:
- case TARGET_PR_MTE_TCF_SYNC:
- case TARGET_PR_MTE_TCF_ASYNC:
- break;
- default:
- return -EINVAL;
- }
-
- /*
- * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
- * Note that the syscall values are consistent with hw.
- */
- env->cp15.sctlr_el[1] =
- deposit64(env->cp15.sctlr_el[1], 38, 2,
- arg2 >> TARGET_PR_MTE_TCF_SHIFT);
-
- /*
- * Write PR_MTE_TAG to GCR_EL1[Exclude].
- * Note that the syscall uses an include mask,
- * and hardware uses an exclude mask -- invert.
- */
- env->cp15.gcr_el1 =
- deposit64(env->cp15.gcr_el1, 0, 16,
- ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
- arm_rebuild_hflags(env);
- }
- return 0;
- }
- case TARGET_PR_GET_TAGGED_ADDR_CTRL:
- {
- abi_long ret = 0;
- CPUARMState *env = cpu_env;
- ARMCPU *cpu = env_archcpu(env);
-
- if (arg2 || arg3 || arg4 || arg5) {
- return -TARGET_EINVAL;
- }
- if (env->tagged_addr_enable) {
- ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
- }
- if (cpu_isar_feature(aa64_mte, cpu)) {
- /* See above. */
- ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
- << TARGET_PR_MTE_TCF_SHIFT);
- ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
- ~env->cp15.gcr_el1);
- }
- return ret;
- }
-#endif /* AARCH64 */
- case PR_GET_SECCOMP:
- case PR_SET_SECCOMP:
- /* Disable seccomp to prevent the target disabling syscalls we
- * need. */
- return -TARGET_EINVAL;
- default:
- /* Most prctl options have no pointer arguments */
- return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
- }
+ return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
break;
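For context: the per-target prctl() handling removed above (MIPS FP modes, SVE vector length, PAC key reset, MTE tagged-address control) now sits behind the single do_prctl() dispatcher added in its place. One detail worth spelling out from the MTE case is that the guest passes an *include* mask of allowed allocation tags while GCR_EL1 stores an *exclude* mask, which is why the removed code deposits ~arg2 into GCR_EL1[15:0]. A minimal standalone sketch of that conversion (hypothetical helper, not QEMU code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper (not QEMU code): convert the prctl() include mask
 * into the inverted exclude mask held in GCR_EL1[15:0], mirroring the
 * deposit64(..., 0, 16, ~arg2 >> TARGET_PR_MTE_TAG_SHIFT) call above. */
static uint64_t set_gcr_exclude(uint64_t gcr_el1, uint16_t include_mask)
{
    uint16_t exclude = (uint16_t)~include_mask;
    return (gcr_el1 & ~(uint64_t)0xffff) | exclude;
}

int main(void)
{
    /* Including only tags 0 and 3 (mask 0x0009) excludes the rest: 0xfff6. */
    printf("0x%" PRIx64 "\n", set_gcr_exclude(0, 0x0009));
    return 0;
}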
#ifdef TARGET_NR_arch_prctl
case TARGET_NR_arch_prctl:
abi_ulong __unused5;
};
-#elif defined(TARGET_OPENRISC) || defined(TARGET_NIOS2) || defined(TARGET_RISCV)
+#elif defined(TARGET_OPENRISC) || defined(TARGET_NIOS2) \
+ || defined(TARGET_RISCV) || defined(TARGET_HEXAGON)
/* These are the asm-generic versions of the stat and stat64 structures */
uint64_t st_ino;
};
-#elif defined(TARGET_HEXAGON)
-
-struct target_stat {
- unsigned long long st_dev;
- unsigned long long st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned long long st_rdev;
- target_ulong __pad1;
- long long st_size;
- target_long st_blksize;
- int __pad2;
- long long st_blocks;
-
- target_long target_st_atime;
- target_long target_st_atime_nsec;
- target_long target_st_mtime;
- target_long target_st_mtime_nsec;
- target_long target_st_ctime;
- target_long target_st_ctime_nsec;
- int __unused[2];
-};
-
#else
#error unsupported CPU
#endif
/* 0x100 */
};
+/* from kernel's include/linux/sched/types.h */
+struct target_sched_attr {
+ abi_uint size;
+ abi_uint sched_policy;
+ abi_ullong sched_flags;
+ abi_int sched_nice;
+ abi_uint sched_priority;
+ abi_ullong sched_runtime;
+ abi_ullong sched_deadline;
+ abi_ullong sched_period;
+ abi_uint sched_util_min;
+ abi_uint sched_util_max;
+};
+
+struct target_sched_param {
+ abi_int sched_priority;
+};
+
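These two structures mirror the kernel's struct sched_attr and struct sched_param, so the sched_setattr(2)/sched_getattr(2) and sched_setparam(2) family of syscalls can be marshalled between guest and host layouts. As a reminder of how the fields are used, here is a host-side sketch that requests SCHED_DEADLINE for the calling thread; the local struct definition and the raw syscall() invocation are assumptions about the host kernel ABI, not something taken from this patch:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Local mirror of the kernel's struct sched_attr (assumed VER0 layout, 48 bytes). */
struct sched_attr_example {
    uint32_t size;
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t  sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;    /* ns */
    uint64_t sched_deadline;   /* ns */
    uint64_t sched_period;     /* ns */
};

int main(void)
{
    struct sched_attr_example attr;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.sched_policy   = 6;                  /* SCHED_DEADLINE */
    attr.sched_runtime  = 10 * 1000 * 1000;   /* 10 ms of runtime ... */
    attr.sched_deadline = 30 * 1000 * 1000;   /* ... due within 30 ms ... */
    attr.sched_period   = 30 * 1000 * 1000;   /* ... every 30 ms */

#ifdef SYS_sched_setattr
    return syscall(SYS_sched_setattr, 0, &attr, 0) == 0 ? 0 : 1;
#else
    return 0;
#endif
}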
#endif
abi_long arg5, abi_long arg6, abi_long arg7,
abi_long arg8);
extern __thread CPUState *thread_cpu;
-void cpu_loop(CPUArchState *env);
+void QEMU_NORETURN cpu_loop(CPUArchState *env);
const char *target_strerror(int err);
int get_osversion(void);
void init_qemu_uname_release(void);
--- /dev/null
+/* No special prctl support required. */
#ifndef X86_64_TARGET_SIGNAL_H
#define X86_64_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
/* For x86_64, use of SA_RESTORER is mandatory. */
#define TARGET_ARCH_SET_FS 0x1002
#define TARGET_ARCH_GET_FS 0x1003
#define TARGET_ARCH_GET_GS 0x1004
-#define TARGET_MINSIGSTKSZ 2048
#define TARGET_MCL_CURRENT 1
#define TARGET_MCL_FUTURE 2
#define TARGET_MCL_ONFAULT 4
--- /dev/null
+/* No special prctl support required. */
#ifndef XTENSA_TARGET_SIGNAL_H
#define XTENSA_TARGET_SIGNAL_H
-/* this struct defines a stack used during syscall handling */
-
-typedef struct target_sigaltstack {
- abi_ulong ss_sp;
- abi_int ss_flags;
- abi_ulong ss_size;
-} target_stack_t;
-
-/*
- * sigaltstack controls
- */
-#define TARGET_SS_ONSTACK 1
-#define TARGET_SS_DISABLE 2
-
-#define TARGET_MINSIGSTKSZ 2048
-#define TARGET_SIGSTKSZ 8192
-
#include "../generic/signal.h"
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
base_dir = 'bsd-user'
target_inc += include_directories('bsd-user/' / targetos)
dir = base_dir / abi
- arch_srcs += files(dir / 'target_arch_cpu.c')
+ arch_srcs += files(dir / 'signal.c', dir / 'target_arch_cpu.c')
endif
target_inc += include_directories(
base_dir,
{ 'command': 'x-query-usb',
'returns': 'HumanReadableText',
'features': [ 'unstable' ] }
+
+##
+# @SmbiosEntryPointType:
+#
+# @32: SMBIOS version 2.1 (32-bit) Entry Point
+#
+# @64: SMBIOS version 3.0 (64-bit) Entry Point
+#
+# Since: 7.0
+##
+{ 'enum': 'SmbiosEntryPointType',
+ 'data': [ '32', '64' ] }
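Since this enum arrives with the 7.0 machine code, it presumably backs a machine property for picking the SMBIOS entry point format; if so, selecting the 64-bit (3.0) entry point would look something like ``-machine q35,smbios-entry-point-type=64``. The property name is an assumption here, not something defined in this hunk.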
``-display sdl,grab-mod=rctrl`` instead.
ERST
-DEF("no-quit", 0, QEMU_OPTION_no_quit,
- "-no-quit disable SDL/GTK window close capability (deprecated)\n", QEMU_ARCH_ALL)
-SRST
-``-no-quit``
- Disable window close capability (SDL and GTK only). This option is
- deprecated, please use ``-display ...,window-close=off`` instead.
-ERST
-
DEF("sdl", 0, QEMU_OPTION_sdl,
"-sdl shorthand for -display sdl\n", QEMU_ARCH_ALL)
SRST
-Subproject commit 234ed8e427f4d92903123199f6590d144e0d9351
+Subproject commit 48f91ee9c960f048c4a7d1da4447d31e04931e38
PRESERVE_ARG0=no
QEMU_SUFFIX=""
-options=$(getopt -o ds:Q:S:e:hc:p:g: -l debian,systemd:,qemu-path:,qemu-suffix:,exportdir:,help,credential:,persistent:,preserve-argv0: -- "$@")
+_longopts="debian,systemd:,qemu-path:,qemu-suffix:,exportdir:,help,credential:,\
+persistent:,preserve-argv0:"
+options=$(getopt -o ds:Q:S:e:hc:p:g:F: -l ${_longopts} -- "$@")
eval set -- "$options"
while true ; do
{
struct sigaction action;
+ /*
+ * ALERT: when modifying this, take care that SIGBUS forwarding in
+ * os_mem_prealloc() will continue working as expected.
+ */
memset(&action, 0, sizeof(action));
action.sa_flags = SA_SIGINFO;
action.sa_sigaction = sigbus_handler;
"for SDL, ignoring option");
}
if (dpy.has_window_close && !use_gtk && !use_sdl) {
- error_report("-no-quit is only valid for GTK and SDL, "
+ error_report("window-close is only valid for GTK and SDL, "
"ignoring option");
}
warn_report("-ctrl-grab is deprecated, please use "
"-display sdl,grab-mod=rctrl instead.");
break;
- case QEMU_OPTION_no_quit:
- dpy.has_window_close = true;
- dpy.window_close = false;
- warn_report("-no-quit is deprecated, please use "
- "-display ...,window-close=off instead.");
- break;
case QEMU_OPTION_sdl:
warn_report("-sdl is deprecated, use -display sdl instead.");
#ifdef CONFIG_SDL
#define ENV_FLAG_TB_MASK \
(ENV_FLAG_PAL_MODE | ENV_FLAG_PS_USER | ENV_FLAG_FEN)
+#define TB_FLAG_UNALIGN (1u << 1)
+
static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch)
{
int ret = env->flags & ENV_FLAG_PS_USER ? MMU_USER_IDX : MMU_KERNEL_IDX;
*pc = env->pc;
*cs_base = 0;
*pflags = env->flags & ENV_FLAG_TB_MASK;
+#ifdef CONFIG_USER_ONLY
+ *pflags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
+#endif
}
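The new *pflags line above uses a branch-free multiply-by-boolean: TB_FLAG_UNALIGN is set exactly when the task has not requested PR_UNALIGN_SIGBUS, and the translator later maps that flag to MO_UNALN (otherwise MO_ALIGN, so misaligned accesses fault and are delivered as SIGBUS). A self-contained sketch of the idiom (illustrative only, not QEMU code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define TB_FLAG_UNALIGN (1u << 1)

/* Multiplying a flag by a 0/1 condition sets it conditionally, no branch. */
static uint32_t tb_unalign_flag(bool prctl_unalign_sigbus)
{
    return TB_FLAG_UNALIGN * !prctl_unalign_sigbus;
}

int main(void)
{
    assert(tb_unalign_flag(false) == TB_FLAG_UNALIGN); /* MO_UNALN: unaligned allowed */
    assert(tb_unalign_flag(true) == 0);                /* MO_ALIGN: misalign -> SIGBUS */
    return 0;
}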
#ifdef CONFIG_USER_ONLY
struct DisasContext {
DisasContextBase base;
-#ifndef CONFIG_USER_ONLY
+#ifdef CONFIG_USER_ONLY
+ MemOp unalign;
+#else
uint64_t palbr;
#endif
uint32_t tbflags;
TCGv sink;
};
+#ifdef CONFIG_USER_ONLY
+#define UNALIGN(C) (C)->unalign
+#else
+#define UNALIGN(C) 0
+#endif
+
/* Target-specific return values from translate_one, indicating the
state of the TB. Note that DISAS_NEXT indicates that we are not
exiting the TB. */
static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
{
TCGv_i32 tmp32 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
gen_helper_memory_to_f(dest, tmp32);
tcg_temp_free_i32(tmp32);
}
static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
{
TCGv tmp = tcg_temp_new();
- tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
gen_helper_memory_to_g(dest, tmp);
tcg_temp_free(tmp);
}
static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
{
TCGv_i32 tmp32 = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
gen_helper_memory_to_s(dest, tmp32);
tcg_temp_free_i32(tmp32);
}
static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
{
- tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
if (clear) {
tcg_gen_andi_i64(addr, addr, ~0x7);
+ } else if (!locked) {
+ op |= UNALIGN(ctx);
}
dest = ctx->ir[ra];
{
TCGv_i32 tmp32 = tcg_temp_new_i32();
gen_helper_f_to_memory(tmp32, addr);
- tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
tcg_temp_free_i32(tmp32);
}
{
TCGv tmp = tcg_temp_new();
gen_helper_g_to_memory(tmp, src);
- tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
tcg_temp_free(tmp);
}
{
TCGv_i32 tmp32 = tcg_temp_new_i32();
gen_helper_s_to_memory(tmp32, src);
- tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
tcg_temp_free_i32(tmp32);
}
static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
{
- tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
if (clear) {
tcg_gen_andi_i64(addr, addr, ~0x7);
+ } else {
+ op |= UNALIGN(ctx);
}
src = load_gpr(ctx, ra);
break;
case 0x0B:
/* LDQ_U */
- gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 1, 0);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0);
break;
case 0x0C:
/* LDWU */
break;
case 0x0F:
/* STQ_U */
- gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 1);
+ gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1);
break;
case 0x10:
break;
case 0x1:
/* Quadword physical access (hw_ldq/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
break;
case 0x2:
/* Longword physical access with lock (hw_ldl_l/p) */
break;
case 0x3:
/* Quadword physical access with lock (hw_ldq_l/p) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ);
tcg_gen_mov_i64(cpu_lock_addr, addr);
tcg_gen_mov_i64(cpu_lock_value, va);
break;
break;
case 0xB:
/* Quadword virtual access with protection check (hw_ldq/w) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEUQ);
break;
case 0xC:
/* Longword virtual access with alt access mode (hw_ldl/a)*/
case 0xF:
/* Quadword virtual access with alternate access mode and
protection checks (hw_ldq/wa) */
- tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEUQ);
break;
}
tcg_temp_free(addr);
vb = load_gpr(ctx, rb);
tmp = tcg_temp_new();
tcg_gen_addi_i64(tmp, vb, disp12);
- tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ);
tcg_temp_free(tmp);
break;
case 0x2:
case 0x3:
/* Quadword physical access with lock */
ret = gen_store_conditional(ctx, ra, rb, disp12,
- MMU_PHYS_IDX, MO_LEQ);
+ MMU_PHYS_IDX, MO_LEUQ);
break;
case 0x4:
/* Longword virtual access */
break;
case 0x29:
/* LDQ */
- gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 0);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0);
break;
case 0x2A:
/* LDL_L */
break;
case 0x2B:
/* LDQ_L */
- gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 1);
+ gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 1);
break;
case 0x2C:
/* STL */
break;
case 0x2D:
/* STQ */
- gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 0);
+ gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0);
break;
case 0x2E:
/* STL_C */
case 0x2F:
/* STQ_C */
ret = gen_store_conditional(ctx, ra, rb, disp16,
- ctx->mem_idx, MO_LEQ);
+ ctx->mem_idx, MO_LEUQ);
break;
case 0x30:
/* BR */
#ifdef CONFIG_USER_ONLY
ctx->ir = cpu_std_ir;
+ ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
ctx->palbr = env->palbr;
ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
uint64_t o0, o1;
bool success;
int mem_idx = cpu_mmu_index(env, false);
- MemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
- MemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);
+ MemOpIdx oi0 = make_memop_idx(MO_LEUQ | MO_ALIGN_16, mem_idx);
+ MemOpIdx oi1 = make_memop_idx(MO_LEUQ, mem_idx);
o0 = cpu_ldq_le_mmu(env, addr + 0, oi0, ra);
o1 = cpu_ldq_le_mmu(env, addr + 8, oi1, ra);
uint64_t o0, o1;
bool success;
int mem_idx = cpu_mmu_index(env, false);
- MemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
- MemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);
+ MemOpIdx oi0 = make_memop_idx(MO_BEUQ | MO_ALIGN_16, mem_idx);
+ MemOpIdx oi1 = make_memop_idx(MO_BEUQ, mem_idx);
o1 = cpu_ldq_be_mmu(env, addr + 0, oi0, ra);
o0 = cpu_ldq_be_mmu(env, addr + 8, oi1, ra);
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0,
.access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
{ .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2,
.access = PL1_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
{ .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0,
.access = PL2_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_alle2is_write },
+ { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae2is_write },
{ .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4,
.access = PL2_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae2is_write },
{ .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6,
.access = PL2_W, .type = ARM_CP_NO_RAW,
.opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0,
.access = PL3_W, .type = ARM_CP_NO_RAW,
.writefn = tlbi_aa64_alle3is_write },
+ { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3is_write },
+ { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3is_write },
REGINFO_SENTINEL
};
static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
TCGv_i32 a32, int index)
{
- gen_aa32_ld_i64(s, val, a32, index, MO_Q);
+ gen_aa32_ld_i64(s, val, a32, index, MO_UQ);
}
static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
TCGv_i32 a32, int index)
{
- gen_aa32_st_i64(s, val, a32, index, MO_Q);
+ gen_aa32_st_i64(s, val, a32, index, MO_UQ);
}
DO_GEN_LD(8u, MO_UB)
tcg_gen_ld_i64(tmphi, cpu_env, fp_reg_hi_offset(s, srcidx));
- mop = s->be_data | MO_Q;
+ mop = s->be_data | MO_UQ;
tcg_gen_qemu_st_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
mop | (s->align_mem ? MO_ALIGN_16 : 0));
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
tmphi = tcg_temp_new_i64();
tcg_hiaddr = tcg_temp_new_i64();
- mop = s->be_data | MO_Q;
+ mop = s->be_data | MO_UQ;
tcg_gen_qemu_ld_i64(be ? tmphi : tmplo, tcg_addr, get_mem_index(s),
mop | (s->align_mem ? MO_ALIGN_16 : 0));
tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
int i, n = (1 + is_pair) << LOG2_TAG_GRANULE;
tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index,
- MO_Q | MO_ALIGN_16);
+ MO_UQ | MO_ALIGN_16);
for (i = 8; i < n; i += 8) {
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
- tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_Q);
+ tcg_gen_qemu_st_i64(tcg_zero, clean_addr, mem_index, MO_UQ);
}
tcg_temp_free_i64(tcg_zero);
}
case MO_UL:
tcg_gen_ld32u_i64(var, cpu_env, offset);
break;
- case MO_Q:
+ case MO_UQ:
tcg_gen_ld_i64(var, cpu_env, offset);
break;
default:
return false;
}
- if ((a->vd & 1) || (src1_mop == MO_Q && (a->vn & 1))) {
+ if ((a->vd & 1) || (src1_mop == MO_UQ && (a->vn & 1))) {
return false;
}
}; \
int narrow_mop = a->size == MO_32 ? MO_32 | SIGN : -1; \
return do_prewiden_3d(s, a, widenfn[a->size], addfn[a->size], \
- SRC1WIDE ? MO_Q : narrow_mop, \
+ SRC1WIDE ? MO_UQ : narrow_mop, \
narrow_mop); \
}
t0 = tcg_temp_new_i64();
for (i = 0; i < len_align; i += 8) {
- tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
tcg_gen_st_i64(t0, cpu_env, vofs + i);
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
}
gen_set_label(loop);
t0 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
tp = tcg_temp_new_ptr();
t0 = tcg_temp_new_i64();
for (i = 0; i < len_align; i += 8) {
tcg_gen_ld_i64(t0, cpu_env, vofs + i);
- tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEQ);
+ tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
}
tcg_temp_free_i64(t0);
tcg_gen_addi_ptr(i, i, 8);
tcg_temp_free_ptr(tp);
- tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEQ);
+ tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
tcg_gen_addi_i64(clean_addr, clean_addr, 8);
tcg_temp_free_i64(t0);
MO_UB, MO_UB, MO_UB, MO_UB,
MO_SL, MO_UW, MO_UW, MO_UW,
MO_SW, MO_SW, MO_UL, MO_UL,
- MO_SB, MO_SB, MO_SB, MO_Q
+ MO_SB, MO_SB, MO_SB, MO_UQ
};
#define dtype_msz(x) (dtype_mop[x] & MO_SIZE)
addr = add_reg_for_lit(s, a->rn, offset);
tmp = tcg_temp_new_i64();
if (a->l) {
- gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
+ gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4);
vfp_store_reg64(tmp, a->vd);
} else {
vfp_load_reg64(tmp, a->vd);
- gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
+ gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4);
}
tcg_temp_free_i64(tmp);
tcg_temp_free_i32(addr);
for (i = 0; i < n; i++) {
if (a->l) {
/* load */
- gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
+ gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4);
vfp_store_reg64(tmp, a->vd + i);
} else {
/* store */
vfp_load_reg64(tmp, a->vd + i);
- gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
+ gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_UQ | MO_ALIGN_4);
}
tcg_gen_addi_i32(addr, addr, offset);
}
case MO_UL:
tcg_gen_ld32u_i64(dest, cpu_env, off);
break;
- case MO_Q:
+ case MO_UQ:
tcg_gen_ld_i64(dest, cpu_env, off);
break;
default:
cris_store_direct_jmp(dc);
}
- tcg_gen_qemu_ld_i64(dst, addr, mem_index, MO_TEQ);
+ tcg_gen_qemu_ld_i64(dst, addr, mem_index, MO_TEUQ);
}
static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
return hppa_form_gva_psw(env->psw, spc, off);
}
-/* Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
+/*
+ * Since PSW_{I,CB} will never need to be in tb->flags, reuse them.
* TB_FLAG_SR_SAME indicates that SR4 through SR7 all contain the
* same value.
*/
#define TB_FLAG_SR_SAME PSW_I
#define TB_FLAG_PRIV_SHIFT 8
+#define TB_FLAG_UNALIGN 0x400
static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, target_ulong *pc,
target_ulong *cs_base,
#ifdef CONFIG_USER_ONLY
*pc = env->iaoq_f & -4;
*cs_base = env->iaoq_b & -4;
+ flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
#else
/* ??? E, T, H, L, B, P bits need to be here, when implemented. */
flags |= env->psw & (PSW_W | PSW_C | PSW_D);
int mmu_idx;
int privilege;
bool psw_n_nonzero;
+
+#ifdef CONFIG_USER_ONLY
+ MemOp unalign;
+#endif
} DisasContext;
+#ifdef CONFIG_USER_ONLY
+#define UNALIGN(C) (C)->unalign
+#else
+#define UNALIGN(C) 0
+#endif
+
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
ctx->mmu_idx == MMU_PHYS_IDX);
- tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop);
+ tcg_gen_qemu_ld_reg(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
if (modify) {
save_gpr(ctx, rb, ofs);
}
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
ctx->mmu_idx == MMU_PHYS_IDX);
- tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop);
+ tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
if (modify) {
save_gpr(ctx, rb, ofs);
}
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
ctx->mmu_idx == MMU_PHYS_IDX);
- tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop);
+ tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
if (modify) {
save_gpr(ctx, rb, ofs);
}
form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
ctx->mmu_idx == MMU_PHYS_IDX);
- tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop);
+ tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
if (modify) {
save_gpr(ctx, rb, ofs);
}
nullify_over(ctx);
tmp = tcg_temp_new_i64();
- do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
+ do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
save_frd(rt, tmp);
tcg_temp_free_i64(tmp);
nullify_over(ctx);
tmp = load_frd(rt);
- do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEQ);
+ do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
tcg_temp_free_i64(tmp);
return nullify_end(ctx);
ctx->mmu_idx = MMU_USER_IDX;
ctx->iaoq_f = ctx->base.pc_first | MMU_USER_IDX;
ctx->iaoq_b = ctx->base.tb->cs_base | MMU_USER_IDX;
+ ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
ctx->mmu_idx = (ctx->tb_flags & PSW_D ? ctx->privilege : MMU_PHYS_IDX);
{
uintptr_t ra = GETPC();
int mem_idx = cpu_mmu_index(env, false);
- MemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
+ MemOpIdx oi = make_memop_idx(MO_TEUQ, mem_idx);
oldv = cpu_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
}
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ);
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset);
}
static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset);
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEQ);
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
}
static inline void gen_ldo_env_A0(DisasContext *s, int offset)
{
int mem_index = s->mem_index;
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ);
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, MO_LEUQ);
tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
tcg_gen_addi_tl(s->tmp0, s->A0, 8);
- tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ);
+ tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
}
{
int mem_index = s->mem_index;
tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, MO_LEQ);
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, MO_LEUQ);
tcg_gen_addi_tl(s->tmp0, s->A0, 8);
tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
- tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEQ);
+ tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
}
static inline void gen_op_movo(DisasContext *s, int d_offset, int s_offset)
tcg_gen_mov_i64(cpu_regs[rm], s->tmp1_i64);
} else {
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
}
#else
goto illegal_op;
gen_op_mov_v_reg(s, ot, s->tmp1_i64, rm);
} else {
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
}
tcg_gen_st_i64(s->tmp1_i64, cpu_env,
offsetof(CPUX86State,
break;
case 2:
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
gen_helper_fldl_FT0(cpu_env, s->tmp1_i64);
break;
case 3:
break;
case 2:
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
gen_helper_fldl_ST0(cpu_env, s->tmp1_i64);
break;
case 3:
case 2:
gen_helper_fisttll_ST0(s->tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
break;
case 3:
default:
case 2:
gen_helper_fstl_ST0(s->tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
break;
case 3:
default:
break;
case 0x3d: /* fildll */
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
gen_helper_fildll_ST0(cpu_env, s->tmp1_i64);
break;
case 0x3f: /* fistpll */
gen_helper_fistll_ST0(s->tmp1_i64, cpu_env);
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
gen_helper_fpop(cpu_env);
break;
default:
gen_lea_modrm(env, s, modrm);
if (CODE64(s)) {
tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
tcg_gen_addi_tl(s->A0, s->A0, 8);
tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
} else {
tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
s->mem_index, MO_LEUL);
gen_lea_modrm(env, s, modrm);
if (CODE64(s)) {
tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
tcg_gen_addi_tl(s->A0, s->A0, 8);
tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
- s->mem_index, MO_LEQ);
+ s->mem_index, MO_LEUQ);
} else {
tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
s->mem_index, MO_LEUL);
oldsr = sr;
env->aregs[7] = sp;
cpu_m68k_set_sr(env, sr &= ~SR_M);
- sp = env->aregs[7] & ~1;
+ sp = env->aregs[7];
+ if (!m68k_feature(env, M68K_FEATURE_UNALIGNED_DATA)) {
+ sp &= ~1;
+ }
do_stack_frame(env, &sp, 1, oldsr, 0, retaddr);
} else {
do_stack_frame(env, &sp, 0, oldsr, 0, retaddr);
uintptr_t ra = GETPC();
#if defined(CONFIG_ATOMIC64)
int mmu_idx = cpu_mmu_index(env, 0);
- MemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx);
+ MemOpIdx oi = make_memop_idx(MO_BEUQ, mmu_idx);
#endif
if (parallel) {
gen_reserved_instruction(ctx);
return;
}
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ);
gen_store_gpr(t1, rd);
tcg_gen_movi_tl(t1, 8);
gen_op_addr_add(ctx, t0, t0, t1);
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ);
gen_store_gpr(t1, rd + 1);
break;
case SDP:
gen_load_gpr(t1, rd);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ);
tcg_gen_movi_tl(t1, 8);
gen_op_addr_add(ctx, t0, t0, t1);
gen_load_gpr(t1, rd + 1);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ);
break;
#endif
}
case SCD:
check_insn(ctx, ISA_MIPS3);
check_mips_64(ctx);
- gen_st_cond(ctx, rt, rs, offset, MO_TEQ, false);
+ gen_st_cond(ctx, rt, rs, offset, MO_TEUQ, false);
break;
#endif
case LD_EVA:
gen_store_gpr(t0, rt);
break;
case OPC_LD:
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
}
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
tcg_gen_shl_tl(t0, t0, t1);
t2 = tcg_const_tl(-1);
tcg_gen_shl_tl(t2, t2, t1);
}
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
tcg_gen_shr_tl(t0, t0, t1);
tcg_gen_xori_tl(t1, t1, 63);
t2 = tcg_const_tl(0xfffffffffffffffeull);
t1 = tcg_const_tl(pc_relative_pc(ctx));
gen_op_addr_add(ctx, t0, t0, t1);
tcg_temp_free(t1);
- tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUQ);
gen_store_gpr(t0, rt);
break;
#endif
switch (opc) {
#if defined(TARGET_MIPS64)
case OPC_SD:
- tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
break;
case OPC_SDL:
case OPC_LDC1:
{
TCGv_i64 fp0 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, fp0, ft);
tcg_temp_free_i64(fp0);
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, ft);
- tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
tcg_temp_free_i64(fp0);
}
check_mips_64(ctx);
offset = sextract32(ctx->opcode << 3, 0, 21);
addr = addr_add(ctx, (pc & ~0x7), offset);
- gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEQ);
+ gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUQ);
break;
#endif
default:
case OPC_GSLQ:
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t1, rt);
gen_store_gpr(t0, lsq_rt1);
check_cp1_enabled(ctx);
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
- tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, t1, rt);
gen_store_fpr64(ctx, t0, lsq_rt1);
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
gen_load_gpr(t1, rt);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
gen_load_gpr(t1, lsq_rt1);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
tcg_temp_free(t1);
break;
t1 = tcg_temp_new();
gen_base_offset_addr(ctx, t0, rs, lsq_offset);
gen_load_fpr64(ctx, t1, rt);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_base_offset_addr(ctx, t0, rs, lsq_offset + 8);
gen_load_fpr64(ctx, t1, lsq_rt1);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
tcg_temp_free(t1);
break;
}
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ);
tcg_gen_shl_tl(t0, t0, t1);
t2 = tcg_const_tl(-1);
tcg_gen_shl_tl(t2, t2, t1);
}
tcg_gen_shli_tl(t1, t1, 3);
tcg_gen_andi_tl(t0, t0, ~7);
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ);
tcg_gen_shr_tl(t0, t0, t1);
tcg_gen_xori_tl(t1, t1, 63);
t2 = tcg_const_tl(0xfffffffffffffffeull);
if (rd) {
gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
}
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_gpr(t0, rt);
break;
if (rd) {
gen_op_addr_add(ctx, t0, cpu_gpr[rd], t0);
}
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
gen_store_fpr64(ctx, t0, rt);
break;
case OPC_GSSDX:
t1 = tcg_temp_new();
gen_load_gpr(t1, rt);
- tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
tcg_temp_free(t1);
break;
case OPC_GSSDXC1:
t1 = tcg_temp_new();
gen_load_fpr64(ctx, t1, rt);
- tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEQ |
+ tcg_gen_qemu_st_i64(t1, t0, ctx->mem_idx, MO_TEUQ |
ctx->default_tcg_memop_mask);
tcg_temp_free(t1);
break;
check_cp1_registers(ctx, fd);
{
TCGv_i64 fp0 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
{
TCGv_i64 fp0 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
gen_store_fpr64(ctx, fp0, fd);
tcg_temp_free_i64(fp0);
}
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
tcg_temp_free_i64(fp0);
}
break;
{
TCGv_i64 fp0 = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp0, fs);
- tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEUQ);
tcg_temp_free_i64(fp0);
}
break;
break;
#if defined(TARGET_MIPS64)
case OPC_LDX:
- tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUQ);
gen_store_gpr(t0, rd);
break;
#endif
#endif
#if defined(TARGET_MIPS64)
case R6_OPC_SCD:
- gen_st_cond(ctx, rt, rs, imm, MO_TEQ, false);
+ gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false);
break;
case R6_OPC_LLD:
gen_ld(ctx, op1, rt, rs, imm);
check_insn_opc_user_only(ctx, INSN_R5900);
}
check_mips_64(ctx);
- gen_st_cond(ctx, rt, rs, imm, MO_TEQ, false);
+ gen_st_cond(ctx, rt, rs, imm, MO_TEUQ, false);
break;
case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */
if (ctx->insn_flags & ISA_MIPS_R6) {
tcg_gen_andi_tl(addr, addr, ~0xf);
/* Lower half */
- tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
gen_store_gpr(t0, a->rt);
/* Upper half */
tcg_gen_addi_i64(addr, addr, 8);
- tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
gen_store_gpr_hi(t0, a->rt);
tcg_temp_free(t0);
/* Lower half */
gen_load_gpr(t0, a->rt);
- tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
/* Upper half */
tcg_gen_addi_i64(addr, addr, 8);
gen_load_gpr_hi(t0, a->rt);
- tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_i64(t0, addr, ctx->mem_idx, MO_TEUQ);
tcg_temp_free(addr);
tcg_temp_free(t0);
#if !defined(CONFIG_USER_ONLY)
Nios2MMU mmu;
-
uint32_t irq_pending;
#endif
+ int error_code;
};
/**
tcg_temp_free(t0);
}
+static void trap(DisasContext *dc, uint32_t code, uint32_t flags)
+{
+#ifdef CONFIG_USER_ONLY
+ /*
+ * The imm5 field is not stored anywhere on real hw; the kernel
+ * has to load the insn and extract the field. But we can make
+     * things easier for cpu_loop if we stash it in env->error_code.
+ */
+ R_TYPE(instr, code);
+ tcg_gen_st_i32(tcg_constant_i32(instr.imm5), cpu_env,
+ offsetof(CPUNios2State, error_code));
+#endif
+ t_gen_helper_raise_exception(dc, EXCP_TRAP);
+}
+
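The value cached here is the trap instruction's 5-bit immediate, which the decoder already has in hand as instr.imm5; a kernel without this shortcut would have to reload the instruction word and mask the field out itself. A hypothetical standalone extractor, assuming the usual Nios II R-type layout with imm5 in bits [10:6] (this is what R_TYPE() is expected to decode, but the snippet is not QEMU code):

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper: pull imm5 out of a Nios II R-type instruction word. */
static unsigned trap_imm5(uint32_t insn)
{
    return (insn >> 6) & 0x1f;   /* bits [10:6], per the assumed encoding */
}

int main(void)
{
    /* An instruction word with imm5 = 31 encoded in bits [10:6]. */
    assert(trap_imm5(31u << 6) == 31);
    return 0;
}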
static const Nios2Instruction r_type_instructions[] = {
INSTRUCTION_ILLEGAL(),
INSTRUCTION(eret), /* eret */
INSTRUCTION_ILLEGAL(),
INSTRUCTION_ILLEGAL(),
INSTRUCTION_ILLEGAL(),
- INSTRUCTION_FLG(gen_excp, EXCP_TRAP), /* trap */
+ INSTRUCTION(trap), /* trap */
INSTRUCTION(wrctl), /* wrctl */
INSTRUCTION_ILLEGAL(),
INSTRUCTION_FLG(gen_cmpxx, TCG_COND_LTU), /* cmpltu */
dc->base.pc_next = pc + 4;
/* Decode an instruction */
-
-#if defined(CONFIG_USER_ONLY)
- /* FIXME: Is this needed ? */
- if (pc >= 0x1000 && pc < 0x2000) {
- t_gen_helper_raise_exception(dc, 0xaa);
- return;
- }
-#endif
-
code = cpu_ldl_code(env, pc);
op = get_opcode(code);
/* Other registers */
target_ulong spr[1024]; /* special purpose registers */
ppc_spr_t spr_cb[1024];
+    /* Which of PMC[1-6] are enabled and counting insns or cycles. */
+ uint8_t pmc_ins_cnt;
+ uint8_t pmc_cyc_cnt;
/* Vector status and control register, minus VSCR_SAT */
uint32_t vscr;
/* VSX registers (including FP and AVR) */
void store_40x_pit(CPUPPCState *env, target_ulong val);
void store_40x_dbcr0(CPUPPCState *env, uint32_t val);
void store_40x_sler(CPUPPCState *env, uint32_t val);
+void store_40x_tcr(CPUPPCState *env, target_ulong val);
+void store_40x_tsr(CPUPPCState *env, target_ulong val);
void store_booke_tcr(CPUPPCState *env, target_ulong val);
void store_booke_tsr(CPUPPCState *env, target_ulong val);
void ppc_tlb_invalidate_all(CPUPPCState *env);
0x00000000);
spr_register(env, SPR_40x_TCR, "TCR",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_booke_tcr,
+ &spr_read_generic, &spr_write_40x_tcr,
0x00000000);
spr_register(env, SPR_40x_TSR, "TSR",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_booke_tsr,
+ &spr_read_generic, &spr_write_40x_tsr,
0x00000000);
}
/* MMU */
spr_register(env, SPR_40x_PID, "PID",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_40x_pid,
0x00000000);
spr_register(env, SPR_4xx_CCR0, "CCR0",
SPR_NOACCESS, SPR_NOACCESS,
#endif /* CONFIG_TCG */
#endif
+ pmu_update_summaries(env);
hreg_compute_hflags(env);
env->reserve_addr = (target_ulong)-1ULL;
/* Be sure no exception or interrupt is pending */
env->spr[SPR_SPRG4], env->spr[SPR_SPRG5],
env->spr[SPR_SPRG6], env->spr[SPR_SPRG7]);
+ switch (env->excp_model) {
#if defined(TARGET_PPC64)
- if (env->excp_model == POWERPC_EXCP_POWER7 ||
- env->excp_model == POWERPC_EXCP_POWER8 ||
- env->excp_model == POWERPC_EXCP_POWER9 ||
- env->excp_model == POWERPC_EXCP_POWER10) {
+ case POWERPC_EXCP_POWER7:
+ case POWERPC_EXCP_POWER8:
+ case POWERPC_EXCP_POWER9:
+ case POWERPC_EXCP_POWER10:
qemu_fprintf(f, "HSRR0 " TARGET_FMT_lx " HSRR1 " TARGET_FMT_lx "\n",
env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
- }
+ break;
#endif
- if (env->excp_model == POWERPC_EXCP_BOOKE) {
+ case POWERPC_EXCP_BOOKE:
qemu_fprintf(f, "CSRR0 " TARGET_FMT_lx " CSRR1 " TARGET_FMT_lx
" MCSRR0 " TARGET_FMT_lx " MCSRR1 " TARGET_FMT_lx "\n",
env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
* IVORs are left out as they are large and do not change often --
* they can be read with "p $ivor0", "p $ivor1", etc.
*/
+ break;
+ case POWERPC_EXCP_40x:
+ qemu_fprintf(f, " TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx
+ " ESR " TARGET_FMT_lx " DEAR " TARGET_FMT_lx "\n",
+ env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
+ env->spr[SPR_40x_ESR], env->spr[SPR_40x_DEAR]);
+
+ qemu_fprintf(f, " EVPR " TARGET_FMT_lx " SRR2 " TARGET_FMT_lx
+ " SRR3 " TARGET_FMT_lx " PID " TARGET_FMT_lx "\n",
+ env->spr[SPR_40x_EVPR], env->spr[SPR_40x_SRR2],
+ env->spr[SPR_40x_SRR3], env->spr[SPR_40x_PID]);
+ break;
+ default:
+ break;
}
#if defined(TARGET_PPC64)
/* Exception processing */
#if !defined(CONFIG_USER_ONLY)
-static inline void dump_syscall(CPUPPCState *env)
+static const char *powerpc_excp_name(int excp)
+{
+ switch (excp) {
+ case POWERPC_EXCP_CRITICAL: return "CRITICAL";
+ case POWERPC_EXCP_MCHECK: return "MCHECK";
+ case POWERPC_EXCP_DSI: return "DSI";
+ case POWERPC_EXCP_ISI: return "ISI";
+ case POWERPC_EXCP_EXTERNAL: return "EXTERNAL";
+ case POWERPC_EXCP_ALIGN: return "ALIGN";
+ case POWERPC_EXCP_PROGRAM: return "PROGRAM";
+ case POWERPC_EXCP_FPU: return "FPU";
+ case POWERPC_EXCP_SYSCALL: return "SYSCALL";
+ case POWERPC_EXCP_APU: return "APU";
+ case POWERPC_EXCP_DECR: return "DECR";
+ case POWERPC_EXCP_FIT: return "FIT";
+ case POWERPC_EXCP_WDT: return "WDT";
+ case POWERPC_EXCP_DTLB: return "DTLB";
+ case POWERPC_EXCP_ITLB: return "ITLB";
+ case POWERPC_EXCP_DEBUG: return "DEBUG";
+ case POWERPC_EXCP_SPEU: return "SPEU";
+ case POWERPC_EXCP_EFPDI: return "EFPDI";
+ case POWERPC_EXCP_EFPRI: return "EFPRI";
+ case POWERPC_EXCP_EPERFM: return "EPERFM";
+ case POWERPC_EXCP_DOORI: return "DOORI";
+ case POWERPC_EXCP_DOORCI: return "DOORCI";
+ case POWERPC_EXCP_GDOORI: return "GDOORI";
+ case POWERPC_EXCP_GDOORCI: return "GDOORCI";
+ case POWERPC_EXCP_HYPPRIV: return "HYPPRIV";
+ case POWERPC_EXCP_RESET: return "RESET";
+ case POWERPC_EXCP_DSEG: return "DSEG";
+ case POWERPC_EXCP_ISEG: return "ISEG";
+ case POWERPC_EXCP_HDECR: return "HDECR";
+ case POWERPC_EXCP_TRACE: return "TRACE";
+ case POWERPC_EXCP_HDSI: return "HDSI";
+ case POWERPC_EXCP_HISI: return "HISI";
+ case POWERPC_EXCP_HDSEG: return "HDSEG";
+ case POWERPC_EXCP_HISEG: return "HISEG";
+ case POWERPC_EXCP_VPU: return "VPU";
+ case POWERPC_EXCP_PIT: return "PIT";
+ case POWERPC_EXCP_IO: return "IO";
+ case POWERPC_EXCP_RUNM: return "RUNM";
+ case POWERPC_EXCP_EMUL: return "EMUL";
+ case POWERPC_EXCP_IFTLB: return "IFTLB";
+ case POWERPC_EXCP_DLTLB: return "DLTLB";
+ case POWERPC_EXCP_DSTLB: return "DSTLB";
+ case POWERPC_EXCP_FPA: return "FPA";
+ case POWERPC_EXCP_DABR: return "DABR";
+ case POWERPC_EXCP_IABR: return "IABR";
+ case POWERPC_EXCP_SMI: return "SMI";
+ case POWERPC_EXCP_PERFM: return "PERFM";
+ case POWERPC_EXCP_THERM: return "THERM";
+ case POWERPC_EXCP_VPUA: return "VPUA";
+ case POWERPC_EXCP_SOFTP: return "SOFTP";
+ case POWERPC_EXCP_MAINT: return "MAINT";
+ case POWERPC_EXCP_MEXTBR: return "MEXTBR";
+ case POWERPC_EXCP_NMEXTBR: return "NMEXTBR";
+ case POWERPC_EXCP_ITLBE: return "ITLBE";
+ case POWERPC_EXCP_DTLBE: return "DTLBE";
+ case POWERPC_EXCP_VSXU: return "VSXU";
+ case POWERPC_EXCP_FU: return "FU";
+ case POWERPC_EXCP_HV_EMU: return "HV_EMU";
+ case POWERPC_EXCP_HV_MAINT: return "HV_MAINT";
+ case POWERPC_EXCP_HV_FU: return "HV_FU";
+ case POWERPC_EXCP_SDOOR: return "SDOOR";
+ case POWERPC_EXCP_SDOOR_HV: return "SDOOR_HV";
+ case POWERPC_EXCP_HVIRT: return "HVIRT";
+ case POWERPC_EXCP_SYSCALL_VECTORED: return "SYSCALL_VECTORED";
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void dump_syscall(CPUPPCState *env)
{
qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64
" r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64
ppc_dump_gpr(env, 8), env->nip);
}
-static inline void dump_hcall(CPUPPCState *env)
+static void dump_hcall(CPUPPCState *env)
{
qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64
" r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
* | a | h | 11 | 1 | 1 | h |
* +--------------------------------------------------------------------+
*/
-static inline void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
+static void ppc_excp_apply_ail(PowerPCCPU *cpu, int excp_model, int excp,
target_ulong msr,
target_ulong *new_msr,
target_ulong *vector)
#endif
}
-static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
+static void powerpc_set_excp_state(PowerPCCPU *cpu,
target_ulong vector, target_ulong msr)
{
CPUState *cs = CPU(cpu);
* Note that this function should be greatly optimized when called
* with a constant excp, from ppc_hw_interrupt
*/
-static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
+static void powerpc_excp(PowerPCCPU *cpu, int excp)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
+ int excp_model = env->excp_model;
target_ulong msr, new_msr, vector;
- int srr0, srr1, asrr0, asrr1, lev = -1;
+ int srr0, srr1, lev = -1;
+
+ if (excp <= POWERPC_EXCP_NONE || excp >= POWERPC_EXCP_NB) {
+ cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
+ }
qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
- " => %08x (%02x)\n", env->nip, excp, env->error_code);
+ " => %s (%d) error=%02x\n", env->nip, powerpc_excp_name(excp),
+ excp, env->error_code);
/* new srr1 value excluding must-be-zero bits */
if (excp_model == POWERPC_EXCP_BOOKE) {
/* target registers */
srr0 = SPR_SRR0;
srr1 = SPR_SRR1;
- asrr0 = -1;
- asrr1 = -1;
/*
* check for special resume at 0x100 from doze/nap/sleep/winkle on
}
#endif
+ vector = env->excp_vectors[excp];
+ if (vector == (target_ulong)-1ULL) {
+ cpu_abort(cs, "Raised an exception without defined vector %d\n",
+ excp);
+ }
+
+ vector |= env->excp_prefix;
+
switch (excp) {
- case POWERPC_EXCP_NONE:
- /* Should never happen */
- return;
case POWERPC_EXCP_CRITICAL: /* Critical input */
switch (excp_model) {
case POWERPC_EXCP_40x:
/* FIXME: choose one or the other based on CPU type */
srr0 = SPR_BOOKE_MCSRR0;
srr1 = SPR_BOOKE_MCSRR1;
- asrr0 = SPR_BOOKE_CSRR0;
- asrr1 = SPR_BOOKE_CSRR1;
+
+ env->spr[SPR_BOOKE_CSRR0] = env->nip;
+ env->spr[SPR_BOOKE_CSRR1] = msr;
break;
default:
break;
env->nip += 4;
new_msr |= env->msr & ((target_ulong)1 << MSR_EE);
new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+
+ vector += lev * 0x20;
+
+ env->lr = env->nip;
+ env->ctr = msr;
break;
case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
/* FIXME: choose one or the other based on CPU type */
srr0 = SPR_BOOKE_DSRR0;
srr1 = SPR_BOOKE_DSRR1;
- asrr0 = SPR_BOOKE_CSRR0;
- asrr1 = SPR_BOOKE_CSRR1;
+
+ env->spr[SPR_BOOKE_CSRR0] = env->nip;
+ env->spr[SPR_BOOKE_CSRR1] = msr;
+
/* DBSR already modified by caller */
} else {
cpu_abort(cs, "Debug exception triggered on unsupported model\n");
}
#endif
- vector = env->excp_vectors[excp];
- if (vector == (target_ulong)-1ULL) {
- cpu_abort(cs, "Raised an exception without defined vector %d\n",
- excp);
- }
-
- vector |= env->excp_prefix;
-
- /* If any alternate SRR register are defined, duplicate saved values */
- if (asrr0 != -1) {
- env->spr[asrr0] = env->nip;
- }
- if (asrr1 != -1) {
- env->spr[asrr1] = msr;
- }
-
#if defined(TARGET_PPC64)
if (excp_model == POWERPC_EXCP_BOOKE) {
if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
/* Save MSR */
env->spr[srr1] = msr;
-
-#if defined(TARGET_PPC64)
- } else {
- vector += lev * 0x20;
-
- env->lr = env->nip;
- env->ctr = msr;
-#endif
}
/* This can update new_msr and vector if AIL applies */
void ppc_cpu_do_interrupt(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
- powerpc_excp(cpu, env->excp_model, cs->exception_index);
+ powerpc_excp(cpu, cs->exception_index);
}
static void ppc_hw_interrupt(CPUPPCState *env)
/* External reset */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
+ powerpc_excp(cpu, POWERPC_EXCP_RESET);
return;
}
/* Machine check exception */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
+ powerpc_excp(cpu, POWERPC_EXCP_MCHECK);
return;
}
#if 0 /* TODO */
/* External debug exception */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
+ powerpc_excp(cpu, POWERPC_EXCP_DEBUG);
return;
}
#endif
if ((async_deliver || msr_hv == 0) && hdice) {
/* HDEC clears on delivery */
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
+ powerpc_excp(cpu, POWERPC_EXCP_HDECR);
return;
}
}
/* LPCR will be clear when not supported so this will work */
bool hvice = !!(env->spr[SPR_LPCR] & LPCR_HVICE);
if ((async_deliver || msr_hv == 0) && hvice) {
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HVIRT);
+ powerpc_excp(cpu, POWERPC_EXCP_HVIRT);
return;
}
}
/* HEIC blocks delivery to the hypervisor */
if ((async_deliver && !(heic && msr_hv && !msr_pr)) ||
(env->has_hv_mode && msr_hv == 0 && !lpes0)) {
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
+ powerpc_excp(cpu, POWERPC_EXCP_EXTERNAL);
return;
}
}
if (msr_ce != 0) {
/* External critical interrupt */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
+ powerpc_excp(cpu, POWERPC_EXCP_CRITICAL);
return;
}
}
/* Watchdog timer on embedded PowerPC */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
+ powerpc_excp(cpu, POWERPC_EXCP_WDT);
return;
}
if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
+ powerpc_excp(cpu, POWERPC_EXCP_DOORCI);
return;
}
/* Fixed interval timer on embedded PowerPC */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
+ powerpc_excp(cpu, POWERPC_EXCP_FIT);
return;
}
/* Programmable interval timer on embedded PowerPC */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
+ powerpc_excp(cpu, POWERPC_EXCP_PIT);
return;
}
/* Decrementer exception */
if (ppc_decr_clear_on_delivery(env)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
}
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
+ powerpc_excp(cpu, POWERPC_EXCP_DECR);
return;
}
if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
if (is_book3s_arch2x(env)) {
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR);
+ powerpc_excp(cpu, POWERPC_EXCP_SDOOR);
} else {
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
+ powerpc_excp(cpu, POWERPC_EXCP_DOORI);
}
return;
}
if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDOORBELL)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDOORBELL);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_SDOOR_HV);
+ powerpc_excp(cpu, POWERPC_EXCP_SDOOR_HV);
return;
}
if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
+ powerpc_excp(cpu, POWERPC_EXCP_PERFM);
return;
}
/* Thermal interrupt */
if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
+ powerpc_excp(cpu, POWERPC_EXCP_THERM);
return;
}
}
void ppc_cpu_do_system_reset(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
- CPUPPCState *env = &cpu->env;
- powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
+ powerpc_excp(cpu, POWERPC_EXCP_RESET);
}
void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
#endif /* defined(TARGET_PPC64) */
#endif /* CONFIG_TCG */
-static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
+static void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
{
CPUState *cs = env_cpu(env);
uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
{
- float_status tstat = env->fp_status;
- set_float_exception_flags(0, &tstat);
-
- return float32_to_float64(xb >> 32, &tstat);
+ return helper_todouble(xb >> 32);
}
/*
DEF_HELPER_3(store_403_pbr, void, env, i32, tl)
DEF_HELPER_FLAGS_1(load_40x_pit, TCG_CALL_NO_RWG, tl, env)
DEF_HELPER_FLAGS_2(store_40x_pit, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_40x_tcr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_40x_tsr, TCG_CALL_NO_RWG, void, env, tl)
DEF_HELPER_2(store_40x_dbcr0, void, env, tl)
DEF_HELPER_2(store_40x_sler, void, env, tl)
DEF_HELPER_FLAGS_2(store_booke_tcr, TCG_CALL_NO_RWG, void, env, tl)
}
#if defined(TARGET_PPC64)
- if (pmu_insn_cnt_enabled(env)) {
+ if (env->pmc_ins_cnt) {
hflags |= 1 << HFLAGS_INSN_CNT;
}
#endif
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "kvm_ppc.h"
+#include "power8-pmu.h"
static void post_load_update_msr(CPUPPCState *env)
{
*/
env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
ppc_store_msr(env, msr);
+ pmu_update_summaries(env);
}
static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
vaddr eaddr,
uint64_t *lpid, uint64_t *pid)
{
+ /* When EA(2:11) are nonzero, raise a segment interrupt */
+ if (eaddr & ~R_EADDR_VALID_MASK) {
+ return false;
+ }
+
if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */
switch (eaddr & R_EADDR_QUADRANT) {
case R_EADDR_QUADRANT0:
env->error_code = 0;
}
+static inline const char *access_str(MMUAccessType access_type)
+{
+ return access_type == MMU_DATA_LOAD ? "reading" :
+ (access_type == MMU_DATA_STORE ? "writing" : "execute");
+}
+
static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
vaddr eaddr, uint32_t cause)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
+ qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" cause %08x\n",
+ __func__, access_str(access_type),
+ eaddr, cause);
+
switch (access_type) {
case MMU_INST_FETCH:
/* Instruction Storage Interrupt */
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
+ qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" 0x%"
+ HWADDR_PRIx" cause %08x\n",
+ __func__, access_str(access_type),
+ eaddr, g_raddr, cause);
+
switch (access_type) {
case MMU_INST_FETCH:
/* H Instruction Storage Interrupt */
hwaddr pte_addr;
uint64_t pte;
+ qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
+ " mmu_idx %u (prot %c%c%c) 0x%"HWADDR_PRIx"\n",
+ __func__, access_str(access_type),
+ eaddr, mmu_idx,
+ *h_prot & PAGE_READ ? 'r' : '-',
+ *h_prot & PAGE_WRITE ? 'w' : '-',
+ *h_prot & PAGE_EXEC ? 'x' : '-',
+ g_raddr);
+
*h_page_size = PRTBE_R_GET_RTS(pate.dw0);
/* No valid pte or access denied due to protection */
if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
hwaddr h_raddr, pte_addr;
int ret;
+ qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
+ " mmu_idx %u pid %"PRIu64"\n",
+ __func__, access_str(access_type),
+ eaddr, mmu_idx, pid);
+
/* Index Process Table by PID to Find Corresponding Process Table Entry */
offset = pid * sizeof(struct prtb_entry);
size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
* | = On | Process Scoped | Scoped |
* +-------------+----------------+---------------+
*/
-bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
- hwaddr *raddr, int *psizep, int *protp, int mmu_idx,
- bool guest_visible)
+static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
+ MMUAccessType access_type, hwaddr *raddr,
+ int *psizep, int *protp, int mmu_idx,
+ bool guest_visible)
{
CPUPPCState *env = &cpu->env;
uint64_t lpid, pid;
return true;
}
+
+bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+ hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
+ bool guest_visible)
+{
+ bool ret = ppc_radix64_xlate_impl(cpu, eaddr, access_type, raddrp,
+ psizep, protp, mmu_idx, guest_visible);
+
+ qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
+ " mmu_idx %u (prot %c%c%c) -> 0x%"HWADDR_PRIx"\n",
+ __func__, access_str(access_type),
+ eaddr, mmu_idx,
+ *protp & PAGE_READ ? 'r' : '-',
+ *protp & PAGE_WRITE ? 'w' : '-',
+ *protp & PAGE_EXEC ? 'x' : '-',
+ *raddrp);
+
+ return ret;
+}
/* Radix Quadrants */
#define R_EADDR_MASK 0x3FFFFFFFFFFFFFFF
+#define R_EADDR_VALID_MASK 0xC00FFFFFFFFFFFFF
#define R_EADDR_QUADRANT 0xC000000000000000
#define R_EADDR_QUADRANT0 0x0000000000000000
#define R_EADDR_QUADRANT1 0x4000000000000000
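The new R_EADDR_VALID_MASK pairs with the EA(2:11) check added above: it keeps the two quadrant bits EA(0:1) and the low 52 address bits, so any of EA bits 2:11 (IBM MSB-0 numbering, i.e. bits 61..52 in LSB-0 numbering) being set makes (eaddr & ~R_EADDR_VALID_MASK) nonzero and raises the segment interrupt. A compile-time sanity check along these lines is illustrative only, not part of the change, and assumes qemu/bitops.h and qemu/compiler.h are in scope:

    /* The bits cleared by the mask are exactly EA(2:11), i.e. bits 61..52. */
    QEMU_BUILD_BUG_ON(~R_EADDR_VALID_MASK != MAKE_64BIT_MASK(52, 10));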
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
-/* #define DEBUG_MMU */
-/* #define DEBUG_BATS */
-/* #define DEBUG_SOFTWARE_TLB */
/* #define DUMP_PAGE_TABLES */
-/* #define FLUSH_ALL_TLBS */
-
-#ifdef DEBUG_MMU
-# define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
-#else
-# define LOG_MMU_STATE(cpu) do { } while (0)
-#endif
-
-#ifdef DEBUG_SOFTWARE_TLB
-# define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
-#else
-# define LOG_SWTLB(...) do { } while (0)
-#endif
-
-#ifdef DEBUG_BATS
-# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
-#else
-# define LOG_BATS(...) do { } while (0)
-#endif
void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
tlb = &env->tlb.tlb6[nr];
/* This test "emulates" the PTE index match for hardware TLBs */
if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
- LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
- "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
- pte_is_valid(tlb->pte0) ? "valid" : "inval",
- tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
+ qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s [" TARGET_FMT_lx
+ " " TARGET_FMT_lx "] <> " TARGET_FMT_lx "\n",
+ nr, env->nb_tlb,
+ pte_is_valid(tlb->pte0) ? "valid" : "inval",
+ tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
continue;
}
- LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
- TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
- pte_is_valid(tlb->pte0) ? "valid" : "inval",
- tlb->EPN, eaddr, tlb->pte1,
- access_type == MMU_DATA_STORE ? 'S' : 'L',
- access_type == MMU_INST_FETCH ? 'I' : 'D');
+ qemu_log_mask(CPU_LOG_MMU, "TLB %d/%d %s " TARGET_FMT_lx " <> "
+ TARGET_FMT_lx " " TARGET_FMT_lx " %c %c\n",
+ nr, env->nb_tlb,
+ pte_is_valid(tlb->pte0) ? "valid" : "inval",
+ tlb->EPN, eaddr, tlb->pte1,
+ access_type == MMU_DATA_STORE ? 'S' : 'L',
+ access_type == MMU_INST_FETCH ? 'I' : 'D');
switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1,
0, access_type)) {
case -3:
}
if (best != -1) {
done:
- LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
- ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
+ qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " TARGET_FMT_plx
+ " prot=%01x ret=%d\n",
+ ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
/* Update page flags */
pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type);
}
int ret = -1;
bool ifetch = access_type == MMU_INST_FETCH;
- LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
+ qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
ifetch ? 'I' : 'D', virtual);
if (ifetch) {
BATlt = env->IBAT[1];
BEPIu = *BATu & 0xF0000000;
BEPIl = *BATu & 0x0FFE0000;
bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
- LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
- " BATl " TARGET_FMT_lx "\n", __func__,
- ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
+ qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu "
+ TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__,
+ ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl);
if ((virtual & 0xF0000000) == BEPIu &&
((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
/* BAT matches */
ctx->prot = prot;
ret = check_prot(ctx->prot, access_type);
if (ret == 0) {
- LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
- i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
- ctx->prot & PAGE_WRITE ? 'W' : '-');
+ qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " TARGET_FMT_plx
+ " prot=%c%c\n", i, ctx->raddr,
+ ctx->prot & PAGE_READ ? 'R' : '-',
+ ctx->prot & PAGE_WRITE ? 'W' : '-');
}
break;
}
}
}
if (ret < 0) {
-#if defined(DEBUG_BATS)
if (qemu_log_enabled()) {
- LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
+ qemu_log_mask(CPU_LOG_MMU, "no BAT match for "
+ TARGET_FMT_lx ":\n", virtual);
for (i = 0; i < 4; i++) {
BATu = &BATut[i];
BATl = &BATlt[i];
BEPIu = *BATu & 0xF0000000;
BEPIl = *BATu & 0x0FFE0000;
bl = (*BATu & 0x00001FFC) << 15;
- LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
- " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
- TARGET_FMT_lx " " TARGET_FMT_lx "\n",
- __func__, ifetch ? 'I' : 'D', i, virtual,
- *BATu, *BATl, BEPIu, BEPIl, bl);
+ qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v "
+ TARGET_FMT_lx " BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
+ TARGET_FMT_lx " " TARGET_FMT_lx "\n",
+ __func__, ifetch ? 'I' : 'D', i, virtual,
+ *BATu, *BATl, BEPIu, BEPIl, bl);
}
}
-#endif
}
/* No hit */
return ret;
vsid = sr & 0x00FFFFFF;
target_page_bits = TARGET_PAGE_BITS;
qemu_log_mask(CPU_LOG_MMU,
- "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
- " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
- " ir=%d dr=%d pr=%d %d t=%d\n",
- eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
- (int)msr_dr, pr != 0 ? 1 : 0, access_type == MMU_DATA_STORE, type);
+ "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
+ " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
+ " ir=%d dr=%d pr=%d %d t=%d\n",
+ eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
+ (int)msr_dr, pr != 0 ? 1 : 0,
+ access_type == MMU_DATA_STORE, type);
pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
hash = vsid ^ pgidx;
ctx->ptem = (vsid << 7) | (pgidx >> 10);
return -1;
}
mask = ~(tlb->size - 1);
- LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
- " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN,
- mask, (uint32_t)tlb->PID, tlb->prot);
+ qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx
+ " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n",
+ __func__, i, address, pid, tlb->EPN,
+ mask, (uint32_t)tlb->PID, tlb->prot);
/* Check PID */
if (tlb->PID != 0 && tlb->PID != pid) {
return -1;
}
zsel = (tlb->attr >> 4) & 0xF;
zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
- LOG_SWTLB("%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
- __func__, i, zsel, zpr, access_type, tlb->attr);
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n",
+ __func__, i, zsel, zpr, access_type, tlb->attr);
/* Check execute enable bit */
switch (zpr) {
case 0x2:
}
if (ret >= 0) {
ctx->raddr = raddr;
- LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
- " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
- ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
+ " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
+ ret);
return 0;
}
}
- LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
- " %d %d\n", __func__, address, raddr, ctx->prot, ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
+ " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, raddr, ctx->prot, ret);
return ret;
}
goto found_tlb;
}
- LOG_SWTLB("%s: TLB entry not found\n", __func__);
+ qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__);
return -1;
found_tlb:
/* Check the address space */
if ((access_type == MMU_INST_FETCH ? msr_ir : msr_dr) != (tlb->attr & 1)) {
- LOG_SWTLB("%s: AS doesn't match\n", __func__);
+ qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
return -1;
}
*prot = prot2;
if (prot2 & prot_for_access_type(access_type)) {
- LOG_SWTLB("%s: good TLB!\n", __func__);
+ qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
return 0;
}
- LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2);
+ qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2);
return access_type == MMU_INST_FETCH ? -3 : -2;
}
if (ret >= 0) {
ctx->raddr = raddr;
- LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
- " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
- ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
+ " => " TARGET_FMT_plx " %d %d\n", __func__,
+ address, ctx->raddr, ctx->prot, ret);
} else {
- LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
- " %d %d\n", __func__, address, raddr, ctx->prot, ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
+ " => " TARGET_FMT_plx " %d %d\n", __func__,
+ address, raddr, ctx->prot, ret);
}
return ret;
}
mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
- LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
- PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%"
- PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask,
- tlb->mas7_3, tlb->mas8);
+ qemu_log_mask(CPU_LOG_MMU, "%s: TLB ADDR=0x" TARGET_FMT_lx
+ " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%"
+ HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n",
+ __func__, address, pid, tlb->mas1, tlb->mas2, mask,
+ tlb->mas7_3, tlb->mas8);
/* Check PID */
tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
}
}
- LOG_SWTLB("%s: TLB entry not found\n", __func__);
+ qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__);
return -1;
found_tlb:
}
if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
- LOG_SWTLB("%s: AS doesn't match\n", __func__);
+ qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
return -1;
}
*prot = prot2;
if (prot2 & prot_for_access_type(access_type)) {
- LOG_SWTLB("%s: good TLB!\n", __func__);
+ qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
return 0;
}
- LOG_SWTLB("%s: no prot match: %x\n", __func__, prot2);
+ qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2);
return access_type == MMU_INST_FETCH ? -3 : -2;
}
if (ret >= 0) {
ctx->raddr = raddr;
- LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
- " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
- ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx
+ " => " TARGET_FMT_plx " %d %d\n", __func__, address,
+ ctx->raddr, ctx->prot, ret);
} else {
- LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
- " %d %d\n", __func__, address, raddr, ctx->prot, ret);
+ qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx
+ " => " TARGET_FMT_plx " %d %d\n", __func__, address,
+ raddr, ctx->prot, ret);
}
return ret;
}
if (guest_visible) {
- LOG_MMU_STATE(cs);
+ log_cpu_state_mask(CPU_LOG_MMU, cs, 0);
if (type == ACCESS_CODE) {
switch (ret) {
case -1:
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
-/* #define DEBUG_BATS */
-/* #define DEBUG_SOFTWARE_TLB */
-/* #define DUMP_PAGE_TABLES */
/* #define FLUSH_ALL_TLBS */
-#ifdef DEBUG_SOFTWARE_TLB
-# define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
-#else
-# define LOG_SWTLB(...) do { } while (0)
-#endif
-
-#ifdef DEBUG_BATS
-# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
-#else
-# define LOG_BATS(...) do { } while (0)
-#endif
-
/*****************************************************************************/
/* PowerPC MMU emulation */
nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
tlb = &env->tlb.tlb6[nr];
if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
- LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
- env->nb_tlb, eaddr);
+ qemu_log_mask(CPU_LOG_MMU, "TLB invalidate %d/%d "
+ TARGET_FMT_lx "\n", nr, env->nb_tlb, eaddr);
pte_invalidate(&tlb->pte0);
tlb_flush_page(cs, tlb->EPN);
}
nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
tlb = &env->tlb.tlb6[nr];
- LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
- " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
+ qemu_log_mask(CPU_LOG_MMU, "Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 "
+ TARGET_FMT_lx " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb,
+ EPN, pte0, pte1);
/* Invalidate any pending reference in QEMU for this virtual address */
ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
tlb->pte0 = pte0;
end = base + mask + 0x00020000;
if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
/* Flushing 1024 4K pages is slower than a complete flush */
- LOG_BATS("Flush all BATs\n");
+ qemu_log_mask(CPU_LOG_MMU, "Flush all BATs\n");
tlb_flush(cs);
- LOG_BATS("Flush done\n");
+ qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
return;
}
- LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
- TARGET_FMT_lx ")\n", base, end, mask);
+ qemu_log_mask(CPU_LOG_MMU, "Flush BAT from " TARGET_FMT_lx
+ " to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
+ base, end, mask);
for (page = base; page != end; page += TARGET_PAGE_SIZE) {
tlb_flush_page(cs, page);
}
- LOG_BATS("Flush done\n");
+ qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
}
#endif
static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
target_ulong value)
{
- LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
- nr, ul == 0 ? 'u' : 'l', value, env->nip);
+ qemu_log_mask(CPU_LOG_MMU, "Set %cBAT%d%c to " TARGET_FMT_lx " ("
+ TARGET_FMT_lx ")\n", ID, nr, ul == 0 ? 'u' : 'l',
+ value, env->nip);
}
void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
}
way = (env->spr[SPR_SRR1] >> 17) & 1;
(void)EPN; /* avoid a compiler warning */
- LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
- " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
- RPN, way);
+ qemu_log_mask(CPU_LOG_MMU, "%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx
+ " PTE0 " TARGET_FMT_lx " PTE1 " TARGET_FMT_lx " way %d\n",
+ __func__, new_EPN, EPN, CMP, RPN, way);
/* Store this TLB */
ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
way, is_code, CMP, RPN);
ppcemb_tlb_t *tlb;
target_ulong page, end;
- LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
+ qemu_log_mask(CPU_LOG_MMU, "%s entry %d val " TARGET_FMT_lx "\n",
+ __func__, (int)entry,
val);
entry &= PPC4XX_TLB_ENTRY_MASK;
tlb = &env->tlb.tlbe[entry];
/* Invalidate previous TLB (if it's valid) */
if (tlb->prot & PAGE_VALID) {
end = tlb->EPN + tlb->size;
- LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
- TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
+ qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
+ TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
+ (int)entry, tlb->EPN, end);
for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
tlb_flush_page(cs, page);
}
tlb->prot &= ~PAGE_VALID;
}
tlb->PID = env->spr[SPR_40x_PID]; /* PID */
- LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
- " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
- (int)entry, tlb->RPN, tlb->EPN, tlb->size,
- tlb->prot & PAGE_READ ? 'r' : '-',
- tlb->prot & PAGE_WRITE ? 'w' : '-',
- tlb->prot & PAGE_EXEC ? 'x' : '-',
- tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
+ qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " TARGET_FMT_plx
+ " EPN " TARGET_FMT_lx " size " TARGET_FMT_lx
+ " prot %c%c%c%c PID %d\n", __func__,
+ (int)entry, tlb->RPN, tlb->EPN, tlb->size,
+ tlb->prot & PAGE_READ ? 'r' : '-',
+ tlb->prot & PAGE_WRITE ? 'w' : '-',
+ tlb->prot & PAGE_EXEC ? 'x' : '-',
+ tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
/* Invalidate new TLB (if valid) */
if (tlb->prot & PAGE_VALID) {
end = tlb->EPN + tlb->size;
- LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
- TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
+ qemu_log_mask(CPU_LOG_MMU, "%s: invalidate TLB %d start "
+ TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
+ (int)entry, tlb->EPN, end);
for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
tlb_flush_page(cs, page);
}
{
ppcemb_tlb_t *tlb;
- LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
- val);
+ qemu_log_mask(CPU_LOG_MMU, "%s entry %i val " TARGET_FMT_lx "\n",
+ __func__, (int)entry, val);
entry &= PPC4XX_TLB_ENTRY_MASK;
tlb = &env->tlb.tlbe[entry];
tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
if (val & PPC4XX_TLBLO_WR) {
tlb->prot |= PAGE_WRITE;
}
- LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
- " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
- (int)entry, tlb->RPN, tlb->EPN, tlb->size,
- tlb->prot & PAGE_READ ? 'r' : '-',
- tlb->prot & PAGE_WRITE ? 'w' : '-',
- tlb->prot & PAGE_EXEC ? 'x' : '-',
- tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
+ qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " TARGET_FMT_plx
+ " EPN " TARGET_FMT_lx
+ " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
+ (int)entry, tlb->RPN, tlb->EPN, tlb->size,
+ tlb->prot & PAGE_READ ? 'r' : '-',
+ tlb->prot & PAGE_WRITE ? 'w' : '-',
+ tlb->prot & PAGE_EXEC ? 'x' : '-',
+ tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}
target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
target_ulong EPN, RPN, size;
int do_flush_tlbs;
- LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
- __func__, word, (int)entry, value);
+ qemu_log_mask(CPU_LOG_MMU, "%s word %d entry %d value " TARGET_FMT_lx "\n",
+ __func__, word, (int)entry, value);
do_flush_tlbs = 0;
entry &= 0x3F;
tlb = &env->tlb.tlbe[entry];
*/
#include "qemu/osdep.h"
-
-#include "power8-pmu.h"
#include "cpu.h"
#include "helper_regs.h"
#include "exec/exec-all.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/ppc/ppc.h"
+#include "power8-pmu.h"
#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
#define PMC_COUNTER_NEGATIVE_VAL 0x80000000UL
-static bool pmc_is_inactive(CPUPPCState *env, int sprn)
-{
- if (env->spr[SPR_POWER_MMCR0] & MMCR0_FC) {
- return true;
- }
-
- if (sprn < SPR_POWER_PMC5) {
- return env->spr[SPR_POWER_MMCR0] & MMCR0_FC14;
- }
-
- return env->spr[SPR_POWER_MMCR0] & MMCR0_FC56;
-}
-
static bool pmc_has_overflow_enabled(CPUPPCState *env, int sprn)
{
if (sprn == SPR_POWER_PMC1) {
return env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE;
}
-/*
- * For PMCs 1-4, IBM POWER chips has support for an implementation
- * dependent event, 0x1E, that enables cycle counting. The Linux kernel
- * makes extensive use of 0x1E, so let's also support it.
- *
- * Likewise, event 0x2 is an implementation-dependent event that IBM
- * POWER chips implement (at least since POWER8) that is equivalent to
- * PM_INST_CMPL. Let's support this event on PMCs 1-4 as well.
- */
-static PMUEventType pmc_get_event(CPUPPCState *env, int sprn)
+void pmu_update_summaries(CPUPPCState *env)
{
- uint8_t mmcr1_evt_extr[] = { MMCR1_PMC1EVT_EXTR, MMCR1_PMC2EVT_EXTR,
- MMCR1_PMC3EVT_EXTR, MMCR1_PMC4EVT_EXTR };
- PMUEventType evt_type = PMU_EVENT_INVALID;
- uint8_t pmcsel;
- int i;
-
- if (pmc_is_inactive(env, sprn)) {
- return PMU_EVENT_INACTIVE;
- }
-
- if (sprn == SPR_POWER_PMC5) {
- return PMU_EVENT_INSTRUCTIONS;
- }
+ target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
+ target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
+ int ins_cnt = 0;
+ int cyc_cnt = 0;
- if (sprn == SPR_POWER_PMC6) {
- return PMU_EVENT_CYCLES;
+ if (mmcr0 & MMCR0_FC) {
+ goto hflags_calc;
}
- i = sprn - SPR_POWER_PMC1;
- pmcsel = extract64(env->spr[SPR_POWER_MMCR1], mmcr1_evt_extr[i],
- MMCR1_EVT_SIZE);
-
- switch (pmcsel) {
- case 0x2:
- evt_type = PMU_EVENT_INSTRUCTIONS;
- break;
- case 0x1E:
- evt_type = PMU_EVENT_CYCLES;
- break;
- case 0xF0:
- /*
- * PMC1SEL = 0xF0 is the architected PowerISA v3.1
- * event that counts cycles using PMC1.
- */
- if (sprn == SPR_POWER_PMC1) {
- evt_type = PMU_EVENT_CYCLES;
- }
- break;
- case 0xFA:
- /*
- * PMC4SEL = 0xFA is the "instructions completed
- * with run latch set" event.
- */
- if (sprn == SPR_POWER_PMC4) {
- evt_type = PMU_EVENT_INSN_RUN_LATCH;
- }
- break;
- case 0xFE:
- /*
- * PMC1SEL = 0xFE is the architected PowerISA v3.1
- * event to sample instructions using PMC1.
- */
- if (sprn == SPR_POWER_PMC1) {
- evt_type = PMU_EVENT_INSTRUCTIONS;
+ if (!(mmcr0 & MMCR0_FC14) && mmcr1 != 0) {
+ target_ulong sel;
+
+ sel = extract64(mmcr1, MMCR1_PMC1EVT_EXTR, MMCR1_EVT_SIZE);
+ switch (sel) {
+ case 0x02:
+ case 0xfe:
+ ins_cnt |= 1 << 1;
+ break;
+ case 0x1e:
+ case 0xf0:
+ cyc_cnt |= 1 << 1;
+ break;
}
- break;
- default:
- break;
- }
- return evt_type;
-}
+ sel = extract64(mmcr1, MMCR1_PMC2EVT_EXTR, MMCR1_EVT_SIZE);
+ ins_cnt |= (sel == 0x02) << 2;
+ cyc_cnt |= (sel == 0x1e) << 2;
-bool pmu_insn_cnt_enabled(CPUPPCState *env)
-{
- int sprn;
+ sel = extract64(mmcr1, MMCR1_PMC3EVT_EXTR, MMCR1_EVT_SIZE);
+ ins_cnt |= (sel == 0x02) << 3;
+ cyc_cnt |= (sel == 0x1e) << 3;
- for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC5; sprn++) {
- if (pmc_get_event(env, sprn) == PMU_EVENT_INSTRUCTIONS ||
- pmc_get_event(env, sprn) == PMU_EVENT_INSN_RUN_LATCH) {
- return true;
- }
+ sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
+ ins_cnt |= ((sel == 0xfa) || (sel == 0x2)) << 4;
+ cyc_cnt |= (sel == 0x1e) << 4;
}
- return false;
+ ins_cnt |= !(mmcr0 & MMCR0_FC56) << 5;
+ cyc_cnt |= !(mmcr0 & MMCR0_FC56) << 6;
+
+ hflags_calc:
+ env->pmc_ins_cnt = ins_cnt;
+ env->pmc_cyc_cnt = cyc_cnt;
+ env->hflags = deposit32(env->hflags, HFLAGS_INSN_CNT, 1, ins_cnt != 0);
}
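pmu_update_summaries() replaces the per-access pmc_get_event() scan with two cached bitmaps: bit n of pmc_ins_cnt / pmc_cyc_cnt (for PMCn, n = 1..6) is set when that PMC is currently counting instructions / cycles, which is exactly how the cycle-update loop below indexes them. A minimal sketch of a reader, using a hypothetical helper name that is not part of the patch:

    /* True when PMCn (sprn = SPR_POWER_PMC1..PMC6) currently counts insns. */
    static bool pmc_counts_instructions(CPUPPCState *env, int sprn)
    {
        return (env->pmc_ins_cnt >> (sprn - SPR_POWER_PMC1 + 1)) & 1;
    }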
static bool pmu_increment_insns(CPUPPCState *env, uint32_t num_insns)
{
+ target_ulong mmcr0 = env->spr[SPR_POWER_MMCR0];
+ unsigned ins_cnt = env->pmc_ins_cnt;
bool overflow_triggered = false;
- int sprn;
-
- /* PMC6 never counts instructions */
- for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC5; sprn++) {
- PMUEventType evt_type = pmc_get_event(env, sprn);
- bool insn_event = evt_type == PMU_EVENT_INSTRUCTIONS ||
- evt_type == PMU_EVENT_INSN_RUN_LATCH;
-
- if (pmc_is_inactive(env, sprn) || !insn_event) {
- continue;
+ target_ulong tmp;
+
+ if (unlikely(ins_cnt & 0x1e)) {
+ if (ins_cnt & (1 << 1)) {
+ tmp = env->spr[SPR_POWER_PMC1];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMC1CE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
+ overflow_triggered = true;
+ }
+ env->spr[SPR_POWER_PMC1] = tmp;
}
- if (evt_type == PMU_EVENT_INSTRUCTIONS) {
- env->spr[sprn] += num_insns;
+ if (ins_cnt & (1 << 2)) {
+ tmp = env->spr[SPR_POWER_PMC2];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
+ overflow_triggered = true;
+ }
+ env->spr[SPR_POWER_PMC2] = tmp;
}
- if (evt_type == PMU_EVENT_INSN_RUN_LATCH &&
- env->spr[SPR_CTRL] & CTRL_RUN) {
- env->spr[sprn] += num_insns;
+ if (ins_cnt & (1 << 3)) {
+ tmp = env->spr[SPR_POWER_PMC3];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
+ overflow_triggered = true;
+ }
+ env->spr[SPR_POWER_PMC3] = tmp;
}
- if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL &&
- pmc_has_overflow_enabled(env, sprn)) {
+ if (ins_cnt & (1 << 4)) {
+ target_ulong mmcr1 = env->spr[SPR_POWER_MMCR1];
+ int sel = extract64(mmcr1, MMCR1_PMC4EVT_EXTR, MMCR1_EVT_SIZE);
+ if (sel == 0x02 || (env->spr[SPR_CTRL] & CTRL_RUN)) {
+ tmp = env->spr[SPR_POWER_PMC4];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
+ overflow_triggered = true;
+ }
+ env->spr[SPR_POWER_PMC4] = tmp;
+ }
+ }
+ }
+ if (ins_cnt & (1 << 5)) {
+ tmp = env->spr[SPR_POWER_PMC5];
+ tmp += num_insns;
+ if (tmp >= PMC_COUNTER_NEGATIVE_VAL && (mmcr0 & MMCR0_PMCjCE)) {
+ tmp = PMC_COUNTER_NEGATIVE_VAL;
overflow_triggered = true;
-
- /*
- * The real PMU will always trigger a counter overflow with
- * PMC_COUNTER_NEGATIVE_VAL. We don't have an easy way to
- * do that since we're counting block of instructions at
- * the end of each translation block, and we're probably
- * passing this value at this point.
- *
- * Let's write PMC_COUNTER_NEGATIVE_VAL to the overflowed
- * counter to simulate what the real hardware would do.
- */
- env->spr[sprn] = PMC_COUNTER_NEGATIVE_VAL;
}
+ env->spr[SPR_POWER_PMC5] = tmp;
}
    return overflow_triggered;
{
uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
uint64_t time_delta = now - env->pmu_base_time;
- int sprn;
+ int sprn, cyc_cnt = env->pmc_cyc_cnt;
for (sprn = SPR_POWER_PMC1; sprn <= SPR_POWER_PMC6; sprn++) {
- if (pmc_get_event(env, sprn) != PMU_EVENT_CYCLES) {
- continue;
+ if (cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) {
+            /*
+             * The pseries and powernv clocks run at 1 GHz, so one
+             * nanosecond equals one cycle.
+             */
+ env->spr[sprn] += time_delta;
}
-
- /*
- * The pseries and powernv clock runs at 1Ghz, meaning
- * that 1 nanosec equals 1 cycle.
- */
- env->spr[sprn] += time_delta;
}
/* Update base_time for future calculations */
return;
}
- if (pmc_get_event(env, sprn) != PMU_EVENT_CYCLES ||
+ if (!(env->pmc_cyc_cnt & (1 << (sprn - SPR_POWER_PMC1 + 1))) ||
!pmc_has_overflow_enabled(env, sprn)) {
/* Overflow timer is not needed for this counter */
timer_del(pmc_overflow_timer);
}
if (env->spr[sprn] >= PMC_COUNTER_NEGATIVE_VAL) {
- timeout = 0;
+ timeout = 0;
} else {
timeout = PMC_COUNTER_NEGATIVE_VAL - env->spr[sprn];
}
void helper_store_mmcr0(CPUPPCState *env, target_ulong value)
{
+ bool hflags_pmcc0 = (value & MMCR0_PMCC0) != 0;
+ bool hflags_pmcc1 = (value & MMCR0_PMCC1) != 0;
+
pmu_update_cycles(env);
env->spr[SPR_POWER_MMCR0] = value;
- /* MMCR0 writes can change HFLAGS_PMCCCLEAR and HFLAGS_INSN_CNT */
- hreg_compute_hflags(env);
+ /* MMCR0 writes can change HFLAGS_PMCC[01] and HFLAGS_INSN_CNT */
+ env->hflags = deposit32(env->hflags, HFLAGS_PMCC0, 1, hflags_pmcc0);
+ env->hflags = deposit32(env->hflags, HFLAGS_PMCC1, 1, hflags_pmcc1);
+
+ pmu_update_summaries(env);
/* Update cycle overflow timers with the current MMCR0 state */
pmu_update_overflow_timers(env);
env->spr[SPR_POWER_MMCR1] = value;
/* MMCR1 writes can change HFLAGS_INSN_CNT */
- hreg_compute_hflags(env);
+ pmu_update_summaries(env);
}
target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn)
#ifndef POWER8_PMU
#define POWER8_PMU
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/exec-all.h"
-#include "exec/helper-proto.h"
-#include "qemu/error-report.h"
-#include "qemu/main-loop.h"
-
void cpu_ppc_pmu_init(CPUPPCState *env);
-bool pmu_insn_cnt_enabled(CPUPPCState *env);
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+void pmu_update_summaries(CPUPPCState *env);
+#else
+static inline void pmu_update_summaries(CPUPPCState *env) { }
+#endif
#endif
void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn);
void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn);
void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn);
+void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn);
+void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn);
void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn);
void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn);
void spr_read_403_pbr(DisasContext *ctx, int gprn, int sprn);
store_40x_pit(env, val);
}
+void helper_store_40x_tcr(CPUPPCState *env, target_ulong val)
+{
+ store_40x_tcr(env, val);
+}
+
+void helper_store_40x_tsr(CPUPPCState *env, target_ulong val)
+{
+ store_40x_tsr(env, val);
+}
+
void helper_store_booke_tcr(CPUPPCState *env, target_ulong val)
{
store_booke_tcr(env, val);
gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]);
}
+void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_store_40x_tcr(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_icount_io_start(ctx);
+ gen_helper_store_40x_tsr(cpu_env, cpu_gpr[gprn]);
+}
+
+void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF);
+ gen_store_spr(SPR_40x_PID, t0);
+ tcg_temp_free(t0);
+}
+
void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn)
{
gen_icount_io_start(ctx);
GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
-GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q))
+GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_UQ))
#if defined(TARGET_PPC64)
-GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q))
+GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_UQ))
#endif
#define GEN_QEMU_STORE_TL(stop, op) \
GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB))
GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
-GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
+GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_UQ))
#if defined(TARGET_PPC64)
-GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
+GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_UQ))
#endif
#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
-GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00)
+GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif
#if defined(TARGET_PPC64)
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
-GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1d, 0x04)
+GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1d, 0x04)
#endif
#if defined(TARGET_PPC64)
#ifdef TARGET_PPC64
static void gen_ldat(DisasContext *ctx)
{
- gen_ld_atomic(ctx, DEF_MEMOP(MO_Q));
+ gen_ld_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
#ifdef TARGET_PPC64
static void gen_stdat(DisasContext *ctx)
{
- gen_st_atomic(ctx, DEF_MEMOP(MO_Q));
+ gen_st_atomic(ctx, DEF_MEMOP(MO_UQ));
}
#endif
#if defined(TARGET_PPC64)
/* ldarx */
-LARX(ldarx, DEF_MEMOP(MO_Q))
+LARX(ldarx, DEF_MEMOP(MO_UQ))
/* stdcx. */
-STCX(stdcx_, DEF_MEMOP(MO_Q))
+STCX(stdcx_, DEF_MEMOP(MO_UQ))
/* lqarx */
static void gen_lqarx(DisasContext *ctx)
return;
}
} else if (ctx->le_mode) {
- tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ | MO_ALIGN_16);
+ tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEUQ | MO_ALIGN_16);
tcg_gen_mov_tl(cpu_reserve, EA);
gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEUQ);
} else {
- tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ | MO_ALIGN_16);
+ tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEUQ | MO_ALIGN_16);
tcg_gen_mov_tl(cpu_reserve, EA);
gen_addr_add(ctx, EA, EA, 8);
- tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEUQ);
}
tcg_temp_free(EA);
GEN_LDEPX(lh, DEF_MEMOP(MO_UW), 0x1F, 0x08)
GEN_LDEPX(lw, DEF_MEMOP(MO_UL), 0x1F, 0x00)
#if defined(TARGET_PPC64)
-GEN_LDEPX(ld, DEF_MEMOP(MO_Q), 0x1D, 0x00)
+GEN_LDEPX(ld, DEF_MEMOP(MO_UQ), 0x1D, 0x00)
#endif
#undef GEN_STX_E
GEN_STEPX(sth, DEF_MEMOP(MO_UW), 0x1F, 0x0C)
GEN_STEPX(stw, DEF_MEMOP(MO_UL), 0x1F, 0x04)
#if defined(TARGET_PPC64)
-GEN_STEPX(std, DEF_MEMOP(MO_Q), 0x1D, 0x04)
+GEN_STEPX(std, DEF_MEMOP(MO_UQ), 0x1D, 0x04)
#endif
#undef GEN_CRLOGIC
ctx->base.is_jmp = DISAS_NORETURN;
}
} else {
- mop = DEF_MEMOP(MO_Q);
+ mop = DEF_MEMOP(MO_UQ);
if (store) {
tcg_gen_qemu_st_i64(low_addr_gpr, ea, ctx->mem_idx, mop);
} else {
TRANS64(PLWA, do_ldst_PLS_D, false, false, MO_SL)
/* Load Doubleword */
-TRANS64(LD, do_ldst_D, false, false, MO_Q)
-TRANS64(LDX, do_ldst_X, false, false, MO_Q)
-TRANS64(LDU, do_ldst_D, true, false, MO_Q)
-TRANS64(LDUX, do_ldst_X, true, false, MO_Q)
-TRANS64(PLD, do_ldst_PLS_D, false, false, MO_Q)
+TRANS64(LD, do_ldst_D, false, false, MO_UQ)
+TRANS64(LDX, do_ldst_X, false, false, MO_UQ)
+TRANS64(LDU, do_ldst_D, true, false, MO_UQ)
+TRANS64(LDUX, do_ldst_X, true, false, MO_UQ)
+TRANS64(PLD, do_ldst_PLS_D, false, false, MO_UQ)
/* Load Quadword */
TRANS64(LQ, do_ldst_quad, false, false);
TRANS(PSTW, do_ldst_PLS_D, false, true, MO_UL)
/* Store Doubleword */
-TRANS64(STD, do_ldst_D, false, true, MO_Q)
-TRANS64(STDX, do_ldst_X, false, true, MO_Q)
-TRANS64(STDU, do_ldst_D, true, true, MO_Q)
-TRANS64(STDUX, do_ldst_X, true, true, MO_Q)
-TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_Q)
+TRANS64(STD, do_ldst_D, false, true, MO_UQ)
+TRANS64(STDX, do_ldst_X, false, true, MO_UQ)
+TRANS64(STDU, do_ldst_D, true, true, MO_UQ)
+TRANS64(STDUX, do_ldst_X, true, true, MO_UQ)
+TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_UQ)
/* Store Quadword */
TRANS64(STQ, do_ldst_quad, true, false);
EA = tcg_temp_new();
t0 = tcg_temp_new_i64();
gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_Q));
+ tcg_gen_qemu_ld_i64(t0, EA, PPC_TLB_EPID_LOAD, DEF_MEMOP(MO_UQ));
set_fpr(rD(ctx->opcode), t0);
tcg_temp_free(EA);
tcg_temp_free_i64(t0);
t0 = tcg_temp_new_i64();
gen_addr_reg_index(ctx, EA);
get_fpr(t0, rD(ctx->opcode));
- tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_Q));
+ tcg_gen_qemu_st_i64(t0, EA, PPC_TLB_EPID_STORE, DEF_MEMOP(MO_UQ));
tcg_temp_free(EA);
tcg_temp_free_i64(t0);
}
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
tcg_gen_shri_i64(t1, t0, 32);
tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEUQ);
tcg_gen_shri_i64(t1, t0, 32);
tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
} else {
- tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
}
set_cpu_vsr(xT(ctx->opcode), xth, true);
set_cpu_vsr(xT(ctx->opcode), xtl, false);
gen_addr_reg_index(ctx, EA);
data = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
- tcg_gen_gvec_dup_i64(MO_Q, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
+ tcg_gen_qemu_ld_i64(data, EA, ctx->mem_idx, DEF_MEMOP(MO_UQ));
+ tcg_gen_gvec_dup_i64(MO_UQ, vsr_full_offset(xT(ctx->opcode)), 16, 16, data);
tcg_temp_free(EA);
tcg_temp_free_i64(data);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
if (ctx->le_mode) {
gen_bswap16x8(xth, xtl, xth, xtl);
}
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEUQ);
set_cpu_vsr(xT(ctx->opcode), xth, true);
set_cpu_vsr(xT(ctx->opcode), xtl, false);
tcg_temp_free(EA);
tcg_gen_shri_i64(t0, xsh, 32);
tcg_gen_deposit_i64(t1, t0, xsh, 32, 32);
- tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
tcg_gen_addi_tl(EA, EA, 8);
tcg_gen_shri_i64(t0, xsl, 32);
tcg_gen_deposit_i64(t1, t0, xsl, 32, 32);
- tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEUQ);
tcg_temp_free_i64(t0);
tcg_temp_free_i64(t1);
} else {
- tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
}
tcg_temp_free(EA);
tcg_temp_free_i64(xsh);
TCGv_i64 outl = tcg_temp_new_i64();
gen_bswap16x8(outh, outl, xsh, xsl);
- tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEUQ);
tcg_temp_free_i64(outh);
tcg_temp_free_i64(outl);
} else {
- tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
}
tcg_temp_free(EA);
tcg_temp_free_i64(xsh);
gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
gen_addr_reg_index(ctx, EA);
- tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEUQ);
tcg_gen_addi_tl(EA, EA, 8);
- tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEUQ);
tcg_temp_free(EA);
tcg_temp_free_i64(xsh);
tcg_temp_free_i64(xsl);
xt = tcg_temp_new_i64();
- mop = DEF_MEMOP(MO_Q);
+ mop = DEF_MEMOP(MO_UQ);
gen_set_access_type(ctx, ACCESS_INT);
ea = do_ea_calc(ctx, ra, displ);
"x28/t3", "x29/t4", "x30/t5", "x31/t6"
};
+const char * const riscv_int_regnamesh[] = {
+ "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
+ "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
+ "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
+ "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
+ "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
+ "x30h/t5h", "x31h/t6h"
+};
+
const char * const riscv_fpr_regnames[] = {
"f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
"f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
set_priv_version(env, PRIV_VERSION_1_10_0);
qdev_prop_set_bit(DEVICE(obj), "mmu", false);
}
+
+static void rv128_base_cpu_init(Object *obj)
+{
+ if (qemu_tcg_mttcg_enabled()) {
+ /* Missing 128-bit aligned atomics */
+ error_report("128-bit RISC-V currently does not work with Multi "
+ "Threaded TCG. Please use: -accel tcg,thread=single");
+ exit(EXIT_FAILURE);
+ }
+ CPURISCVState *env = &RISCV_CPU(obj)->env;
+ /* We set this in the realise function */
+ set_misa(env, MXL_RV128, 0);
+}
#else
static void rv32_base_cpu_init(Object *obj)
{
case MXL_RV64:
info->print_insn = print_insn_riscv64;
break;
+ case MXL_RV128:
+ info->print_insn = print_insn_riscv128;
+ break;
default:
g_assert_not_reached();
}
#ifdef TARGET_RISCV64
case MXL_RV64:
break;
+ case MXL_RV128:
+ break;
#endif
case MXL_RV32:
break;
DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true),
DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
DEFINE_PROP_BOOL("v", RISCVCPU, cfg.ext_v, false),
+ DEFINE_PROP_BOOL("h", RISCVCPU, cfg.ext_h, true),
DEFINE_PROP_BOOL("Counters", RISCVCPU, cfg.ext_counters, true),
DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
- /* These are experimental so mark with 'x-' */
DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
- DEFINE_PROP_BOOL("x-h", RISCVCPU, cfg.ext_h, false),
+
+ /* These are experimental so mark with 'x-' */
DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false),
/* ePMP 0.9.3 */
DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
case MXL_RV32:
return g_strdup("riscv:rv32");
case MXL_RV64:
+ case MXL_RV128:
return g_strdup("riscv:rv64");
default:
g_assert_not_reached();
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
+ DEFINE_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
#endif
};
#include "exec/cpu-defs.h"
#include "fpu/softfloat-types.h"
#include "qom/object.h"
+#include "qemu/int128.h"
#include "cpu_bits.h"
#define TCG_GUEST_DEFAULT_MO 0
#define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any")
#define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32")
#define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64")
+#define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128")
#define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex")
#define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c")
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
struct CPURISCVState {
target_ulong gpr[32];
+ target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
uint64_t fpr[32]; /* assume both F and D extensions */
/* vector coprocessor state. */
target_ulong frm;
target_ulong badaddr;
+ uint32_t bins;
+
target_ulong guest_phys_fault_addr;
target_ulong priv_ver;
uint32_t misa_ext; /* current extensions */
uint32_t misa_ext_mask; /* max ext for this cpu */
+    /* Upper part of the return value of the 128-bit helpers */
+ target_ulong retxh;
+
uint32_t features;
#ifdef CONFIG_USER_ONLY
target_ulong hgatp;
uint64_t htimedelta;
+ /* Upper 64-bits of 128-bit CSRs */
+ uint64_t mscratchh;
+ uint64_t sscratchh;
+
/* Virtual CSRs */
/*
* For RV32 this is 32-bit vsstatus and 32-bit vsstatush.
#include "cpu_user.h"
extern const char * const riscv_int_regnames[];
+extern const char * const riscv_int_regnamesh[];
extern const char * const riscv_fpr_regnames[];
const char *riscv_cpu_get_trap_name(target_ulong cause, bool async);
target_ulong new_value,
target_ulong write_mask);
+RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
+ Int128 *ret_value,
+ Int128 new_value, Int128 write_mask);
+
+typedef RISCVException (*riscv_csr_read128_fn)(CPURISCVState *env, int csrno,
+ Int128 *ret_value);
+typedef RISCVException (*riscv_csr_write128_fn)(CPURISCVState *env, int csrno,
+ Int128 new_value);
+
typedef struct {
const char *name;
riscv_csr_predicate_fn predicate;
riscv_csr_read_fn read;
riscv_csr_write_fn write;
riscv_csr_op_fn op;
+ riscv_csr_read128_fn read128;
+ riscv_csr_write128_fn write128;
} riscv_csr_operations;
/* CSR function table constants */
#define MSTATUS32_SD 0x80000000
#define MSTATUS64_SD 0x8000000000000000ULL
+#define MSTATUSH128_SD 0x8000000000000000ULL
#define MISA32_MXL 0xC0000000
#define MISA64_MXL 0xC000000000000000ULL
#define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */
#define SSTATUS_MXR 0x00080000
+#define SSTATUS64_UXL 0x0000000300000000ULL
+
#define SSTATUS32_SD 0x80000000
#define SSTATUS64_SD 0x8000000000000000ULL
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
+ bool write_gva = false;
uint64_t s;
/* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
target_ulong deleg = async ? env->mideleg : env->medeleg;
- bool write_tval = false;
target_ulong tval = 0;
target_ulong htval = 0;
target_ulong mtval2 = 0;
case RISCV_EXCP_INST_PAGE_FAULT:
case RISCV_EXCP_LOAD_PAGE_FAULT:
case RISCV_EXCP_STORE_PAGE_FAULT:
- write_tval = true;
+ write_gva = true;
tval = env->badaddr;
break;
+ case RISCV_EXCP_ILLEGAL_INST:
+ tval = env->bins;
+ break;
default:
break;
}
if (riscv_has_ext(env, RVH)) {
target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
- if (env->two_stage_lookup && write_tval) {
- /*
- * If we are writing a guest virtual address to stval, set
- * this to 1. If we are trapping to VS we will set this to 0
- * later.
- */
- env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 1);
- } else {
- /* For other HS-mode traps, we set this to 0. */
- env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
- }
-
if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
/* Trap to VS mode */
/*
cause == IRQ_VS_EXT) {
cause = cause - 1;
}
- env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
+ write_gva = false;
} else if (riscv_cpu_virt_enabled(env)) {
/* Trap into HS mode, from virt */
riscv_cpu_swap_hypervisor_regs(env);
env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
riscv_cpu_virt_enabled(env));
+
htval = env->guest_phys_fault_addr;
riscv_cpu_set_virt_enabled(env, 0);
/* Trap into HS mode */
env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
htval = env->guest_phys_fault_addr;
+ write_gva = false;
}
+ env->hstatus = set_field(env->hstatus, HSTATUS_GVA, write_gva);
}
s = env->mstatus;
(1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
- SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
+ SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS | (target_ulong)SSTATUS64_UXL;
static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
static const target_ulong hip_writable_mask = MIP_VSSIP;
static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
return status | MSTATUS32_SD;
case MXL_RV64:
return status | MSTATUS64_SD;
+ case MXL_RV128:
+ return MSTATUSH128_SD;
default:
g_assert_not_reached();
}
mstatus = (mstatus & ~mask) | (val & mask);
- if (riscv_cpu_mxl(env) == MXL_RV64) {
+ RISCVMXL xl = riscv_cpu_mxl(env);
+ if (xl > MXL_RV32) {
/* SXL and UXL fields are for now read only */
- mstatus = set_field(mstatus, MSTATUS64_SXL, MXL_RV64);
- mstatus = set_field(mstatus, MSTATUS64_UXL, MXL_RV64);
+ mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
+ mstatus = set_field(mstatus, MSTATUS64_UXL, xl);
}
env->mstatus = mstatus;
return RISCV_EXCP_NONE;
}
+static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
+ Int128 *val)
+{
+ *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus));
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
+ Int128 *val)
+{
+ *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException read_misa(CPURISCVState *env, int csrno,
target_ulong *val)
{
}
/* Machine Trap Handling */
+static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
+ Int128 *val)
+{
+ *val = int128_make128(env->mscratch, env->mscratchh);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
+ Int128 val)
+{
+ env->mscratch = int128_getlo(val);
+ env->mscratchh = int128_gethi(val);
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException read_mscratch(CPURISCVState *env, int csrno,
target_ulong *val)
{
}
/* Supervisor Trap Setup */
+static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
+ Int128 *val)
+{
+ uint64_t mask = sstatus_v1_10_mask;
+ uint64_t sstatus = env->mstatus & mask;
+
+ *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException read_sstatus(CPURISCVState *env, int csrno,
target_ulong *val)
{
}
/* Supervisor Trap Handling */
+static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
+ Int128 *val)
+{
+ *val = int128_make128(env->sscratch, env->sscratchh);
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
+ Int128 val)
+{
+ env->sscratch = int128_getlo(val);
+ env->sscratchh = int128_gethi(val);
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException read_sscratch(CPURISCVState *env, int csrno,
target_ulong *val)
{
* csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
*/
-RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
- target_ulong *ret_value,
- target_ulong new_value, target_ulong write_mask)
+static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
+ int csrno,
+ bool write_mask,
+ RISCVCPU *cpu)
{
- RISCVException ret;
- target_ulong old_value;
- RISCVCPU *cpu = env_archcpu(env);
- int read_only = get_field(csrno, 0xC00) == 3;
-
/* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
+ int read_only = get_field(csrno, 0xC00) == 3;
#if !defined(CONFIG_USER_ONLY)
int effective_priv = env->priv;
if (!csr_ops[csrno].predicate) {
return RISCV_EXCP_ILLEGAL_INST;
}
- ret = csr_ops[csrno].predicate(env, csrno);
- if (ret != RISCV_EXCP_NONE) {
- return ret;
- }
+
+ return csr_ops[csrno].predicate(env, csrno);
+}
+
+static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value,
+ target_ulong write_mask)
+{
+ RISCVException ret;
+ target_ulong old_value;
/* execute combined read/write operation if it exists */
if (csr_ops[csrno].op) {
return RISCV_EXCP_NONE;
}
+RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
+ target_ulong *ret_value,
+ target_ulong new_value, target_ulong write_mask)
+{
+ RISCVCPU *cpu = env_archcpu(env);
+
+ RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
+}
+
+static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
+ Int128 *ret_value,
+ Int128 new_value,
+ Int128 write_mask)
+{
+ RISCVException ret;
+ Int128 old_value;
+
+ /* read old value */
+ ret = csr_ops[csrno].read128(env, csrno, &old_value);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ /* write value if writable and write mask set, otherwise drop writes */
+ if (int128_nz(write_mask)) {
+ new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
+ int128_and(new_value, write_mask));
+ if (csr_ops[csrno].write128) {
+ ret = csr_ops[csrno].write128(env, csrno, new_value);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+ } else if (csr_ops[csrno].write) {
+ /* avoids having to write wrappers for all registers */
+ ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+ }
+ }
+
+ /* return old value */
+ if (ret_value) {
+ *ret_value = old_value;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
+ Int128 *ret_value,
+ Int128 new_value, Int128 write_mask)
+{
+ RISCVException ret;
+ RISCVCPU *cpu = env_archcpu(env);
+
+ ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu);
+ if (ret != RISCV_EXCP_NONE) {
+ return ret;
+ }
+
+ if (csr_ops[csrno].read128) {
+ return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
+ }
+
+    /*
+     * Fall back to the 64-bit version for now if no 128-bit alternative is
+     * defined at all.
+     * Note: some CSRs do not need to be extended to MXLEN (their upper
+     * 64 bits are not significant); for those, this fallback handles the
+     * accesses correctly.
+     */
+ target_ulong old_value;
+ ret = riscv_csrrw_do64(env, csrno, &old_value,
+ int128_getlo(new_value),
+ int128_getlo(write_mask));
+ if (ret == RISCV_EXCP_NONE && ret_value) {
+ *ret_value = int128_make64(old_value);
+ }
+ return ret;
+}
+
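Since a TCG helper can return only a single target_ulong, a 128-bit CSR result is expected to come back split: the low half as the return value and the high half through the new env->retxh field. An illustrative caller of riscv_csrrw_i128() following that convention; this is a sketch only, the series' actual helpers are declared below and may differ:

    static target_ulong csrrw_i128_example(CPURISCVState *env, int csr,
                                           target_ulong newlo,
                                           target_ulong newhi)
    {
        Int128 old = int128_zero();

        /* Write all 128 bits and fetch the previous value. */
        if (riscv_csrrw_i128(env, csr, &old,
                             int128_make128(newlo, newhi),
                             int128_make128(UINT64_MAX, UINT64_MAX))
            != RISCV_EXCP_NONE) {
            return 0; /* a real helper would raise RISCV_EXCP_ILLEGAL_INST */
        }
        env->retxh = int128_gethi(old); /* upper 64 bits back to the caller */
        return int128_getlo(old);
    }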
/*
* Debugger support. If not in user mode, set env->debugger before the
* riscv_csrrw call and clear it after the call.
[CSR_MHARTID] = { "mhartid", any, read_mhartid },
/* Machine Trap Setup */
- [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus },
- [CSR_MISA] = { "misa", any, read_misa, write_misa },
+ [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus, NULL,
+ read_mstatus_i128 },
+ [CSR_MISA] = { "misa", any, read_misa, write_misa, NULL,
+ read_misa_i128 },
[CSR_MIDELEG] = { "mideleg", any, read_mideleg, write_mideleg },
[CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
[CSR_MIE] = { "mie", any, read_mie, write_mie },
[CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, write_mstatush },
/* Machine Trap Handling */
- [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch },
+ [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch, NULL,
+ read_mscratch_i128, write_mscratch_i128 },
[CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
[CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
[CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
[CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
/* Supervisor Trap Setup */
- [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus },
+ [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, NULL,
+ read_sstatus_i128 },
[CSR_SIE] = { "sie", smode, read_sie, write_sie },
[CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
[CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, write_scounteren },
/* Supervisor Trap Handling */
- [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch },
+ [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch, NULL,
+ read_sscratch_i128, write_sscratch_i128 },
[CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
[CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
[CSR_STVAL] = { "stval", smode, read_stval, write_stval },
int bitsize = 16 << env->misa_mxl_max;
int i;
+ /* Until gdb knows about 128-bit registers */
+ if (bitsize > 64) {
+ bitsize = 64;
+ }
+
g_string_printf(s, "<?xml version=\"1.0\"?>");
g_string_append_printf(s, "<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">");
g_string_append_printf(s, "<feature name=\"org.gnu.gdb.riscv.csr\">");
DEF_HELPER_2(csrr, tl, env, int)
DEF_HELPER_3(csrw, void, env, int, tl)
DEF_HELPER_4(csrrw, tl, env, int, tl, tl)
+DEF_HELPER_2(csrr_i128, tl, env, int)
+DEF_HELPER_4(csrw_i128, void, env, int, tl, tl)
+DEF_HELPER_6(csrrw_i128, tl, env, int, tl, tl, tl, tl)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_2(sret, tl, env, tl)
DEF_HELPER_2(mret, tl, env, tl)
DEF_HELPER_5(vsext_vf4_w, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsext_vf4_d, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsext_vf8_d, void, ptr, ptr, ptr, env, i32)
+
+/* 128-bit integer multiplication and division */
+DEF_HELPER_5(divu_i128, tl, env, tl, tl, tl, tl)
+DEF_HELPER_5(divs_i128, tl, env, tl, tl, tl, tl)
+DEF_HELPER_5(remu_i128, tl, env, tl, tl, tl, tl)
+DEF_HELPER_5(rems_i128, tl, env, tl, tl, tl, tl)
# Immediates:
%imm_ci 12:s1 2:5
%nzuimm_ciw 7:4 11:2 5:1 6:1 !function=ex_shift_2
+%uimm_cl_q 10:1 5:2 11:2 !function=ex_shift_4
%uimm_cl_d 5:2 10:3 !function=ex_shift_3
%uimm_cl_w 5:1 10:3 6:1 !function=ex_shift_2
%imm_cb 12:s1 5:2 2:1 10:2 3:2 !function=ex_shift_1
%imm_cj 12:s1 8:1 9:2 6:1 7:1 2:1 11:1 3:3 !function=ex_shift_1
%shimm_6bit 12:1 2:5 !function=ex_rvc_shifti
+%uimm_6bit_lq 2:4 12:1 6:1 !function=ex_shift_4
%uimm_6bit_ld 2:3 12:1 5:2 !function=ex_shift_3
%uimm_6bit_lw 2:2 12:1 4:3 !function=ex_shift_2
+%uimm_6bit_sq 7:4 11:2 !function=ex_shift_4
%uimm_6bit_sd 7:3 10:3 !function=ex_shift_3
%uimm_6bit_sw 7:2 9:4 !function=ex_shift_2
# Formats 16:
@cr .... ..... ..... .. &r rs2=%rs2_5 rs1=%rd %rd
@ci ... . ..... ..... .. &i imm=%imm_ci rs1=%rd %rd
+@cl_q ... . ..... ..... .. &i imm=%uimm_cl_q rs1=%rs1_3 rd=%rs2_3
@cl_d ... ... ... .. ... .. &i imm=%uimm_cl_d rs1=%rs1_3 rd=%rs2_3
@cl_w ... ... ... .. ... .. &i imm=%uimm_cl_w rs1=%rs1_3 rd=%rs2_3
@cs_2 ... ... ... .. ... .. &r rs2=%rs2_3 rs1=%rs1_3 rd=%rs1_3
+@cs_q ... ... ... .. ... .. &s imm=%uimm_cl_q rs1=%rs1_3 rs2=%rs2_3
@cs_d ... ... ... .. ... .. &s imm=%uimm_cl_d rs1=%rs1_3 rs2=%rs2_3
@cs_w ... ... ... .. ... .. &s imm=%uimm_cl_w rs1=%rs1_3 rs2=%rs2_3
@cj ... ........... .. &j imm=%imm_cj
@cb_z ... ... ... .. ... .. &b imm=%imm_cb rs1=%rs1_3 rs2=0
+@c_lqsp ... . ..... ..... .. &i imm=%uimm_6bit_lq rs1=2 %rd
@c_ldsp ... . ..... ..... .. &i imm=%uimm_6bit_ld rs1=2 %rd
@c_lwsp ... . ..... ..... .. &i imm=%uimm_6bit_lw rs1=2 %rd
+@c_sqsp ... . ..... ..... .. &s imm=%uimm_6bit_sq rs1=2 rs2=%rs2_5
@c_sdsp ... . ..... ..... .. &s imm=%uimm_6bit_sd rs1=2 rs2=%rs2_5
@c_swsp ... . ..... ..... .. &s imm=%uimm_6bit_sw rs1=2 rs2=%rs2_5
@c_li ... . ..... ..... .. &i imm=%imm_ci rs1=0 %rd
illegal 000 000 000 00 --- 00
addi 000 ... ... .. ... 00 @c_addi4spn
}
-fld 001 ... ... .. ... 00 @cl_d
+{
+ lq 001 ... ... .. ... 00 @cl_q
+ fld 001 ... ... .. ... 00 @cl_d
+}
lw 010 ... ... .. ... 00 @cl_w
-fsd 101 ... ... .. ... 00 @cs_d
+{
+ sq 101 ... ... .. ... 00 @cs_q
+ fsd 101 ... ... .. ... 00 @cs_d
+}
sw 110 ... ... .. ... 00 @cs_w
# *** RV32C and RV64C specific Standard Extension (Quadrant 0) ***
# *** RV32/64C Standard Extension (Quadrant 2) ***
slli 000 . ..... ..... 10 @c_shift2
-fld 001 . ..... ..... 10 @c_ldsp
+{
+ lq 001 ... ... .. ... 10 @c_lqsp
+ fld 001 . ..... ..... 10 @c_ldsp
+}
{
illegal 010 - 00000 ----- 10 # c.lwsp, RES rd=0
lw 010 . ..... ..... 10 @c_lwsp
jalr 100 1 ..... 00000 10 @c_jalr rd=1 # C.JALR
add 100 1 ..... ..... 10 @cr
}
-fsd 101 ...... ..... 10 @c_sdsp
+{
+ sq 101 ... ... .. ... 10 @c_sqsp
+ fsd 101 ...... ..... 10 @c_sdsp
+}
sw 110 . ..... ..... 10 @c_swsp
# *** RV32C and RV64C specific Standard Extension (Quadrant 2) ***
%rs1 15:5
%rd 7:5
%sh5 20:5
+%sh6 20:6
%sh7 20:7
%csr 20:12
# Formats 64:
@sh5 ....... ..... ..... ... ..... ....... &shift shamt=%sh5 %rs1 %rd
+# Formats 128:
+@sh6 ...... ...... ..... ... ..... ....... &shift shamt=%sh6 %rs1 %rd
+
# *** Privileged Instructions ***
ecall 000000000000 00000 000 00000 1110011
ebreak 000000000001 00000 000 00000 1110011
srlw 0000000 ..... ..... 101 ..... 0111011 @r
sraw 0100000 ..... ..... 101 ..... 0111011 @r
+# *** RV128I Base Instruction Set (in addition to RV64I) ***
+ldu ............ ..... 111 ..... 0000011 @i
+lq ............ ..... 010 ..... 0001111 @i
+sq ............ ..... 100 ..... 0100011 @s
+addid ............ ..... 000 ..... 1011011 @i
+sllid 000000 ...... ..... 001 ..... 1011011 @sh6
+srlid 000000 ...... ..... 101 ..... 1011011 @sh6
+sraid 010000 ...... ..... 101 ..... 1011011 @sh6
+addd 0000000 ..... ..... 000 ..... 1111011 @r
+subd 0100000 ..... ..... 000 ..... 1111011 @r
+slld 0000000 ..... ..... 001 ..... 1111011 @r
+srld 0000000 ..... ..... 101 ..... 1111011 @r
+srad 0100000 ..... ..... 101 ..... 1111011 @r
+
# *** RV32M Standard Extension ***
mul 0000001 ..... ..... 000 ..... 0110011 @r
mulh 0000001 ..... ..... 001 ..... 0110011 @r
remw 0000001 ..... ..... 110 ..... 0111011 @r
remuw 0000001 ..... ..... 111 ..... 0111011 @r
+# *** RV128M Standard Extension (in addition to RV64M) ***
+muld 0000001 ..... ..... 000 ..... 1111011 @r
+divd 0000001 ..... ..... 100 ..... 1111011 @r
+divud 0000001 ..... ..... 101 ..... 1111011 @r
+remd 0000001 ..... ..... 110 ..... 1111011 @r
+remud 0000001 ..... ..... 111 ..... 1111011 @r
+
# *** RV32A Standard Extension ***
lr_w 00010 . . 00000 ..... 010 ..... 0101111 @atom_ld
sc_w 00011 . . ..... ..... 010 ..... 0101111 @atom_st
static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_lr(ctx, a, MO_ALIGN | MO_TEQ);
+ return gen_lr(ctx, a, MO_ALIGN | MO_TEUQ);
}
static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_sc(ctx, a, (MO_ALIGN | MO_TEQ));
+ return gen_sc(ctx, a, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
{
REQUIRE_64BIT(ctx);
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEQ));
+ return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ));
}
static bool trans_andn(DisasContext *ctx, arg_andn *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_andc_tl);
+ return gen_logic(ctx, a, tcg_gen_andc_tl);
}
static bool trans_orn(DisasContext *ctx, arg_orn *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_orc_tl);
+ return gen_logic(ctx, a, tcg_gen_orc_tl);
}
static bool trans_xnor(DisasContext *ctx, arg_xnor *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_eqv_tl);
+ return gen_logic(ctx, a, tcg_gen_eqv_tl);
}
static bool trans_min(DisasContext *ctx, arg_min *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smin_tl);
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smin_tl, NULL);
}
static bool trans_max(DisasContext *ctx, arg_max *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smax_tl);
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smax_tl, NULL);
}
static bool trans_minu(DisasContext *ctx, arg_minu *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umin_tl);
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umin_tl, NULL);
}
static bool trans_maxu(DisasContext *ctx, arg_maxu *a)
{
REQUIRE_ZBB(ctx);
- return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umax_tl);
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umax_tl, NULL);
}
static bool trans_sext_b(DisasContext *ctx, arg_sext_b *a)
static bool trans_bset(DisasContext *ctx, arg_bset *a)
{
REQUIRE_ZBS(ctx);
- return gen_shift(ctx, a, EXT_NONE, gen_bset);
+ return gen_shift(ctx, a, EXT_NONE, gen_bset, NULL);
}
static bool trans_bseti(DisasContext *ctx, arg_bseti *a)
static bool trans_bclr(DisasContext *ctx, arg_bclr *a)
{
REQUIRE_ZBS(ctx);
- return gen_shift(ctx, a, EXT_NONE, gen_bclr);
+ return gen_shift(ctx, a, EXT_NONE, gen_bclr, NULL);
}
static bool trans_bclri(DisasContext *ctx, arg_bclri *a)
static bool trans_binv(DisasContext *ctx, arg_binv *a)
{
REQUIRE_ZBS(ctx);
- return gen_shift(ctx, a, EXT_NONE, gen_binv);
+ return gen_shift(ctx, a, EXT_NONE, gen_binv, NULL);
}
static bool trans_binvi(DisasContext *ctx, arg_binvi *a)
static bool trans_bext(DisasContext *ctx, arg_bext *a)
{
REQUIRE_ZBS(ctx);
- return gen_shift(ctx, a, EXT_NONE, gen_bext);
+ return gen_shift(ctx, a, EXT_NONE, gen_bext, NULL);
}
static bool trans_bexti(DisasContext *ctx, arg_bexti *a)
static bool trans_ror(DisasContext *ctx, arg_ror *a)
{
REQUIRE_ZBB(ctx);
- return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotr_tl, gen_rorw);
+ return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotr_tl, gen_rorw, NULL);
}
static void gen_roriw(TCGv ret, TCGv arg1, target_long shamt)
{
REQUIRE_ZBB(ctx);
return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
- tcg_gen_rotri_tl, gen_roriw);
+ tcg_gen_rotri_tl, gen_roriw, NULL);
}
static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2)
static bool trans_rol(DisasContext *ctx, arg_rol *a)
{
REQUIRE_ZBB(ctx);
- return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotl_tl, gen_rolw);
+ return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotl_tl, gen_rolw, NULL);
}
static void gen_rev8_32(TCGv ret, TCGv src1)
static bool trans_sh##SHAMT##add(DisasContext *ctx, arg_sh##SHAMT##add *a) \
{ \
REQUIRE_ZBA(ctx); \
- return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add); \
+ return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add, NULL); \
}
GEN_TRANS_SHADD(1)
REQUIRE_64BIT(ctx);
REQUIRE_ZBB(ctx);
ctx->ol = MXL_RV32;
- return gen_shift(ctx, a, EXT_NONE, gen_rorw);
+ return gen_shift(ctx, a, EXT_NONE, gen_rorw, NULL);
}
static bool trans_roriw(DisasContext *ctx, arg_roriw *a)
REQUIRE_64BIT(ctx);
REQUIRE_ZBB(ctx);
ctx->ol = MXL_RV32;
- return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_roriw);
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_roriw, NULL);
}
static bool trans_rolw(DisasContext *ctx, arg_rolw *a)
REQUIRE_64BIT(ctx);
REQUIRE_ZBB(ctx);
ctx->ol = MXL_RV32;
- return gen_shift(ctx, a, EXT_NONE, gen_rolw);
+ return gen_shift(ctx, a, EXT_NONE, gen_rolw, NULL);
}
#define GEN_SHADD_UW(SHAMT) \
{ \
REQUIRE_64BIT(ctx); \
REQUIRE_ZBA(ctx); \
- return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add_uw); \
+ return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add_uw, NULL); \
}
GEN_TRANS_SHADD_UW(1)
{
REQUIRE_64BIT(ctx);
REQUIRE_ZBA(ctx);
- return gen_arith(ctx, a, EXT_NONE, gen_add_uw);
+ return gen_arith(ctx, a, EXT_NONE, gen_add_uw, NULL);
}
static void gen_slli_uw(TCGv dest, TCGv src, target_long shamt)
{
REQUIRE_64BIT(ctx);
REQUIRE_ZBA(ctx);
- return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_slli_uw);
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_slli_uw, NULL);
}
static bool trans_clmul(DisasContext *ctx, arg_clmul *a)
{
REQUIRE_ZBC(ctx);
- return gen_arith(ctx, a, EXT_NONE, gen_helper_clmul);
+ return gen_arith(ctx, a, EXT_NONE, gen_helper_clmul, NULL);
}
static void gen_clmulh(TCGv dst, TCGv src1, TCGv src2)
static bool trans_clmulh(DisasContext *ctx, arg_clmulr *a)
{
REQUIRE_ZBC(ctx);
- return gen_arith(ctx, a, EXT_NONE, gen_clmulh);
+ return gen_arith(ctx, a, EXT_NONE, gen_clmulh, NULL);
}
static bool trans_clmulr(DisasContext *ctx, arg_clmulh *a)
{
REQUIRE_ZBC(ctx);
- return gen_arith(ctx, a, EXT_NONE, gen_helper_clmulr);
+ return gen_arith(ctx, a, EXT_NONE, gen_helper_clmulr, NULL);
}
}
addr = gen_pm_adjust_address(ctx, addr);
- tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEUQ);
mark_fs_dirty(ctx);
return true;
}
addr = gen_pm_adjust_address(ctx, addr);
- tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEQ);
+ tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUQ);
return true;
}
{
REQUIRE_64BIT(ctx);
REQUIRE_EXT(ctx, RVH);
- return do_hlv(ctx, a, MO_TEQ);
+ return do_hlv(ctx, a, MO_TEUQ);
}
static bool trans_hsv_d(DisasContext *ctx, arg_hsv_d *a)
{
REQUIRE_64BIT(ctx);
REQUIRE_EXT(ctx, RVH);
- return do_hsv(ctx, a, MO_TEQ);
+ return do_hsv(ctx, a, MO_TEUQ);
}
#ifndef CONFIG_USER_ONLY
static bool trans_c64_illegal(DisasContext *ctx, arg_empty *a)
{
- REQUIRE_64BIT(ctx);
- return trans_illegal(ctx, a);
+ REQUIRE_64_OR_128BIT(ctx);
+ return trans_illegal(ctx, a);
}
static bool trans_lui(DisasContext *ctx, arg_lui *a)
{
if (a->rd != 0) {
- tcg_gen_movi_tl(cpu_gpr[a->rd], a->imm);
+ gen_set_gpri(ctx, a->rd, a->imm);
}
return true;
}
static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
if (a->rd != 0) {
- tcg_gen_movi_tl(cpu_gpr[a->rd], a->imm + ctx->base.pc_next);
+ gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
}
return true;
}
return true;
}
+static TCGCond gen_compare_i128(bool bz, TCGv rl,
+ TCGv al, TCGv ah, TCGv bl, TCGv bh,
+ TCGCond cond)
+{
+ TCGv rh = tcg_temp_new();
+ bool invert = false;
+
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ if (bz) {
+ tcg_gen_or_tl(rl, al, ah);
+ } else {
+ tcg_gen_xor_tl(rl, al, bl);
+ tcg_gen_xor_tl(rh, ah, bh);
+ tcg_gen_or_tl(rl, rl, rh);
+ }
+ break;
+
+ case TCG_COND_GE:
+ case TCG_COND_LT:
+ if (bz) {
+ tcg_gen_mov_tl(rl, ah);
+ } else {
+ TCGv tmp = tcg_temp_new();
+
+ tcg_gen_sub2_tl(rl, rh, al, ah, bl, bh);
+ tcg_gen_xor_tl(rl, rh, ah);
+ tcg_gen_xor_tl(tmp, ah, bh);
+ tcg_gen_and_tl(rl, rl, tmp);
+ tcg_gen_xor_tl(rl, rh, rl);
+
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case TCG_COND_LTU:
+ invert = true;
+ /* fallthrough */
+ case TCG_COND_GEU:
+ {
+ TCGv tmp = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
+ TCGv one = tcg_constant_tl(1);
+
+ cond = TCG_COND_NE;
+ /* borrow in to second word */
+ tcg_gen_setcond_tl(TCG_COND_LTU, tmp, al, bl);
+ /* seed third word with 1, which will be result */
+ tcg_gen_sub2_tl(tmp, rh, ah, one, tmp, zero);
+ tcg_gen_sub2_tl(tmp, rl, tmp, rh, bh, zero);
+
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ if (invert) {
+ cond = tcg_invert_cond(cond);
+ }
+
+ tcg_temp_free(rh);
+ return cond;
+}
+
+static void gen_setcond_i128(TCGv rl, TCGv rh,
+ TCGv src1l, TCGv src1h,
+ TCGv src2l, TCGv src2h,
+ TCGCond cond)
+{
+ cond = gen_compare_i128(false, rl, src1l, src1h, src2l, src2h, cond);
+ tcg_gen_setcondi_tl(cond, rl, rl, 0);
+ tcg_gen_movi_tl(rh, 0);
+}
+
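Aside (illustrative only, plain C rather than TCG): for the LT/GE case,
gen_compare_i128() above recovers the sign of the 128-bit difference while
correcting for overflow. The same computation on 64-bit halves:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Signed 128-bit a < b, each operand given as {low, high} halves,
     * mirroring the xor/and/xor sequence emitted above. */
    static bool slt_i128(uint64_t al, uint64_t ah, uint64_t bl, uint64_t bh)
    {
        uint64_t rh = ah - bh - (al < bl);       /* high half of a - b */
        /* If the operands' signs differ, the true sign is the sign of a;
         * otherwise the subtraction cannot overflow and rh's sign is right. */
        uint64_t sign = rh ^ ((rh ^ ah) & (ah ^ bh));
        return (int64_t)sign < 0;
    }

    int main(void)
    {
        assert(slt_i128(0, 0, 1, 0));               /* 0 < 1 */
        assert(slt_i128(0, UINT64_MAX, 0, 0));      /* -2^64 < 0 */
        assert(!slt_i128(0, 0, 0, UINT64_MAX));     /* 0 < -2^64 is false */
        assert(slt_i128(0, 1ULL << 63, 1, 0));      /* INT128_MIN < 1 */
        return 0;
    }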
static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
{
TCGLabel *l = gen_new_label();
TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);
- tcg_gen_brcond_tl(cond, src1, src2, l);
+ if (get_xl(ctx) == MXL_RV128) {
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv src2h = get_gprh(ctx, a->rs2);
+ TCGv tmp = tcg_temp_new();
+
+ cond = gen_compare_i128(a->rs2 == 0,
+ tmp, src1, src1h, src2, src2h, cond);
+ tcg_gen_brcondi_tl(cond, tmp, 0, l);
+
+ tcg_temp_free(tmp);
+ } else {
+ tcg_gen_brcond_tl(cond, src1, src2, l);
+ }
gen_goto_tb(ctx, 1, ctx->pc_succ_insn);
gen_set_label(l); /* branch taken */
return gen_branch(ctx, a, TCG_COND_GEU);
}
-static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
+static bool gen_load_tl(DisasContext *ctx, arg_lb *a, MemOp memop)
{
TCGv dest = dest_gpr(ctx, a->rd);
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
return true;
}
+/* Compute only 64-bit addresses to use the address translation mechanism */
+static bool gen_load_i128(DisasContext *ctx, arg_lb *a, MemOp memop)
+{
+ TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv destl = dest_gpr(ctx, a->rd);
+ TCGv desth = dest_gprh(ctx, a->rd);
+ TCGv addrl = tcg_temp_new();
+
+ tcg_gen_addi_tl(addrl, src1l, a->imm);
+
+ if ((memop & MO_SIZE) <= MO_64) {
+ tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, memop);
+ if (memop & MO_SIGN) {
+ tcg_gen_sari_tl(desth, destl, 63);
+ } else {
+ tcg_gen_movi_tl(desth, 0);
+ }
+ } else {
+ /* assume little-endian memory access for now */
+ tcg_gen_qemu_ld_tl(destl, addrl, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_addi_tl(addrl, addrl, 8);
+ tcg_gen_qemu_ld_tl(desth, addrl, ctx->mem_idx, MO_TEUQ);
+ }
+
+ gen_set_gpr128(ctx, a->rd, destl, desth);
+
+ tcg_temp_free(addrl);
+ return true;
+}
+
+static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
+{
+ if (get_xl(ctx) == MXL_RV128) {
+ return gen_load_i128(ctx, a, memop);
+ } else {
+ return gen_load_tl(ctx, a, memop);
+ }
+}
+
static bool trans_lb(DisasContext *ctx, arg_lb *a)
{
return gen_load(ctx, a, MO_SB);
return gen_load(ctx, a, MO_TESL);
}
+static bool trans_ld(DisasContext *ctx, arg_ld *a)
+{
+ REQUIRE_64_OR_128BIT(ctx);
+ return gen_load(ctx, a, MO_TESQ);
+}
+
+static bool trans_lq(DisasContext *ctx, arg_lq *a)
+{
+ REQUIRE_128BIT(ctx);
+ return gen_load(ctx, a, MO_TEUO);
+}
+
static bool trans_lbu(DisasContext *ctx, arg_lbu *a)
{
return gen_load(ctx, a, MO_UB);
return gen_load(ctx, a, MO_TEUW);
}
-static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
+static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
+{
+ REQUIRE_64_OR_128BIT(ctx);
+ return gen_load(ctx, a, MO_TEUL);
+}
+
+static bool trans_ldu(DisasContext *ctx, arg_ldu *a)
+{
+ REQUIRE_128BIT(ctx);
+ return gen_load(ctx, a, MO_TEUQ);
+}
+
+static bool gen_store_tl(DisasContext *ctx, arg_sb *a, MemOp memop)
{
TCGv addr = get_gpr(ctx, a->rs1, EXT_NONE);
TCGv data = get_gpr(ctx, a->rs2, EXT_NONE);
return true;
}
+static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
+{
+ TCGv src1l = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv src2l = get_gpr(ctx, a->rs2, EXT_NONE);
+ TCGv src2h = get_gprh(ctx, a->rs2);
+ TCGv addrl = tcg_temp_new();
+
+ tcg_gen_addi_tl(addrl, src1l, a->imm);
+
+ if ((memop & MO_SIZE) <= MO_64) {
+ tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, memop);
+ } else {
+ /* little-endian memory access assumed for now */
+ tcg_gen_qemu_st_tl(src2l, addrl, ctx->mem_idx, MO_TEUQ);
+ tcg_gen_addi_tl(addrl, addrl, 8);
+ tcg_gen_qemu_st_tl(src2h, addrl, ctx->mem_idx, MO_TEUQ);
+ }
+
+ tcg_temp_free(addrl);
+ return true;
+}
+
+static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
+{
+ if (get_xl(ctx) == MXL_RV128) {
+ return gen_store_i128(ctx, a, memop);
+ } else {
+ return gen_store_tl(ctx, a, memop);
+ }
+}
+
static bool trans_sb(DisasContext *ctx, arg_sb *a)
{
return gen_store(ctx, a, MO_SB);
return gen_store(ctx, a, MO_TESL);
}
-static bool trans_lwu(DisasContext *ctx, arg_lwu *a)
+static bool trans_sd(DisasContext *ctx, arg_sd *a)
{
- REQUIRE_64BIT(ctx);
- return gen_load(ctx, a, MO_TEUL);
+ REQUIRE_64_OR_128BIT(ctx);
+ return gen_store(ctx, a, MO_TEUQ);
}
-static bool trans_ld(DisasContext *ctx, arg_ld *a)
+static bool trans_sq(DisasContext *ctx, arg_sq *a)
{
- REQUIRE_64BIT(ctx);
- return gen_load(ctx, a, MO_TEQ);
+ REQUIRE_128BIT(ctx);
+ return gen_store(ctx, a, MO_TEUO);
}
-static bool trans_sd(DisasContext *ctx, arg_sd *a)
+static bool trans_addd(DisasContext *ctx, arg_addd *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
+}
+
+static bool trans_addid(DisasContext *ctx, arg_addid *a)
{
- REQUIRE_64BIT(ctx);
- return gen_store(ctx, a, MO_TEQ);
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
+}
+
+static bool trans_subd(DisasContext *ctx, arg_subd *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
+}
+
+static void gen_addi2_i128(TCGv retl, TCGv reth,
+ TCGv srcl, TCGv srch, target_long imm)
+{
+ TCGv imml = tcg_constant_tl(imm);
+ TCGv immh = tcg_constant_tl(-(imm < 0));
+ tcg_gen_add2_tl(retl, reth, srcl, srch, imml, immh);
}
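Aside (illustrative only): -(imm < 0) in gen_addi2_i128() above is the usual
idiom for the high half of a sign-extended immediate, all ones when the
immediate is negative and zero otherwise:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t imm = -5;
        assert((uint64_t)-(imm < 0) == UINT64_MAX); /* negative -> all ones */

        imm = 7;
        assert((uint64_t)-(imm < 0) == 0);          /* non-negative -> zero */
        return 0;
    }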
static bool trans_addi(DisasContext *ctx, arg_addi *a)
{
- return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, gen_addi2_i128);
}
static void gen_slt(TCGv ret, TCGv s1, TCGv s2)
tcg_gen_setcond_tl(TCG_COND_LT, ret, s1, s2);
}
+static void gen_slt_i128(TCGv retl, TCGv reth,
+ TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
+{
+ gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LT);
+}
+
static void gen_sltu(TCGv ret, TCGv s1, TCGv s2)
{
tcg_gen_setcond_tl(TCG_COND_LTU, ret, s1, s2);
}
+static void gen_sltu_i128(TCGv retl, TCGv reth,
+ TCGv s1l, TCGv s1h, TCGv s2l, TCGv s2h)
+{
+ gen_setcond_i128(retl, reth, s1l, s1h, s2l, s2h, TCG_COND_LTU);
+}
+
static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
- return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt);
+ return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}
static bool trans_sltiu(DisasContext *ctx, arg_sltiu *a)
{
- return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu);
+ return gen_arith_imm_tl(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}
static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
- return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_xori_tl);
+ return gen_logic_imm_fn(ctx, a, tcg_gen_xori_tl);
}
static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
- return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_ori_tl);
+ return gen_logic_imm_fn(ctx, a, tcg_gen_ori_tl);
}
static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
- return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_andi_tl);
+ return gen_logic_imm_fn(ctx, a, tcg_gen_andi_tl);
+}
+
+static void gen_slli_i128(TCGv retl, TCGv reth,
+ TCGv src1l, TCGv src1h,
+ target_long shamt)
+{
+ if (shamt >= 64) {
+ tcg_gen_shli_tl(reth, src1l, shamt - 64);
+ tcg_gen_movi_tl(retl, 0);
+ } else {
+ tcg_gen_extract2_tl(reth, src1l, src1h, 64 - shamt);
+ tcg_gen_shli_tl(retl, src1l, shamt);
+ }
}
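Aside (illustrative only): gen_slli_i128() above splits a constant 128-bit
left shift into two cases on 64-bit halves. The same split in plain C; note
the extra shamt == 0 branch, which the TCG version avoids because
tcg_gen_extract2_tl() accepts an offset of 64:

    #include <assert.h>
    #include <stdint.h>

    static void shl_i128(uint64_t *retl, uint64_t *reth,
                         uint64_t srcl, uint64_t srch, unsigned shamt)
    {
        if (shamt >= 64) {
            *reth = srcl << (shamt - 64);
            *retl = 0;
        } else if (shamt == 0) {
            *reth = srch;                    /* plain C cannot shift by 64 */
            *retl = srcl;
        } else {
            *reth = (srch << shamt) | (srcl >> (64 - shamt));
            *retl = srcl << shamt;
        }
    }

    int main(void)
    {
        uint64_t lo, hi;

        shl_i128(&lo, &hi, 1, 0, 64);              /* 1 << 64 */
        assert(lo == 0 && hi == 1);

        shl_i128(&lo, &hi, 1ULL << 63, 0, 1);      /* carry into high half */
        assert(lo == 0 && hi == 1);
        return 0;
    }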
static bool trans_slli(DisasContext *ctx, arg_slli *a)
{
- return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl);
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, gen_slli_i128);
}
static void gen_srliw(TCGv dst, TCGv src, target_long shamt)
tcg_gen_extract_tl(dst, src, shamt, 32 - shamt);
}
+static void gen_srli_i128(TCGv retl, TCGv reth,
+ TCGv src1l, TCGv src1h,
+ target_long shamt)
+{
+ if (shamt >= 64) {
+ tcg_gen_shri_tl(retl, src1h, shamt - 64);
+ tcg_gen_movi_tl(reth, 0);
+ } else {
+ tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
+ tcg_gen_shri_tl(reth, src1h, shamt);
+ }
+}
+
static bool trans_srli(DisasContext *ctx, arg_srli *a)
{
return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
- tcg_gen_shri_tl, gen_srliw);
+ tcg_gen_shri_tl, gen_srliw, gen_srli_i128);
}
static void gen_sraiw(TCGv dst, TCGv src, target_long shamt)
tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt);
}
+static void gen_srai_i128(TCGv retl, TCGv reth,
+ TCGv src1l, TCGv src1h,
+ target_long shamt)
+{
+ if (shamt >= 64) {
+ tcg_gen_sari_tl(retl, src1h, shamt - 64);
+ tcg_gen_sari_tl(reth, src1h, 63);
+ } else {
+ tcg_gen_extract2_tl(retl, src1l, src1h, shamt);
+ tcg_gen_sari_tl(reth, src1h, shamt);
+ }
+}
+
static bool trans_srai(DisasContext *ctx, arg_srai *a)
{
return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE,
- tcg_gen_sari_tl, gen_sraiw);
+ tcg_gen_sari_tl, gen_sraiw, gen_srai_i128);
}
static bool trans_add(DisasContext *ctx, arg_add *a)
{
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, tcg_gen_add2_tl);
}
static bool trans_sub(DisasContext *ctx, arg_sub *a)
{
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, tcg_gen_sub2_tl);
+}
+
+static void gen_sll_i128(TCGv destl, TCGv desth,
+ TCGv src1l, TCGv src1h, TCGv shamt)
+{
+ TCGv ls = tcg_temp_new();
+ TCGv rs = tcg_temp_new();
+ TCGv hs = tcg_temp_new();
+ TCGv ll = tcg_temp_new();
+ TCGv lr = tcg_temp_new();
+ TCGv h0 = tcg_temp_new();
+ TCGv h1 = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
+
+ tcg_gen_andi_tl(hs, shamt, 64);
+ tcg_gen_andi_tl(ls, shamt, 63);
+ tcg_gen_neg_tl(shamt, shamt);
+ tcg_gen_andi_tl(rs, shamt, 63);
+
+ tcg_gen_shl_tl(ll, src1l, ls);
+ tcg_gen_shl_tl(h0, src1h, ls);
+ tcg_gen_shr_tl(lr, src1l, rs);
+ tcg_gen_movcond_tl(TCG_COND_NE, lr, shamt, zero, lr, zero);
+ tcg_gen_or_tl(h1, h0, lr);
+
+ tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, zero, ll);
+ tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, ll, h1);
+
+ tcg_temp_free(ls);
+ tcg_temp_free(rs);
+ tcg_temp_free(hs);
+ tcg_temp_free(ll);
+ tcg_temp_free(lr);
+ tcg_temp_free(h0);
+ tcg_temp_free(h1);
}
static bool trans_sll(DisasContext *ctx, arg_sll *a)
{
- return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl);
+ return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, gen_sll_i128);
}
static bool trans_slt(DisasContext *ctx, arg_slt *a)
{
- return gen_arith(ctx, a, EXT_SIGN, gen_slt);
+ return gen_arith(ctx, a, EXT_SIGN, gen_slt, gen_slt_i128);
}
static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
{
- return gen_arith(ctx, a, EXT_SIGN, gen_sltu);
+ return gen_arith(ctx, a, EXT_SIGN, gen_sltu, gen_sltu_i128);
}
-static bool trans_xor(DisasContext *ctx, arg_xor *a)
+static void gen_srl_i128(TCGv destl, TCGv desth,
+ TCGv src1l, TCGv src1h, TCGv shamt)
{
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_xor_tl);
+ TCGv ls = tcg_temp_new();
+ TCGv rs = tcg_temp_new();
+ TCGv hs = tcg_temp_new();
+ TCGv ll = tcg_temp_new();
+ TCGv lr = tcg_temp_new();
+ TCGv h0 = tcg_temp_new();
+ TCGv h1 = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
+
+ tcg_gen_andi_tl(hs, shamt, 64);
+ tcg_gen_andi_tl(rs, shamt, 63);
+ tcg_gen_neg_tl(shamt, shamt);
+ tcg_gen_andi_tl(ls, shamt, 63);
+
+ tcg_gen_shr_tl(lr, src1l, rs);
+ tcg_gen_shr_tl(h1, src1h, rs);
+ tcg_gen_shl_tl(ll, src1h, ls);
+ tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
+ tcg_gen_or_tl(h0, ll, lr);
+
+ tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
+ tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, zero, h1);
+
+ tcg_temp_free(ls);
+ tcg_temp_free(rs);
+ tcg_temp_free(hs);
+ tcg_temp_free(ll);
+ tcg_temp_free(lr);
+ tcg_temp_free(h0);
+ tcg_temp_free(h1);
}
static bool trans_srl(DisasContext *ctx, arg_srl *a)
{
- return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl);
+ return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, gen_srl_i128);
+}
+
+static void gen_sra_i128(TCGv destl, TCGv desth,
+ TCGv src1l, TCGv src1h, TCGv shamt)
+{
+ TCGv ls = tcg_temp_new();
+ TCGv rs = tcg_temp_new();
+ TCGv hs = tcg_temp_new();
+ TCGv ll = tcg_temp_new();
+ TCGv lr = tcg_temp_new();
+ TCGv h0 = tcg_temp_new();
+ TCGv h1 = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
+
+ tcg_gen_andi_tl(hs, shamt, 64);
+ tcg_gen_andi_tl(rs, shamt, 63);
+ tcg_gen_neg_tl(shamt, shamt);
+ tcg_gen_andi_tl(ls, shamt, 63);
+
+ tcg_gen_shr_tl(lr, src1l, rs);
+ tcg_gen_sar_tl(h1, src1h, rs);
+ tcg_gen_shl_tl(ll, src1h, ls);
+ tcg_gen_movcond_tl(TCG_COND_NE, ll, shamt, zero, ll, zero);
+ tcg_gen_or_tl(h0, ll, lr);
+ tcg_gen_sari_tl(lr, src1h, 63);
+
+ tcg_gen_movcond_tl(TCG_COND_NE, destl, hs, zero, h1, h0);
+ tcg_gen_movcond_tl(TCG_COND_NE, desth, hs, zero, lr, h1);
+
+ tcg_temp_free(ls);
+ tcg_temp_free(rs);
+ tcg_temp_free(hs);
+ tcg_temp_free(ll);
+ tcg_temp_free(lr);
+ tcg_temp_free(h0);
+ tcg_temp_free(h1);
}
static bool trans_sra(DisasContext *ctx, arg_sra *a)
{
- return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl);
+ return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, gen_sra_i128);
+}
+
+static bool trans_xor(DisasContext *ctx, arg_xor *a)
+{
+ return gen_logic(ctx, a, tcg_gen_xor_tl);
}
static bool trans_or(DisasContext *ctx, arg_or *a)
{
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_or_tl);
+ return gen_logic(ctx, a, tcg_gen_or_tl);
}
static bool trans_and(DisasContext *ctx, arg_and *a)
{
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_and_tl);
+ return gen_logic(ctx, a, tcg_gen_and_tl);
}
static bool trans_addiw(DisasContext *ctx, arg_addiw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl);
+ return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl, NULL);
}
static bool trans_slliw(DisasContext *ctx, arg_slliw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl);
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
}
static bool trans_srliw(DisasContext *ctx, arg_srliw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw);
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw, NULL);
}
static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw);
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw, NULL);
+}
+
+static bool trans_sllid(DisasContext *ctx, arg_sllid *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl, NULL);
+}
+
+static bool trans_srlid(DisasContext *ctx, arg_srlid *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shri_tl, NULL);
+}
+
+static bool trans_sraid(DisasContext *ctx, arg_sraid *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_sari_tl, NULL);
}
static bool trans_addw(DisasContext *ctx, arg_addw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl, NULL);
}
static bool trans_subw(DisasContext *ctx, arg_subw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl, NULL);
}
static bool trans_sllw(DisasContext *ctx, arg_sllw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl);
+ return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
}
static bool trans_srlw(DisasContext *ctx, arg_srlw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl);
+ return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
}
static bool trans_sraw(DisasContext *ctx, arg_sraw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
ctx->ol = MXL_RV32;
- return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl);
+ return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
}
+static bool trans_slld(DisasContext *ctx, arg_slld *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl, NULL);
+}
+
+static bool trans_srld(DisasContext *ctx, arg_srld *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl, NULL);
+}
+
+static bool trans_srad(DisasContext *ctx, arg_srad *a)
+{
+ REQUIRE_128BIT(ctx);
+ ctx->ol = MXL_RV64;
+ return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl, NULL);
+}
+
static bool trans_fence(DisasContext *ctx, arg_fence *a)
{
/* FENCE is a full memory barrier. */
return do_csr_post(ctx);
}
-static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
+static bool do_csrr_i128(DisasContext *ctx, int rd, int rc)
{
- TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv destl = dest_gpr(ctx, rd);
+ TCGv desth = dest_gprh(ctx, rd);
+ TCGv_i32 csr = tcg_constant_i32(rc);
- /*
- * If rd == 0, the insn shall not read the csr, nor cause any of the
- * side effects that might occur on a csr read.
- */
- if (a->rd == 0) {
- return do_csrw(ctx, a->csr, src);
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_csrr_i128(destl, cpu_env, csr);
+ tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
+ gen_set_gpr128(ctx, rd, destl, desth);
+ return do_csr_post(ctx);
+}
+
+static bool do_csrw_i128(DisasContext *ctx, int rc, TCGv srcl, TCGv srch)
+{
+ TCGv_i32 csr = tcg_constant_i32(rc);
+
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
}
+ gen_helper_csrw_i128(cpu_env, csr, srcl, srch);
+ return do_csr_post(ctx);
+}
+
+static bool do_csrrw_i128(DisasContext *ctx, int rd, int rc,
+ TCGv srcl, TCGv srch, TCGv maskl, TCGv maskh)
+{
+ TCGv destl = dest_gpr(ctx, rd);
+ TCGv desth = dest_gprh(ctx, rd);
+ TCGv_i32 csr = tcg_constant_i32(rc);
+
+ if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_csrrw_i128(destl, cpu_env, csr, srcl, srch, maskl, maskh);
+ tcg_gen_ld_tl(desth, cpu_env, offsetof(CPURISCVState, retxh));
+ gen_set_gpr128(ctx, rd, destl, desth);
+ return do_csr_post(ctx);
+}
- TCGv mask = tcg_constant_tl(-1);
- return do_csrrw(ctx, a->rd, a->csr, src, mask);
+static bool trans_csrrw(DisasContext *ctx, arg_csrrw *a)
+{
+ if (get_xl(ctx) < MXL_RV128) {
+ TCGv src = get_gpr(ctx, a->rs1, EXT_NONE);
+
+ /*
+ * If rd == 0, the insn shall not read the csr, nor cause any of the
+ * side effects that might occur on a csr read.
+ */
+ if (a->rd == 0) {
+ return do_csrw(ctx, a->csr, src);
+ }
+
+ TCGv mask = tcg_constant_tl(-1);
+ return do_csrrw(ctx, a->rd, a->csr, src, mask);
+ } else {
+ TCGv srcl = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv srch = get_gprh(ctx, a->rs1);
+
+ /*
+ * If rd == 0, the insn shall not read the csr, nor cause any of the
+ * side effects that might occur on a csr read.
+ */
+ if (a->rd == 0) {
+ return do_csrw_i128(ctx, a->csr, srcl, srch);
+ }
+
+ TCGv mask = tcg_constant_tl(-1);
+ return do_csrrw_i128(ctx, a->rd, a->csr, srcl, srch, mask, mask);
+ }
}
static bool trans_csrrs(DisasContext *ctx, arg_csrrs *a)
* a zero value, the instruction will still attempt to write the
* unmodified value back to the csr and will cause side effects.
*/
- if (a->rs1 == 0) {
- return do_csrr(ctx, a->rd, a->csr);
+ if (get_xl(ctx) < MXL_RV128) {
+ if (a->rs1 == 0) {
+ return do_csrr(ctx, a->rd, a->csr);
+ }
+
+ TCGv ones = tcg_constant_tl(-1);
+ TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
+ return do_csrrw(ctx, a->rd, a->csr, ones, mask);
+ } else {
+ if (a->rs1 == 0) {
+ return do_csrr_i128(ctx, a->rd, a->csr);
+ }
+
+ TCGv ones = tcg_constant_tl(-1);
+ TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
+ TCGv maskh = get_gprh(ctx, a->rs1);
+ return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, maskl, maskh);
}
-
- TCGv ones = tcg_constant_tl(-1);
- TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
- return do_csrrw(ctx, a->rd, a->csr, ones, mask);
}
static bool trans_csrrc(DisasContext *ctx, arg_csrrc *a)
* a zero value, the instruction will still attempt to write the
* unmodified value back to the csr and will cause side effects.
*/
- if (a->rs1 == 0) {
- return do_csrr(ctx, a->rd, a->csr);
- }
+ if (get_xl(ctx) < MXL_RV128) {
+ if (a->rs1 == 0) {
+ return do_csrr(ctx, a->rd, a->csr);
+ }
- TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
- return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
+ TCGv mask = get_gpr(ctx, a->rs1, EXT_ZERO);
+ return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
+ } else {
+ if (a->rs1 == 0) {
+ return do_csrr_i128(ctx, a->rd, a->csr);
+ }
+
+ TCGv maskl = get_gpr(ctx, a->rs1, EXT_ZERO);
+ TCGv maskh = get_gprh(ctx, a->rs1);
+ return do_csrrw_i128(ctx, a->rd, a->csr,
+ ctx->zero, ctx->zero, maskl, maskh);
+ }
}
static bool trans_csrrwi(DisasContext *ctx, arg_csrrwi *a)
{
- TCGv src = tcg_constant_tl(a->rs1);
+ if (get_xl(ctx) < MXL_RV128) {
+ TCGv src = tcg_constant_tl(a->rs1);
- /*
- * If rd == 0, the insn shall not read the csr, nor cause any of the
- * side effects that might occur on a csr read.
- */
- if (a->rd == 0) {
- return do_csrw(ctx, a->csr, src);
- }
+ /*
+ * If rd == 0, the insn shall not read the csr, nor cause any of the
+ * side effects that might occur on a csr read.
+ */
+ if (a->rd == 0) {
+ return do_csrw(ctx, a->csr, src);
+ }
- TCGv mask = tcg_constant_tl(-1);
- return do_csrrw(ctx, a->rd, a->csr, src, mask);
+ TCGv mask = tcg_constant_tl(-1);
+ return do_csrrw(ctx, a->rd, a->csr, src, mask);
+ } else {
+ TCGv src = tcg_constant_tl(a->rs1);
+
+ /*
+ * If rd == 0, the insn shall not read the csr, nor cause any of the
+ * side effects that might occur on a csr read.
+ */
+ if (a->rd == 0) {
+ return do_csrw_i128(ctx, a->csr, src, ctx->zero);
+ }
+
+ TCGv mask = tcg_constant_tl(-1);
+ return do_csrrw_i128(ctx, a->rd, a->csr, src, ctx->zero, mask, mask);
+ }
}
static bool trans_csrrsi(DisasContext *ctx, arg_csrrsi *a)
* a zero value, the instruction will still attempt to write the
* unmodified value back to the csr and will cause side effects.
*/
- if (a->rs1 == 0) {
- return do_csrr(ctx, a->rd, a->csr);
- }
+ if (get_xl(ctx) < MXL_RV128) {
+ if (a->rs1 == 0) {
+ return do_csrr(ctx, a->rd, a->csr);
+ }
+
+ TCGv ones = tcg_constant_tl(-1);
+ TCGv mask = tcg_constant_tl(a->rs1);
+ return do_csrrw(ctx, a->rd, a->csr, ones, mask);
+ } else {
+ if (a->rs1 == 0) {
+ return do_csrr_i128(ctx, a->rd, a->csr);
+ }
- TCGv ones = tcg_constant_tl(-1);
- TCGv mask = tcg_constant_tl(a->rs1);
- return do_csrrw(ctx, a->rd, a->csr, ones, mask);
+ TCGv ones = tcg_constant_tl(-1);
+ TCGv mask = tcg_constant_tl(a->rs1);
+ return do_csrrw_i128(ctx, a->rd, a->csr, ones, ones, mask, ctx->zero);
+ }
}
static bool trans_csrrci(DisasContext *ctx, arg_csrrci *a)
{
/*
* If rs1 == 0, the insn shall not write to the csr at all, nor
* a zero value, the instruction will still attempt to write the
* unmodified value back to the csr and will cause side effects.
*/
- if (a->rs1 == 0) {
- return do_csrr(ctx, a->rd, a->csr);
- }
+ if (get_xl(ctx) < MXL_RV128) {
+ if (a->rs1 == 0) {
+ return do_csrr(ctx, a->rd, a->csr);
+ }
- TCGv mask = tcg_constant_tl(a->rs1);
- return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
+ TCGv mask = tcg_constant_tl(a->rs1);
+ return do_csrrw(ctx, a->rd, a->csr, ctx->zero, mask);
+ } else {
+ if (a->rs1 == 0) {
+ return do_csrr_i128(ctx, a->rd, a->csr);
+ }
+
+ TCGv mask = tcg_constant_tl(a->rs1);
+ return do_csrrw_i128(ctx, a->rd, a->csr,
+ ctx->zero, ctx->zero, mask, ctx->zero);
+ }
}
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+static void gen_mulhu_i128(TCGv r2, TCGv r3, TCGv al, TCGv ah, TCGv bl, TCGv bh)
+{
+ TCGv tmpl = tcg_temp_new();
+ TCGv tmph = tcg_temp_new();
+ TCGv r0 = tcg_temp_new();
+ TCGv r1 = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
+
+ tcg_gen_mulu2_tl(r0, r1, al, bl);
+
+ tcg_gen_mulu2_tl(tmpl, tmph, al, bh);
+ tcg_gen_add2_tl(r1, r2, r1, zero, tmpl, tmph);
+ tcg_gen_mulu2_tl(tmpl, tmph, ah, bl);
+ tcg_gen_add2_tl(r1, tmph, r1, r2, tmpl, tmph);
+ /* Overflow detection into r3 */
+ tcg_gen_setcond_tl(TCG_COND_LTU, r3, tmph, r2);
+
+ tcg_gen_mov_tl(r2, tmph);
+
+ tcg_gen_mulu2_tl(tmpl, tmph, ah, bh);
+ tcg_gen_add2_tl(r2, r3, r2, r3, tmpl, tmph);
+
+ tcg_temp_free(tmpl);
+ tcg_temp_free(tmph);
+}
+
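Aside (illustrative only, assumes a GCC/Clang host with unsigned __int128):
gen_mulhu_i128() above assembles the high 128 bits of an unsigned
128x128->256 multiply from 64x64->128 partial products, much as sketched here
with __int128 standing in for tcg_gen_mulu2_tl():

    #include <assert.h>
    #include <stdint.h>

    typedef unsigned __int128 u128;

    /* High 128 bits of {ah:al} * {bh:bl}, returned as {r3:r2}. */
    static void mulhu_i128(uint64_t al, uint64_t ah, uint64_t bl, uint64_t bh,
                           uint64_t *r2, uint64_t *r3)
    {
        u128 lo   = (u128)al * bl;         /* bits   0..127 of the product */
        u128 mid1 = (u128)al * bh;         /* bits  64..191 */
        u128 mid2 = (u128)ah * bl;         /* bits  64..191 */
        u128 hi   = (u128)ah * bh;         /* bits 128..255 */

        u128 carry = (lo >> 64) + (uint64_t)mid1 + (uint64_t)mid2;
        u128 upper = (mid1 >> 64) + (mid2 >> 64) + (carry >> 64) + hi;

        *r2 = (uint64_t)upper;
        *r3 = (uint64_t)(upper >> 64);
    }

    int main(void)
    {
        uint64_t r2, r3;

        /* (2^64 + 1)^2 = 2^128 + 2^65 + 1: the high 128 bits are just 1. */
        mulhu_i128(1, 1, 1, 1, &r2, &r3);
        assert(r2 == 1 && r3 == 0);
        return 0;
    }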
+static void gen_mul_i128(TCGv rl, TCGv rh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ TCGv tmpl = tcg_temp_new();
+ TCGv tmph = tcg_temp_new();
+ TCGv tmpx = tcg_temp_new();
+ TCGv zero = tcg_constant_tl(0);
+
+ tcg_gen_mulu2_tl(rl, rh, rs1l, rs2l);
+ tcg_gen_mulu2_tl(tmpl, tmph, rs1l, rs2h);
+ tcg_gen_add2_tl(rh, tmpx, rh, zero, tmpl, tmph);
+ tcg_gen_mulu2_tl(tmpl, tmph, rs1h, rs2l);
+ tcg_gen_add2_tl(rh, tmph, rh, tmpx, tmpl, tmph);
+
+ tcg_temp_free(tmpl);
+ tcg_temp_free(tmph);
+ tcg_temp_free(tmpx);
+}
static bool trans_mul(DisasContext *ctx, arg_mul *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl, gen_mul_i128);
+}
+
+static void gen_mulh_i128(TCGv rl, TCGv rh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ TCGv t0l = tcg_temp_new();
+ TCGv t0h = tcg_temp_new();
+ TCGv t1l = tcg_temp_new();
+ TCGv t1h = tcg_temp_new();
+
+ gen_mulhu_i128(rl, rh, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_sari_tl(t0h, rs1h, 63);
+ tcg_gen_and_tl(t0l, t0h, rs2l);
+ tcg_gen_and_tl(t0h, t0h, rs2h);
+ tcg_gen_sari_tl(t1h, rs2h, 63);
+ tcg_gen_and_tl(t1l, t1h, rs1l);
+ tcg_gen_and_tl(t1h, t1h, rs1h);
+ tcg_gen_sub2_tl(t0l, t0h, rl, rh, t0l, t0h);
+ tcg_gen_sub2_tl(rl, rh, t0l, t0h, t1l, t1h);
+
+ tcg_temp_free(t0l);
+ tcg_temp_free(t0h);
+ tcg_temp_free(t1l);
+ tcg_temp_free(t1h);
}
static void gen_mulh(TCGv ret, TCGv s1, TCGv s2)
static bool trans_mulh(DisasContext *ctx, arg_mulh *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith_per_ol(ctx, a, EXT_SIGN, gen_mulh, gen_mulh_w);
+ return gen_arith_per_ol(ctx, a, EXT_SIGN, gen_mulh, gen_mulh_w,
+ gen_mulh_i128);
+}
+
+static void gen_mulhsu_i128(TCGv rl, TCGv rh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ TCGv t0l = tcg_temp_new();
+ TCGv t0h = tcg_temp_new();
+
+ gen_mulhu_i128(rl, rh, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_sari_tl(t0h, rs1h, 63);
+ tcg_gen_and_tl(t0l, t0h, rs2l);
+ tcg_gen_and_tl(t0h, t0h, rs2h);
+ tcg_gen_sub2_tl(rl, rh, rl, rh, t0l, t0h);
+
+ tcg_temp_free(t0l);
+ tcg_temp_free(t0h);
}
static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2)
static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith_per_ol(ctx, a, EXT_NONE, gen_mulhsu, gen_mulhsu_w);
+ return gen_arith_per_ol(ctx, a, EXT_NONE, gen_mulhsu, gen_mulhsu_w,
+ gen_mulhsu_i128);
}
static void gen_mulhu(TCGv ret, TCGv s1, TCGv s2)
{
REQUIRE_EXT(ctx, RVM);
/* gen_mulh_w works for either sign as input. */
- return gen_arith_per_ol(ctx, a, EXT_ZERO, gen_mulhu, gen_mulh_w);
+ return gen_arith_per_ol(ctx, a, EXT_ZERO, gen_mulhu, gen_mulh_w,
+ gen_mulhu_i128);
+}
+
+static void gen_div_i128(TCGv rdl, TCGv rdh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ gen_helper_divs_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh));
}
static void gen_div(TCGv ret, TCGv source1, TCGv source2)
static bool trans_div(DisasContext *ctx, arg_div *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith(ctx, a, EXT_SIGN, gen_div);
+ return gen_arith(ctx, a, EXT_SIGN, gen_div, gen_div_i128);
+}
+
+static void gen_divu_i128(TCGv rdl, TCGv rdh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ gen_helper_divu_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh));
}
static void gen_divu(TCGv ret, TCGv source1, TCGv source2)
static bool trans_divu(DisasContext *ctx, arg_divu *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith(ctx, a, EXT_ZERO, gen_divu);
+ return gen_arith(ctx, a, EXT_ZERO, gen_divu, gen_divu_i128);
+}
+
+static void gen_rem_i128(TCGv rdl, TCGv rdh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ gen_helper_rems_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh));
}
static void gen_rem(TCGv ret, TCGv source1, TCGv source2)
static bool trans_rem(DisasContext *ctx, arg_rem *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith(ctx, a, EXT_SIGN, gen_rem);
+ return gen_arith(ctx, a, EXT_SIGN, gen_rem, gen_rem_i128);
+}
+
+static void gen_remu_i128(TCGv rdl, TCGv rdh,
+ TCGv rs1l, TCGv rs1h, TCGv rs2l, TCGv rs2h)
+{
+ gen_helper_remu_i128(rdl, cpu_env, rs1l, rs1h, rs2l, rs2h);
+ tcg_gen_ld_tl(rdh, cpu_env, offsetof(CPURISCVState, retxh));
}
static void gen_remu(TCGv ret, TCGv source1, TCGv source2)
static bool trans_remu(DisasContext *ctx, arg_remu *a)
{
REQUIRE_EXT(ctx, RVM);
- return gen_arith(ctx, a, EXT_ZERO, gen_remu);
+ return gen_arith(ctx, a, EXT_ZERO, gen_remu, gen_remu_i128);
}
static bool trans_mulw(DisasContext *ctx, arg_mulw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
REQUIRE_EXT(ctx, RVM);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl);
+ return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl, NULL);
}
static bool trans_divw(DisasContext *ctx, arg_divw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
REQUIRE_EXT(ctx, RVM);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_SIGN, gen_div);
+ return gen_arith(ctx, a, EXT_SIGN, gen_div, NULL);
}
static bool trans_divuw(DisasContext *ctx, arg_divuw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
REQUIRE_EXT(ctx, RVM);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_ZERO, gen_divu);
+ return gen_arith(ctx, a, EXT_ZERO, gen_divu, NULL);
}
static bool trans_remw(DisasContext *ctx, arg_remw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
REQUIRE_EXT(ctx, RVM);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_SIGN, gen_rem);
+ return gen_arith(ctx, a, EXT_SIGN, gen_rem, NULL);
}
static bool trans_remuw(DisasContext *ctx, arg_remuw *a)
{
- REQUIRE_64BIT(ctx);
+ REQUIRE_64_OR_128BIT(ctx);
REQUIRE_EXT(ctx, RVM);
ctx->ol = MXL_RV32;
- return gen_arith(ctx, a, EXT_ZERO, gen_remu);
+ return gen_arith(ctx, a, EXT_ZERO, gen_remu, NULL);
+}
+
+static bool trans_muld(DisasContext *ctx, arg_muld *a)
+{
+ REQUIRE_128BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_SIGN, tcg_gen_mul_tl, NULL);
+}
+
+static bool trans_divd(DisasContext *ctx, arg_divd *a)
+{
+ REQUIRE_128BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_SIGN, gen_div, NULL);
+}
+
+static bool trans_divud(DisasContext *ctx, arg_divud *a)
+{
+ REQUIRE_128BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_ZERO, gen_divu, NULL);
+}
+
+static bool trans_remd(DisasContext *ctx, arg_remd *a)
+{
+ REQUIRE_128BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_SIGN, gen_rem, NULL);
+}
+
+static bool trans_remud(DisasContext *ctx, arg_remud *a)
+{
+ REQUIRE_128BIT(ctx);
+ REQUIRE_EXT(ctx, RVM);
+ ctx->ol = MXL_RV64;
+ return gen_arith(ctx, a, EXT_ZERO, gen_remu, NULL);
}
static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
- require_rvf(s) &&
+ require_scale_rvf(s) &&
+ (s->sew != MO_8) &&
vext_check_isa_ill(s) &&
vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}
static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
- require_rvf(s) &&
+ require_scale_rvf(s) &&
+ (s->sew != MO_8) &&
vext_check_isa_ill(s) &&
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
- require_rvf(s) &&
+ require_scale_rvf(s) &&
+ (s->sew != MO_8) &&
vext_check_isa_ill(s) &&
vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
}
static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
{
return require_rvv(s) &&
- require_rvf(s) &&
+ require_scale_rvf(s) &&
+ (s->sew != MO_8) &&
vext_check_isa_ill(s) &&
vext_check_dd(s, a->rd, a->rs2, a->vm);
}
static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
{
return require_rvv(s) &&
- require_scale_rvf(s) &&
- (s->sew != MO_8) &&
vext_check_isa_ill(s) &&
vext_check_ds(s, a->rd, a->rs2, a->vm);
}
-#define GEN_OPFV_WIDEN_TRANS(NAME, HELPER, FRM) \
+static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
+{
+ return opfv_widen_check(s, a) &&
+ require_rvf(s);
+}
+
+static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
+{
+ return opfv_widen_check(s, a) &&
+ require_scale_rvf(s) &&
+ (s->sew != MO_8);
+}
+
+#define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \
static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
{ \
- if (opfv_widen_check(s, a)) { \
+ if (CHECK(s, a)) { \
if (FRM != RISCV_FRM_DYN) { \
gen_set_rm(s, RISCV_FRM_DYN); \
} \
return false; \
}
-GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v, vfwcvt_xu_f_v, RISCV_FRM_DYN)
-GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v, vfwcvt_x_f_v, RISCV_FRM_DYN)
-GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v, vfwcvt_f_f_v, RISCV_FRM_DYN)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
+ RISCV_FRM_DYN)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
+ RISCV_FRM_DYN)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v, opffv_widen_check, vfwcvt_f_f_v,
+ RISCV_FRM_DYN)
/* Reuse the helper functions from vfwcvt.xu.f.v and vfwcvt.x.f.v */
-GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_xu_f_v, vfwcvt_xu_f_v, RISCV_FRM_RTZ)
-GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_x_f_v, vfwcvt_x_f_v, RISCV_FRM_RTZ)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
+ RISCV_FRM_RTZ)
+GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
+ RISCV_FRM_RTZ)
static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
{
static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
{
return require_rvv(s) &&
- require_rvf(s) &&
- (s->sew != MO_64) &&
vext_check_isa_ill(s) &&
/* OPFV narrowing instructions ignore vs1 check */
vext_check_sd(s, a->rd, a->rs2, a->vm);
}
-#define GEN_OPFV_NARROW_TRANS(NAME, HELPER, FRM) \
+static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
+{
+ return opfv_narrow_check(s, a) &&
+ require_rvf(s) &&
+ (s->sew != MO_64);
+}
+
+static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
+{
+ return opfv_narrow_check(s, a) &&
+ require_scale_rvf(s) &&
+ (s->sew != MO_8);
+}
+
+#define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \
static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
{ \
- if (opfv_narrow_check(s, a)) { \
+ if (CHECK(s, a)) { \
if (FRM != RISCV_FRM_DYN) { \
gen_set_rm(s, RISCV_FRM_DYN); \
} \
return false; \
}
-GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_w, vfncvt_f_xu_w, RISCV_FRM_DYN)
-GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, vfncvt_f_x_w, RISCV_FRM_DYN)
-GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, vfncvt_f_f_w, RISCV_FRM_DYN)
+GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_w, opfxv_narrow_check, vfncvt_f_xu_w,
+ RISCV_FRM_DYN)
+GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, opfxv_narrow_check, vfncvt_f_x_w,
+ RISCV_FRM_DYN)
+GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
+ RISCV_FRM_DYN)
/* Reuse the helper function from vfncvt.f.f.w */
-GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, vfncvt_f_f_w, RISCV_FRM_ROD)
+GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
+ RISCV_FRM_ROD)
static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
{
--- /dev/null
+/*
+ * RISC-V Emulation Helpers for QEMU.
+ *
+ * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
+ * Copyright (c) 2017-2018 SiFive, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/main-loop.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+target_ulong HELPER(divu_i128)(CPURISCVState *env,
+ target_ulong ul, target_ulong uh,
+ target_ulong vl, target_ulong vh)
+{
+ target_ulong ql, qh;
+ Int128 q;
+
+ if (vl == 0 && vh == 0) { /* Handle special behavior on div by zero */
+ ql = ~0x0;
+ qh = ~0x0;
+ } else {
+ q = int128_divu(int128_make128(ul, uh), int128_make128(vl, vh));
+ ql = int128_getlo(q);
+ qh = int128_gethi(q);
+ }
+
+ env->retxh = qh;
+ return ql;
+}
+
+target_ulong HELPER(remu_i128)(CPURISCVState *env,
+ target_ulong ul, target_ulong uh,
+ target_ulong vl, target_ulong vh)
+{
+ target_ulong rl, rh;
+ Int128 r;
+
+ if (vl == 0 && vh == 0) {
+ rl = ul;
+ rh = uh;
+ } else {
+ r = int128_remu(int128_make128(ul, uh), int128_make128(vl, vh));
+ rl = int128_getlo(r);
+ rh = int128_gethi(r);
+ }
+
+ env->retxh = rh;
+ return rl;
+}
+
+target_ulong HELPER(divs_i128)(CPURISCVState *env,
+ target_ulong ul, target_ulong uh,
+ target_ulong vl, target_ulong vh)
+{
+ target_ulong qh, ql;
+ Int128 q;
+
+ if (vl == 0 && vh == 0) { /* Div by zero check */
+ ql = ~0x0;
+ qh = ~0x0;
+ } else if (uh == (1ULL << (TARGET_LONG_BITS - 1)) && ul == 0 &&
+ vh == ~0x0 && vl == ~0x0) {
+ /* Signed div overflow check (-2**127 / -1) */
+ ql = ul;
+ qh = uh;
+ } else {
+ q = int128_divs(int128_make128(ul, uh), int128_make128(vl, vh));
+ ql = int128_getlo(q);
+ qh = int128_gethi(q);
+ }
+
+ env->retxh = qh;
+ return ql;
+}
+
+target_ulong HELPER(rems_i128)(CPURISCVState *env,
+ target_ulong ul, target_ulong uh,
+ target_ulong vl, target_ulong vh)
+{
+ target_ulong rh, rl;
+ Int128 r;
+
+ if (vl == 0 && vh == 0) {
+ rl = ul;
+ rh = uh;
+ } else {
+ r = int128_rems(int128_make128(ul, uh), int128_make128(vl, vh));
+ rl = int128_getlo(r);
+ rh = int128_gethi(r);
+ }
+
+ env->retxh = rh;
+ return rl;
+}
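Aside (illustrative only, assumes a GCC/Clang host with unsigned __int128):
the special cases guarded in the helpers above follow the usual M-extension
rules, extended to 128 bits: division by zero yields an all-ones quotient and
leaves the remainder equal to the dividend, and signed quotient overflow (the
most negative value divided by -1) returns the dividend, as the explicit
check in helper_divs_i128() shows. The unsigned rules in compact form:

    #include <assert.h>
    #include <stdint.h>

    typedef unsigned __int128 u128;

    /* RISC-V DIVU/REMU semantics: never trap, x / 0 == ~0, x % 0 == x. */
    static u128 rv_divu(u128 u, u128 v) { return v == 0 ? ~(u128)0 : u / v; }
    static u128 rv_remu(u128 u, u128 v) { return v == 0 ? u : u % v; }

    int main(void)
    {
        assert(rv_divu(42, 0) == ~(u128)0);
        assert(rv_remu(42, 0) == 42);
        assert(rv_divu(100, 7) == 14);
        assert(rv_remu(100, 7) == 2);
        return 0;
    }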
}
};
+static bool rv128_needed(void *opaque)
+{
+ RISCVCPU *cpu = opaque;
+ CPURISCVState *env = &cpu->env;
+
+ return env->misa_mxl_max == MXL_RV128;
+}
+
+static const VMStateDescription vmstate_rv128 = {
+ .name = "cpu/rv128",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = rv128_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL_ARRAY(env.gprh, RISCVCPU, 32),
+ VMSTATE_UINT64(env.mscratchh, RISCVCPU),
+ VMSTATE_UINT64(env.sscratchh, RISCVCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_riscv_cpu = {
.name = "cpu",
.version_id = 3,
&vmstate_hyper,
&vmstate_vector,
&vmstate_pointermasking,
+ &vmstate_rv128,
NULL
}
};
'vector_helper.c',
'bitmanip_helper.c',
'translate.c',
+ 'm128_helper.c',
))
riscv_softmmu_ss = ss.source_set()
return val;
}
+target_ulong helper_csrr_i128(CPURISCVState *env, int csr)
+{
+ Int128 rv = int128_zero();
+ RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
+ int128_zero(),
+ int128_zero());
+
+ if (ret != RISCV_EXCP_NONE) {
+ riscv_raise_exception(env, ret, GETPC());
+ }
+
+ env->retxh = int128_gethi(rv);
+ return int128_getlo(rv);
+}
+
+void helper_csrw_i128(CPURISCVState *env, int csr,
+ target_ulong srcl, target_ulong srch)
+{
+ RISCVException ret = riscv_csrrw_i128(env, csr, NULL,
+ int128_make128(srcl, srch),
+ UINT128_MAX);
+
+ if (ret != RISCV_EXCP_NONE) {
+ riscv_raise_exception(env, ret, GETPC());
+ }
+}
+
+target_ulong helper_csrrw_i128(CPURISCVState *env, int csr,
+ target_ulong srcl, target_ulong srch,
+ target_ulong maskl, target_ulong maskh)
+{
+ Int128 rv = int128_zero();
+ RISCVException ret = riscv_csrrw_i128(env, csr, &rv,
+ int128_make128(srcl, srch),
+ int128_make128(maskl, maskh));
+
+ if (ret != RISCV_EXCP_NONE) {
+ riscv_raise_exception(env, ret, GETPC());
+ }
+
+ env->retxh = int128_gethi(rv);
+ return int128_getlo(rv);
+}
+
#ifndef CONFIG_USER_ONLY
target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
uint64_t mstatus = env->mstatus;
target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
- if (!pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
+ if (riscv_feature(env, RISCV_FEATURE_PMP) &&
+ !pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
#include "internals.h"
/* global register indices */
-static TCGv cpu_gpr[32], cpu_pc, cpu_vl, cpu_vstart;
+static TCGv cpu_gpr[32], cpu_gprh[32], cpu_pc, cpu_vl, cpu_vstart;
static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */
static TCGv load_res;
static TCGv load_val;
/* pc_succ_insn points to the instruction following base.pc_next */
target_ulong pc_succ_insn;
target_ulong priv_ver;
+ RISCVMXL misa_mxl_max;
RISCVMXL xl;
uint32_t misa_ext;
uint32_t opcode;
return 16 << get_ol(ctx);
}
+/* The maximum register length */
+#ifdef TARGET_RISCV32
+#define get_xl_max(ctx) MXL_RV32
+#else
+#define get_xl_max(ctx) ((ctx)->misa_mxl_max)
+#endif
+
/*
* RISC-V requires NaN-boxing of narrower width floating point values.
* This applies when a 32-bit value is assigned to a 64-bit FP register.
static void gen_exception_illegal(DisasContext *ctx)
{
+ tcg_gen_st_i32(tcg_constant_i32(ctx->opcode), cpu_env,
+ offsetof(CPURISCVState, bins));
+
generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST);
}
}
break;
case MXL_RV64:
+ case MXL_RV128:
break;
default:
g_assert_not_reached();
return cpu_gpr[reg_num];
}
+static TCGv get_gprh(DisasContext *ctx, int reg_num)
+{
+ assert(get_xl(ctx) == MXL_RV128);
+ if (reg_num == 0) {
+ return ctx->zero;
+ }
+ return cpu_gprh[reg_num];
+}
+
static TCGv dest_gpr(DisasContext *ctx, int reg_num)
{
if (reg_num == 0 || get_olen(ctx) < TARGET_LONG_BITS) {
return cpu_gpr[reg_num];
}
+static TCGv dest_gprh(DisasContext *ctx, int reg_num)
+{
+ if (reg_num == 0) {
+ return temp_new(ctx);
+ }
+ return cpu_gprh[reg_num];
+}
+
static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t)
{
if (reg_num != 0) {
tcg_gen_ext32s_tl(cpu_gpr[reg_num], t);
break;
case MXL_RV64:
+ case MXL_RV128:
tcg_gen_mov_tl(cpu_gpr[reg_num], t);
break;
default:
g_assert_not_reached();
}
+
+ if (get_xl_max(ctx) == MXL_RV128) {
+ tcg_gen_sari_tl(cpu_gprh[reg_num], cpu_gpr[reg_num], 63);
+ }
+ }
+}
+
+static void gen_set_gpri(DisasContext *ctx, int reg_num, target_long imm)
+{
+ if (reg_num != 0) {
+ switch (get_ol(ctx)) {
+ case MXL_RV32:
+ tcg_gen_movi_tl(cpu_gpr[reg_num], (int32_t)imm);
+ break;
+ case MXL_RV64:
+ case MXL_RV128:
+ tcg_gen_movi_tl(cpu_gpr[reg_num], imm);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (get_xl_max(ctx) == MXL_RV128) {
+ tcg_gen_movi_tl(cpu_gprh[reg_num], -(imm < 0));
+ }
+ }
+}
+
+static void gen_set_gpr128(DisasContext *ctx, int reg_num, TCGv rl, TCGv rh)
+{
+ assert(get_ol(ctx) == MXL_RV128);
+ if (reg_num != 0) {
+ tcg_gen_mov_tl(cpu_gpr[reg_num], rl);
+ tcg_gen_mov_tl(cpu_gprh[reg_num], rh);
}
}
} \
} while (0)
-#define REQUIRE_64BIT(ctx) do { \
- if (get_xl(ctx) < MXL_RV64) { \
- return false; \
- } \
+#define REQUIRE_64BIT(ctx) do { \
+ if (get_xl(ctx) != MXL_RV64) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_128BIT(ctx) do { \
+ if (get_xl(ctx) != MXL_RV128) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_64_OR_128BIT(ctx) do { \
+ if (get_xl(ctx) == MXL_RV32) { \
+ return false; \
+ } \
} while (0)
static int ex_rvc_register(DisasContext *ctx, int reg)
/* Include the auto-generated decoder for 32 bit insn */
#include "decode-insn32.c.inc"
-static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, DisasExtend ext,
+static bool gen_logic_imm_fn(DisasContext *ctx, arg_i *a,
void (*func)(TCGv, TCGv, target_long))
{
TCGv dest = dest_gpr(ctx, a->rd);
- TCGv src1 = get_gpr(ctx, a->rs1, ext);
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
func(dest, src1, a->imm);
- gen_set_gpr(ctx, a->rd, dest);
+ if (get_xl(ctx) == MXL_RV128) {
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv desth = dest_gprh(ctx, a->rd);
+
+ func(desth, src1h, -(a->imm < 0));
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ } else {
+ gen_set_gpr(ctx, a->rd, dest);
+ }
+
+ return true;
+}
+
+static bool gen_logic(DisasContext *ctx, arg_r *a,
+ void (*func)(TCGv, TCGv, TCGv))
+{
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+
+ func(dest, src1, src2);
+
+ if (get_xl(ctx) == MXL_RV128) {
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv src2h = get_gprh(ctx, a->rs2);
+ TCGv desth = dest_gprh(ctx, a->rd);
+
+ func(desth, src1h, src2h);
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ } else {
+ gen_set_gpr(ctx, a->rd, dest);
+ }
+
+ return true;
+}
+
+static bool gen_arith_imm_fn(DisasContext *ctx, arg_i *a, DisasExtend ext,
+ void (*func)(TCGv, TCGv, target_long),
+ void (*f128)(TCGv, TCGv, TCGv, TCGv, target_long))
+{
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
+
+ if (get_ol(ctx) < MXL_RV128) {
+ func(dest, src1, a->imm);
+ gen_set_gpr(ctx, a->rd, dest);
+ } else {
+ if (f128 == NULL) {
+ return false;
+ }
+
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv desth = dest_gprh(ctx, a->rd);
+
+ f128(dest, desth, src1, src1h, a->imm);
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ }
return true;
}
static bool gen_arith_imm_tl(DisasContext *ctx, arg_i *a, DisasExtend ext,
- void (*func)(TCGv, TCGv, TCGv))
+ void (*func)(TCGv, TCGv, TCGv),
+ void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv))
{
TCGv dest = dest_gpr(ctx, a->rd);
TCGv src1 = get_gpr(ctx, a->rs1, ext);
TCGv src2 = tcg_constant_tl(a->imm);
- func(dest, src1, src2);
+ if (get_ol(ctx) < MXL_RV128) {
+ func(dest, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ } else {
+ if (f128 == NULL) {
+ return false;
+ }
- gen_set_gpr(ctx, a->rd, dest);
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv src2h = tcg_constant_tl(-(a->imm < 0));
+ TCGv desth = dest_gprh(ctx, a->rd);
+
+ f128(dest, desth, src1, src1h, src2, src2h);
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ }
return true;
}
static bool gen_arith(DisasContext *ctx, arg_r *a, DisasExtend ext,
- void (*func)(TCGv, TCGv, TCGv))
+ void (*func)(TCGv, TCGv, TCGv),
+ void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv))
{
TCGv dest = dest_gpr(ctx, a->rd);
TCGv src1 = get_gpr(ctx, a->rs1, ext);
TCGv src2 = get_gpr(ctx, a->rs2, ext);
- func(dest, src1, src2);
+ if (get_ol(ctx) < MXL_RV128) {
+ func(dest, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ } else {
+ if (f128 == NULL) {
+ return false;
+ }
- gen_set_gpr(ctx, a->rd, dest);
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv src2h = get_gprh(ctx, a->rs2);
+ TCGv desth = dest_gprh(ctx, a->rd);
+
+ f128(dest, desth, src1, src1h, src2, src2h);
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ }
return true;
}
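With the widened signature, an f128 callback receives the destination and both sources split into low/high halves. As a sketch (the name is chosen here for illustration, and it assumes tcg_gen_add2_tl is available for 64-bit target registers), a 128-bit addition callback only needs to propagate the carry between the two halves:

    static void gen_add_i128(TCGv retl, TCGv reth,
                             TCGv src1l, TCGv src1h, TCGv src2l, TCGv src2h)
    {
        /* add the low words and fold the carry into the high-word sum */
        tcg_gen_add2_tl(retl, reth, src1l, src1h, src2l, src2h);
    }

Such a callback would then be passed as the trailing f128 argument to gen_arith() for the corresponding instruction.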
static bool gen_arith_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
void (*f_tl)(TCGv, TCGv, TCGv),
- void (*f_32)(TCGv, TCGv, TCGv))
+ void (*f_32)(TCGv, TCGv, TCGv),
+ void (*f_128)(TCGv, TCGv, TCGv, TCGv, TCGv, TCGv))
{
int olen = get_olen(ctx);
if (olen != TARGET_LONG_BITS) {
if (olen == 32) {
f_tl = f_32;
- } else {
+ } else if (olen != 128) {
g_assert_not_reached();
}
}
- return gen_arith(ctx, a, ext, f_tl);
+ return gen_arith(ctx, a, ext, f_tl, f_128);
}
static bool gen_shift_imm_fn(DisasContext *ctx, arg_shift *a, DisasExtend ext,
- void (*func)(TCGv, TCGv, target_long))
+ void (*func)(TCGv, TCGv, target_long),
+ void (*f128)(TCGv, TCGv, TCGv, TCGv, target_long))
{
TCGv dest, src1;
int max_len = get_olen(ctx);
dest = dest_gpr(ctx, a->rd);
src1 = get_gpr(ctx, a->rs1, ext);
- func(dest, src1, a->shamt);
+ if (max_len < 128) {
+ func(dest, src1, a->shamt);
+ gen_set_gpr(ctx, a->rd, dest);
+ } else {
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv desth = dest_gprh(ctx, a->rd);
- gen_set_gpr(ctx, a->rd, dest);
+ if (f128 == NULL) {
+ return false;
+ }
+ f128(dest, desth, src1, src1h, a->shamt);
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ }
return true;
}
static bool gen_shift_imm_fn_per_ol(DisasContext *ctx, arg_shift *a,
DisasExtend ext,
void (*f_tl)(TCGv, TCGv, target_long),
- void (*f_32)(TCGv, TCGv, target_long))
+ void (*f_32)(TCGv, TCGv, target_long),
+ void (*f_128)(TCGv, TCGv, TCGv, TCGv,
+ target_long))
{
int olen = get_olen(ctx);
if (olen != TARGET_LONG_BITS) {
if (olen == 32) {
f_tl = f_32;
- } else {
+ } else if (olen != 128) {
g_assert_not_reached();
}
}
- return gen_shift_imm_fn(ctx, a, ext, f_tl);
+ return gen_shift_imm_fn(ctx, a, ext, f_tl, f_128);
}
static bool gen_shift_imm_tl(DisasContext *ctx, arg_shift *a, DisasExtend ext,
}
static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext,
- void (*func)(TCGv, TCGv, TCGv))
+ void (*func)(TCGv, TCGv, TCGv),
+ void (*f128)(TCGv, TCGv, TCGv, TCGv, TCGv))
{
- TCGv dest = dest_gpr(ctx, a->rd);
- TCGv src1 = get_gpr(ctx, a->rs1, ext);
TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
TCGv ext2 = tcg_temp_new();
+ int max_len = get_olen(ctx);
- tcg_gen_andi_tl(ext2, src2, get_olen(ctx) - 1);
- func(dest, src1, ext2);
+ tcg_gen_andi_tl(ext2, src2, max_len - 1);
- gen_set_gpr(ctx, a->rd, dest);
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv src1 = get_gpr(ctx, a->rs1, ext);
+
+ if (max_len < 128) {
+ func(dest, src1, ext2);
+ gen_set_gpr(ctx, a->rd, dest);
+ } else {
+ TCGv src1h = get_gprh(ctx, a->rs1);
+ TCGv desth = dest_gprh(ctx, a->rd);
+
+ if (f128 == NULL) {
+ return false;
+ }
+ f128(dest, desth, src1, src1h, ext2);
+ gen_set_gpr128(ctx, a->rd, dest, desth);
+ }
tcg_temp_free(ext2);
return true;
}
static bool gen_shift_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext,
void (*f_tl)(TCGv, TCGv, TCGv),
- void (*f_32)(TCGv, TCGv, TCGv))
+ void (*f_32)(TCGv, TCGv, TCGv),
+ void (*f_128)(TCGv, TCGv, TCGv, TCGv, TCGv))
{
int olen = get_olen(ctx);
if (olen != TARGET_LONG_BITS) {
if (olen == 32) {
f_tl = f_32;
- } else {
+ } else if (olen != 128) {
g_assert_not_reached();
}
}
- return gen_shift(ctx, a, ext, f_tl);
+ return gen_shift(ctx, a, ext, f_tl, f_128);
}
static bool gen_unary(DisasContext *ctx, arg_r2 *a, DisasExtend ext,
if (!has_ext(ctx, RVC)) {
gen_exception_illegal(ctx);
} else {
+ ctx->opcode = opcode;
ctx->pc_succ_insn = ctx->base.pc_next + 2;
if (!decode_insn16(ctx, opcode)) {
gen_exception_illegal(ctx);
opcode32 = deposit32(opcode32, 16, 16,
translator_lduw(env, &ctx->base,
ctx->base.pc_next + 2));
+ ctx->opcode = opcode32;
ctx->pc_succ_insn = ctx->base.pc_next + 4;
if (!decode_insn32(ctx, opcode32)) {
gen_exception_illegal(ctx);
ctx->lmul = sextract32(FIELD_EX32(tb_flags, TB_FLAGS, LMUL), 0, 3);
ctx->vstart = env->vstart;
ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
+ ctx->misa_mxl_max = env->misa_mxl_max;
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
ctx->cs = cs;
ctx->ntemp = 0;
* unless you specifically block reads/writes to reg 0.
*/
cpu_gpr[0] = NULL;
+ cpu_gprh[0] = NULL;
for (i = 1; i < 32; i++) {
cpu_gpr[i] = tcg_global_mem_new(cpu_env,
offsetof(CPURISCVState, gpr[i]), riscv_int_regnames[i]);
+ cpu_gprh[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPURISCVState, gprh[i]), riscv_int_regnamesh[i]);
}
for (i = 0; i < 32; i++) {
D(0xeb6a, ASI, SIY, GIE, la1, i2, new, 0, asi, adds32, MO_TESL)
C(0xecd8, AHIK, RIE_d, DO, r3, i2, new, r1_32, add, adds32)
C(0xc208, AGFI, RIL_a, EI, r1, i2, r1, 0, add, adds64)
- D(0xeb7a, AGSI, SIY, GIE, la1, i2, new, 0, asi, adds64, MO_TEQ)
+ D(0xeb7a, AGSI, SIY, GIE, la1, i2, new, 0, asi, adds64, MO_TEUQ)
C(0xecd9, AGHIK, RIE_d, DO, r3, i2, r1, 0, add, adds64)
/* ADD IMMEDIATE HIGH */
C(0xcc08, AIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, adds32)
/* ADD LOGICAL WITH SIGNED IMMEDIATE */
D(0xeb6e, ALSI, SIY, GIE, la1, i2_32u, new, 0, asi, addu32, MO_TEUL)
C(0xecda, ALHSIK, RIE_d, DO, r3_32u, i2_32u, new, r1_32, add, addu32)
- D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asiu64, addu64, MO_TEQ)
+ D(0xeb7e, ALGSI, SIY, GIE, la1, i2, new, 0, asiu64, addu64, MO_TEUQ)
C(0xecdb, ALGHSIK, RIE_d, DO, r3, i2, r1, 0, addu64, addu64)
/* ADD LOGICAL WITH SIGNED IMMEDIATE HIGH */
C(0xcc0a, ALSIH, RIL_a, HW, r1_sr32, i2_32u, new, r1_32h, add, addu32)
/* COMPARE AND SWAP */
D(0xba00, CS, RS_a, Z, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL)
D(0xeb14, CSY, RSY_a, LD, r3_32u, r1_32u, new, r1_32, cs, 0, MO_TEUL)
- D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, MO_TEQ)
+ D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, MO_TEUQ)
/* COMPARE DOUBLE AND SWAP */
- D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ)
- D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEQ)
+ D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEUQ)
+ D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, MO_TEUQ)
C(0xeb3e, CDSG, RSY_a, Z, 0, 0, 0, 0, cdsg, 0)
/* COMPARE AND SWAP AND STORE */
C(0xc802, CSST, SSF, CASS, la1, a2, 0, 0, csst, 0)
C(0xc000, LARL, RIL_b, Z, 0, ri2, 0, r1, mov2, 0)
/* LOAD AND ADD */
D(0xebf8, LAA, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, laa, adds32, MO_TESL)
- D(0xebe8, LAAG, RSY_a, ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEQ)
+ D(0xebe8, LAAG, RSY_a, ILA, r3, a2, new, in2_r1, laa, adds64, MO_TEUQ)
/* LOAD AND ADD LOGICAL */
D(0xebfa, LAAL, RSY_a, ILA, r3_32u, a2, new, in2_r1_32, laa, addu32, MO_TEUL)
- D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEQ)
+ D(0xebea, LAALG, RSY_a, ILA, r3, a2, new, in2_r1, laa, addu64, MO_TEUQ)
/* LOAD AND AND */
D(0xebf4, LAN, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lan, nz32, MO_TESL)
- D(0xebe4, LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEQ)
+ D(0xebe4, LANG, RSY_a, ILA, r3, a2, new, in2_r1, lan, nz64, MO_TEUQ)
/* LOAD AND EXCLUSIVE OR */
D(0xebf7, LAX, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lax, nz32, MO_TESL)
- D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEQ)
+ D(0xebe7, LAXG, RSY_a, ILA, r3, a2, new, in2_r1, lax, nz64, MO_TEUQ)
/* LOAD AND OR */
D(0xebf6, LAO, RSY_a, ILA, r3_32s, a2, new, in2_r1_32, lao, nz32, MO_TESL)
- D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEQ)
+ D(0xebe6, LAOG, RSY_a, ILA, r3, a2, new, in2_r1, lao, nz64, MO_TEUQ)
/* LOAD AND TEST */
C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32)
C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64)
C(0xebe0, LOCFH, RSY_b, LOC2, r1_sr32, m2_32u, new, r1_32h, loc, 0)
/* LOAD PAIR DISJOINT */
D(0xc804, LPD, SSF, ILA, 0, 0, new_P, r3_P32, lpd, 0, MO_TEUL)
- D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEQ)
+ D(0xc805, LPDG, SSF, ILA, 0, 0, new_P, r3_P64, lpd, 0, MO_TEUQ)
/* LOAD PAIR FROM QUADWORD */
C(0xe38f, LPQ, RXY_a, Z, 0, a2, r1_P, 0, lpq, 0)
/* LOAD POSITIVE */
#ifndef CONFIG_USER_ONLY
/* COMPARE AND SWAP AND PURGE */
E(0xb250, CSP, RRE, Z, r1_32u, ra2, r1_P, 0, csp, 0, MO_TEUL, IF_PRIV)
- E(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEQ, IF_PRIV)
+ E(0xb98a, CSPG, RRE, DAT_ENH, r1_o, ra2, r1_P, 0, csp, 0, MO_TEUQ, IF_PRIV)
/* DIAGNOSE (KVM hypercall) */
F(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0, IF_PRIV | IF_IO)
/* INSERT STORAGE KEY EXTENDED */
F(0xe303, LRAG, RXY_a, Z, 0, a2, r1, 0, lra, 0, IF_PRIV)
/* LOAD USING REAL ADDRESS */
E(0xb24b, LURA, RRE, Z, 0, ra2, new, r1_32, lura, 0, MO_TEUL, IF_PRIV)
- E(0xb905, LURAG, RRE, Z, 0, ra2, r1, 0, lura, 0, MO_TEQ, IF_PRIV)
+ E(0xb905, LURAG, RRE, Z, 0, ra2, r1, 0, lura, 0, MO_TEUQ, IF_PRIV)
/* MOVE TO PRIMARY */
F(0xda00, MVCP, SS_d, Z, la1, a2, 0, 0, mvcp, 0, IF_PRIV)
/* MOVE TO SECONDARY */
F(0xad00, STOSM, SI, Z, la1, 0, 0, 0, stnosm, 0, IF_PRIV)
/* STORE USING REAL ADDRESS */
E(0xb246, STURA, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEUL, IF_PRIV)
- E(0xb925, STURG, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEQ, IF_PRIV)
+ E(0xb925, STURG, RRE, Z, r1_o, ra2, 0, 0, stura, 0, MO_TEUQ, IF_PRIV)
/* TEST BLOCK */
F(0xb22c, TB, RRE, Z, 0, r2_o, 0, 0, testblock, 0, IF_PRIV)
/* TEST PROTECTION */
if (parallel) {
#ifdef CONFIG_ATOMIC64
- MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx);
+ MemOpIdx oi = make_memop_idx(MO_TEUQ | MO_ALIGN, mem_idx);
ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra);
#else
/* Note that we asserted !parallel above. */
cpu_stq_data_ra(env, a2 + 0, svh, ra);
cpu_stq_data_ra(env, a2 + 8, svl, ra);
} else if (HAVE_ATOMIC128) {
- MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ MemOpIdx oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx);
Int128 sv = int128_make128(svl, svh);
cpu_atomic_sto_be_mmu(env, a2, sv, oi, ra);
} else {
assert(HAVE_ATOMIC128);
mem_idx = cpu_mmu_index(env, false);
- oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx);
v = cpu_atomic_ldo_be_mmu(env, addr, oi, ra);
hi = int128_gethi(v);
lo = int128_getlo(v);
assert(HAVE_ATOMIC128);
mem_idx = cpu_mmu_index(env, false);
- oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ oi = make_memop_idx(MO_TEUQ | MO_ALIGN_16, mem_idx);
v = int128_make128(low, high);
cpu_atomic_sto_be_mmu(env, addr, v, oi, ra);
}
t1 = tcg_temp_new_i64();
t2 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
- MO_TEQ | MO_ALIGN_8);
+ MO_TEUQ | MO_ALIGN_8);
tcg_gen_addi_i64(o->in2, o->in2, 8);
tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
gen_helper_load_psw(cpu_env, t1, t2);
#ifndef CONFIG_USER_ONLY
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
+ tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
gen_helper_sck(cc_op, cpu_env, o->in1);
set_cc_static(s);
return DISAS_NEXT;
#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasOps *o)
{
- tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
+ tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif
static void in2_m2_64a(DisasContext *s, DisasOps *o)
{
in2_a2(s, o);
- tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN);
+ tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEUQ | MO_ALIGN);
}
#define SPEC_in2_m2_64a 0
#endif
TCGv_i64 t0 = tcg_temp_new_i64();
TCGv_i64 t1 = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
- tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ);
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
tcg_temp_free(t0);
t0 = tcg_temp_new_i64();
t1 = tcg_temp_new_i64();
gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8);
- tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEUQ);
for (;; v1++) {
- tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ);
write_vec_element_i64(t1, v1, 0, ES_64);
if (v1 == v3) {
break;
}
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
- tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ);
write_vec_element_i64(t1, v1, 1, ES_64);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
}
gen_helper_probe_write_access(cpu_env, o->addr1, tmp);
read_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
for (;; v1++) {
read_vec_element_i64(tmp, v1, 0, ES_64);
- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
read_vec_element_i64(tmp, v1, 1, ES_64);
- tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
+ tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
if (v1 == v3) {
break;
}
#define DELAY_SLOT_RTE (1 << 2)
#define TB_FLAG_PENDING_MOVCA (1 << 3)
+#define TB_FLAG_UNALIGN (1 << 4)
#define GUSA_SHIFT 4
#ifdef CONFIG_USER_ONLY
| (env->sr & ((1u << SR_MD) | (1u << SR_RB))) /* Bits 29-30 */
| (env->sr & (1u << SR_FD)) /* Bit 15 */
| (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 3 */
+#ifdef CONFIG_USER_ONLY
+ *flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
+#endif
}
#endif /* SH4_CPU_H */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
+#define UNALIGN(C) (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
+#define UNALIGN(C) 0
#endif
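For context, the user-only UNALIGN() macro relaxes alignment checking unless the guest asked for strict behaviour. A guest-side sketch (linux-user, standard prctl constants; not part of the patch) of how that request would be made:

    #include <stdio.h>
    #include <sys/prctl.h>

    /* Ask for SIGBUS on unaligned accesses; translated code then uses
     * MO_ALIGN rather than MO_UNALN for the affected loads and stores. */
    int request_strict_alignment(void)
    {
        if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS, 0, 0, 0) != 0) {
            perror("prctl(PR_SET_UNALIGN)");
            return -1;
        }
        return 0;
    }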
/* Target-specific values for ctx->base.is_jmp. */
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUL | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
- tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
+ MO_TESL | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
return;
case 0x2001: /* mov.w Rm,@Rn */
- tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
+ MO_TEUW | UNALIGN(ctx));
return;
case 0x2002: /* mov.l Rm,@Rn */
- tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
+ MO_TEUL | UNALIGN(ctx));
return;
case 0x6000: /* mov.b @Rm,Rn */
tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
return;
case 0x6001: /* mov.w @Rm,Rn */
- tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TESW | UNALIGN(ctx));
return;
case 0x6002: /* mov.l @Rm,Rn */
- tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TESL | UNALIGN(ctx));
return;
case 0x2004: /* mov.b Rm,@-Rn */
{
{
TCGv addr = tcg_temp_new();
tcg_gen_subi_i32(addr, REG(B11_8), 2);
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUW | UNALIGN(ctx));
tcg_gen_mov_i32(REG(B11_8), addr);
tcg_temp_free(addr);
}
{
TCGv addr = tcg_temp_new();
tcg_gen_subi_i32(addr, REG(B11_8), 4);
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUL | UNALIGN(ctx));
tcg_gen_mov_i32(REG(B11_8), addr);
tcg_temp_free(addr);
}
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
return;
case 0x6005: /* mov.w @Rm+,Rn */
- tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TESW | UNALIGN(ctx));
if ( B11_8 != B7_4 )
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
return;
case 0x6006: /* mov.l @Rm+,Rn */
- tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
+ MO_TESL | UNALIGN(ctx));
if ( B11_8 != B7_4 )
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
return;
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B11_8), REG(0));
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUW | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B11_8), REG(0));
- tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
+ MO_TEUL | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
- tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
+ MO_TESW | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
{
TCGv addr = tcg_temp_new();
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
- tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
+ MO_TESL | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, XHACK(B7_4));
- tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
+ tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEUQ);
tcg_temp_free_i64(fp);
} else {
tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ);
gen_store_fpr64(ctx, fp, XHACK(B11_8));
tcg_temp_free_i64(fp);
} else {
CHECK_FPU_ENABLED
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ);
gen_store_fpr64(ctx, fp, XHACK(B11_8));
tcg_temp_free_i64(fp);
tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, XHACK(B7_4));
tcg_gen_subi_i32(addr, REG(B11_8), 8);
- tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
+ tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ);
tcg_temp_free_i64(fp);
} else {
tcg_gen_subi_i32(addr, REG(B11_8), 4);
tcg_gen_add_i32(addr, REG(B7_4), REG(0));
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
+ tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEUQ);
gen_store_fpr64(ctx, fp, XHACK(B11_8));
tcg_temp_free_i64(fp);
} else {
if (ctx->tbflags & FPSCR_SZ) {
TCGv_i64 fp = tcg_temp_new_i64();
gen_load_fpr64(ctx, fp, XHACK(B7_4));
- tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
+ tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ);
tcg_temp_free_i64(fp);
} else {
tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
- tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
+ MO_TEUW | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
{
TCGv addr = tcg_temp_new();
tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
- tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
+ MO_TESW | UNALIGN(ctx));
tcg_temp_free(addr);
}
return;
static void gen_ldf_asi(DisasContext *dc, TCGv addr,
int insn, int size, int rd)
{
- DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
+ DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
TCGv_i32 d32;
TCGv_i64 d64;
static void gen_stf_asi(DisasContext *dc, TCGv addr,
int insn, int size, int rd)
{
- DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
+ DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEUQ));
TCGv_i32 d32;
switch (da.type) {
static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
{
- DisasASI da = get_asi(dc, insn, MO_TEQ);
+ DisasASI da = get_asi(dc, insn, MO_TEUQ);
TCGv_i64 hi = gen_dest_gpr(dc, rd);
TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
int insn, int rd)
{
- DisasASI da = get_asi(dc, insn, MO_TEQ);
+ DisasASI da = get_asi(dc, insn, MO_TEUQ);
TCGv lo = gen_load_gpr(dc, rd + 1);
switch (da.type) {
static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
int insn, int rd)
{
- DisasASI da = get_asi(dc, insn, MO_TEQ);
+ DisasASI da = get_asi(dc, insn, MO_TEUQ);
TCGv oldv;
switch (da.type) {
TCGv lo = gen_dest_gpr(dc, rd | 1);
TCGv hi = gen_dest_gpr(dc, rd);
TCGv_i64 t64 = tcg_temp_new_i64();
- DisasASI da = get_asi(dc, insn, MO_TEQ);
+ DisasASI da = get_asi(dc, insn, MO_TEUQ);
switch (da.type) {
case GET_ASI_EXCP:
default:
{
TCGv_i32 r_asi = tcg_const_i32(da.asi);
- TCGv_i32 r_mop = tcg_const_i32(MO_Q);
+ TCGv_i32 r_mop = tcg_const_i32(MO_UQ);
save_state(dc);
gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
int insn, int rd)
{
- DisasASI da = get_asi(dc, insn, MO_TEQ);
+ DisasASI da = get_asi(dc, insn, MO_TEUQ);
TCGv lo = gen_load_gpr(dc, rd + 1);
TCGv_i64 t64 = tcg_temp_new_i64();
default:
{
TCGv_i32 r_asi = tcg_const_i32(da.asi);
- TCGv_i32 r_mop = tcg_const_i32(MO_Q);
+ TCGv_i32 r_mop = tcg_const_i32(MO_UQ);
save_state(dc);
gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
break;
case 0x1b: /* V9 ldxa */
- gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
break;
case 0x2d: /* V9 prefetch, no effect */
goto skip_move;
if (rd == 1) {
TCGv_i64 t64 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(t64, cpu_addr,
- dc->mem_idx, MO_TEQ);
+ dc->mem_idx, MO_TEUQ);
gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
tcg_temp_free_i64(t64);
break;
gen_address_mask(dc, cpu_addr);
cpu_src1_64 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
- MO_TEQ | MO_ALIGN_4);
+ MO_TEUQ | MO_ALIGN_4);
tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
cpu_src2_64 = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
- MO_TEQ | MO_ALIGN_4);
+ MO_TEUQ | MO_ALIGN_4);
gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
tcg_temp_free_i64(cpu_src1_64);
tcg_temp_free_i64(cpu_src2_64);
gen_address_mask(dc, cpu_addr);
cpu_dst_64 = gen_dest_fpr_D(dc, rd);
tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
- MO_TEQ | MO_ALIGN_4);
+ MO_TEUQ | MO_ALIGN_4);
gen_store_fpr_D(dc, rd, cpu_dst_64);
break;
default:
tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x1e: /* V9 stxa */
- gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
+ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUQ);
break;
#endif
default:
before performing the first write. */
cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
- dc->mem_idx, MO_TEQ | MO_ALIGN_16);
+ dc->mem_idx, MO_TEUQ | MO_ALIGN_16);
tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
- dc->mem_idx, MO_TEQ);
+ dc->mem_idx, MO_TEUQ);
break;
#else /* !TARGET_SPARC64 */
/* stdfq, store floating point queue */
gen_address_mask(dc, cpu_addr);
cpu_src1_64 = gen_load_fpr_D(dc, rd);
tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
- MO_TEQ | MO_ALIGN_4);
+ MO_TEUQ | MO_ALIGN_4);
break;
default:
goto illegal_insn;
TCGv_i64 temp = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(temp, rl, rh);
- tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEUQ);
tcg_temp_free_i64(temp);
}
{
TCGv_i64 temp = tcg_temp_new_i64();
- tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ);
+ tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEUQ);
/* write back to two 32 bit regs */
tcg_gen_extr_i64_i32(rl, rh, temp);
} else {
addr = arg[1].in;
}
- mop = gen_load_store_alignment(dc, MO_TEQ, addr);
+ mop = gen_load_store_alignment(dc, MO_TEUQ, addr);
if (par[0]) {
tcg_gen_qemu_st_i64(arg[0].in, addr, dc->cring, mop);
} else {
} else {
addr = arg[1].in;
}
- mop = gen_load_store_alignment(dc, MO_TEQ, addr);
+ mop = gen_load_store_alignment(dc, MO_TEUQ, addr);
if (par[0]) {
tcg_gen_qemu_st_i64(arg[0].in, addr, dc->cring, mop);
} else {
case MO_SL:
tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
break;
- case MO_Q:
+ case MO_UQ:
tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
break;
default:
#ifdef HOST_WORDS_BIGENDIAN
[MO_UW] = helper_be_lduw_mmu,
[MO_UL] = helper_be_ldul_mmu,
- [MO_Q] = helper_be_ldq_mmu,
+ [MO_UQ] = helper_be_ldq_mmu,
[MO_SW] = helper_be_ldsw_mmu,
[MO_SL] = helper_be_ldul_mmu,
#else
[MO_UW] = helper_le_lduw_mmu,
[MO_UL] = helper_le_ldul_mmu,
- [MO_Q] = helper_le_ldq_mmu,
+ [MO_UQ] = helper_le_ldq_mmu,
[MO_SW] = helper_le_ldsw_mmu,
[MO_SL] = helper_le_ldul_mmu,
#endif
default:
tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
break;
- case MO_Q:
+ case MO_UQ:
if (datalo != TCG_REG_R1) {
tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
case MO_UL:
tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
break;
- case MO_Q:
+ case MO_UQ:
/* Avoid ldrd for user-only emulation, to handle unaligned. */
if (USING_SOFTMMU && use_armv6_instructions
&& (datalo & 1) == 0 && datahi == datalo + 1) {
case MO_UL:
tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
break;
- case MO_Q:
+ case MO_UQ:
/* Avoid ldrd for user-only emulation, to handle unaligned. */
if (USING_SOFTMMU && use_armv6_instructions
&& (datalo & 1) == 0 && datahi == datalo + 1) {
[MO_UB] = helper_ret_ldub_mmu,
[MO_LEUW] = helper_le_lduw_mmu,
[MO_LEUL] = helper_le_ldul_mmu,
- [MO_LEQ] = helper_le_ldq_mmu,
+ [MO_LEUQ] = helper_le_ldq_mmu,
[MO_BEUW] = helper_be_lduw_mmu,
[MO_BEUL] = helper_be_ldul_mmu,
- [MO_BEQ] = helper_be_ldq_mmu,
+ [MO_BEUQ] = helper_be_ldq_mmu,
};
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
[MO_UB] = helper_ret_stb_mmu,
[MO_LEUW] = helper_le_stw_mmu,
[MO_LEUL] = helper_le_stl_mmu,
- [MO_LEQ] = helper_le_stq_mmu,
+ [MO_LEUQ] = helper_le_stq_mmu,
[MO_BEUW] = helper_be_stw_mmu,
[MO_BEUL] = helper_be_stl_mmu,
- [MO_BEQ] = helper_be_stq_mmu,
+ [MO_BEUQ] = helper_be_stq_mmu,
};
/* Perform the TLB load and compare.
case MO_UL:
tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
break;
- case MO_Q:
+ case MO_UQ:
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_RAX);
} else if (data_reg == TCG_REG_EDX) {
}
break;
#endif
- case MO_Q:
+ case MO_UQ:
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_modrm_sib_offset(s, movop + P_REXW + seg, datalo,
base, index, 0, ofs);
[MO_LEUW] = helper_le_lduw_mmu,
[MO_LESW] = helper_le_ldsw_mmu,
[MO_LEUL] = helper_le_ldul_mmu,
- [MO_LEQ] = helper_le_ldq_mmu,
+ [MO_LEUQ] = helper_le_ldq_mmu,
[MO_BEUW] = helper_be_lduw_mmu,
[MO_BESW] = helper_be_ldsw_mmu,
[MO_BEUL] = helper_be_ldul_mmu,
- [MO_BEQ] = helper_be_ldq_mmu,
+ [MO_BEUQ] = helper_be_ldq_mmu,
#if TCG_TARGET_REG_BITS == 64
[MO_LESL] = helper_le_ldsl_mmu,
[MO_BESL] = helper_be_ldsl_mmu,
[MO_UB] = helper_ret_stb_mmu,
[MO_LEUW] = helper_le_stw_mmu,
[MO_LEUL] = helper_le_stl_mmu,
- [MO_LEQ] = helper_le_stq_mmu,
+ [MO_LEUQ] = helper_le_stq_mmu,
[MO_BEUW] = helper_be_stw_mmu,
[MO_BEUL] = helper_be_stl_mmu,
- [MO_BEQ] = helper_be_stq_mmu,
+ [MO_BEUQ] = helper_be_stq_mmu,
};
/* Helper routines for marshalling helper function arguments into
case MO_SL:
tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
break;
- case MO_Q | MO_BSWAP:
+ case MO_UQ | MO_BSWAP:
if (TCG_TARGET_REG_BITS == 64) {
if (use_mips32r2_instructions) {
tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3);
}
break;
- case MO_Q:
+ case MO_UQ:
/* Prefer to load from offset 0 first, but allow for overlap. */
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
CASE_OP_32_64(mul):
return x * y;
- CASE_OP_32_64(and):
+ CASE_OP_32_64_VEC(and):
return x & y;
- CASE_OP_32_64(or):
+ CASE_OP_32_64_VEC(or):
return x | y;
- CASE_OP_32_64(xor):
+ CASE_OP_32_64_VEC(xor):
return x ^ y;
case INDEX_op_shl_i32:
case INDEX_op_rotl_i64:
return rol64(x, y & 63);
- CASE_OP_32_64(not):
+ CASE_OP_32_64_VEC(not):
return ~x;
CASE_OP_32_64(neg):
return -x;
- CASE_OP_32_64(andc):
+ CASE_OP_32_64_VEC(andc):
return x & ~y;
- CASE_OP_32_64(orc):
+ CASE_OP_32_64_VEC(orc):
return x | ~y;
CASE_OP_32_64(eqv):
return false;
}
+static bool fold_commutative(OptContext *ctx, TCGOp *op)
+{
+ swap_commutative(op->args[0], &op->args[1], &op->args[2]);
+ return false;
+}
+
static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
swap_commutative(op->args[0], &op->args[1], &op->args[2]);
return false;
}
+/* We cannot as yet do_constant_folding with vectors. */
+static bool fold_add_vec(OptContext *ctx, TCGOp *op)
+{
+ if (fold_commutative(ctx, op) ||
+ fold_xi_to_x(ctx, op, 0)) {
+ return true;
+ }
+ return false;
+}
+
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
return false;
}
-static bool fold_sub(OptContext *ctx, TCGOp *op)
+/* We cannot as yet do_constant_folding with vectors. */
+static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
{
- if (fold_const2(ctx, op) ||
- fold_xx_to_i(ctx, op, 0) ||
+ if (fold_xx_to_i(ctx, op, 0) ||
fold_xi_to_x(ctx, op, 0) ||
fold_sub_to_neg(ctx, op)) {
return true;
return false;
}
+static bool fold_sub(OptContext *ctx, TCGOp *op)
+{
+ return fold_const2(ctx, op) || fold_sub_vec(ctx, op);
+}
+
static bool fold_sub2(OptContext *ctx, TCGOp *op)
{
return fold_addsub2(ctx, op, false);
* Sorted alphabetically by opcode as much as possible.
*/
switch (opc) {
- CASE_OP_32_64_VEC(add):
+ CASE_OP_32_64(add):
done = fold_add(&ctx, op);
break;
+ case INDEX_op_add_vec:
+ done = fold_add_vec(&ctx, op);
+ break;
CASE_OP_32_64(add2):
done = fold_add2(&ctx, op);
break;
CASE_OP_32_64(sextract):
done = fold_sextract(&ctx, op);
break;
- CASE_OP_32_64_VEC(sub):
+ CASE_OP_32_64(sub):
done = fold_sub(&ctx, op);
break;
+ case INDEX_op_sub_vec:
+ done = fold_sub_vec(&ctx, op);
+ break;
CASE_OP_32_64(sub2):
done = fold_sub2(&ctx, op);
break;
[MO_UB] = LBZX,
[MO_UW] = LHZX,
[MO_UL] = LWZX,
- [MO_Q] = LDX,
+ [MO_UQ] = LDX,
[MO_SW] = LHAX,
[MO_SL] = LWAX,
[MO_BSWAP | MO_UB] = LBZX,
[MO_BSWAP | MO_UW] = LHBRX,
[MO_BSWAP | MO_UL] = LWBRX,
- [MO_BSWAP | MO_Q] = LDBRX,
+ [MO_BSWAP | MO_UQ] = LDBRX,
};
static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
[MO_UB] = STBX,
[MO_UW] = STHX,
[MO_UL] = STWX,
- [MO_Q] = STDX,
+ [MO_UQ] = STDX,
[MO_BSWAP | MO_UB] = STBX,
[MO_BSWAP | MO_UW] = STHBRX,
[MO_BSWAP | MO_UL] = STWBRX,
- [MO_BSWAP | MO_Q] = STDBRX,
+ [MO_BSWAP | MO_UQ] = STDBRX,
};
static const uint32_t qemu_exts_opc[4] = {
[MO_UB] = helper_ret_ldub_mmu,
[MO_LEUW] = helper_le_lduw_mmu,
[MO_LEUL] = helper_le_ldul_mmu,
- [MO_LEQ] = helper_le_ldq_mmu,
+ [MO_LEUQ] = helper_le_ldq_mmu,
[MO_BEUW] = helper_be_lduw_mmu,
[MO_BEUL] = helper_be_ldul_mmu,
- [MO_BEQ] = helper_be_ldq_mmu,
+ [MO_BEUQ] = helper_be_ldq_mmu,
};
/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
[MO_UB] = helper_ret_stb_mmu,
[MO_LEUW] = helper_le_stw_mmu,
[MO_LEUL] = helper_le_stl_mmu,
- [MO_LEQ] = helper_le_stq_mmu,
+ [MO_LEUQ] = helper_le_stq_mmu,
[MO_BEUW] = helper_be_stw_mmu,
[MO_BEUL] = helper_be_stl_mmu,
- [MO_BEQ] = helper_be_stq_mmu,
+ [MO_BEUQ] = helper_be_stq_mmu,
};
/* We expect to use a 16-bit negative offset from ENV. */
#if TCG_TARGET_REG_BITS == 64
[MO_SL] = helper_be_ldsl_mmu,
#endif
- [MO_Q] = helper_be_ldq_mmu,
+ [MO_UQ] = helper_be_ldq_mmu,
#else
[MO_UW] = helper_le_lduw_mmu,
[MO_SW] = helper_le_ldsw_mmu,
#if TCG_TARGET_REG_BITS == 64
[MO_SL] = helper_le_ldsl_mmu,
#endif
- [MO_Q] = helper_le_ldq_mmu,
+ [MO_UQ] = helper_le_ldq_mmu,
#endif
};
case MO_SL:
tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
break;
- case MO_Q:
+ case MO_UQ:
/* Prefer to load from offset 0 first, but allow for overlap. */
if (TCG_TARGET_REG_BITS == 64) {
tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
[MO_LESW] = helper_le_ldsw_mmu,
[MO_LEUL] = helper_le_ldul_mmu,
[MO_LESL] = helper_le_ldsl_mmu,
- [MO_LEQ] = helper_le_ldq_mmu,
+ [MO_LEUQ] = helper_le_ldq_mmu,
[MO_BEUW] = helper_be_lduw_mmu,
[MO_BESW] = helper_be_ldsw_mmu,
[MO_BEUL] = helper_be_ldul_mmu,
[MO_BESL] = helper_be_ldsl_mmu,
- [MO_BEQ] = helper_be_ldq_mmu,
+ [MO_BEUQ] = helper_be_ldq_mmu,
};
static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
[MO_UB] = helper_ret_stb_mmu,
[MO_LEUW] = helper_le_stw_mmu,
[MO_LEUL] = helper_le_stl_mmu,
- [MO_LEQ] = helper_le_stq_mmu,
+ [MO_LEUQ] = helper_le_stq_mmu,
[MO_BEUW] = helper_be_stw_mmu,
[MO_BEUL] = helper_be_stl_mmu,
- [MO_BEQ] = helper_be_stq_mmu,
+ [MO_BEUQ] = helper_be_stq_mmu,
};
#endif
tcg_out_insn(s, RXY, LGF, data, base, index, disp);
break;
- case MO_Q | MO_BSWAP:
+ case MO_UQ | MO_BSWAP:
tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
break;
- case MO_Q:
+ case MO_UQ:
tcg_out_insn(s, RXY, LG, data, base, index, disp);
break;
}
break;
- case MO_Q | MO_BSWAP:
+ case MO_UQ | MO_BSWAP:
tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
break;
- case MO_Q:
+ case MO_UQ:
tcg_out_insn(s, RXY, STG, data, base, index, disp);
break;
case MO_UL:
tgen_ext32u(s, TCG_REG_R4, data_reg);
break;
- case MO_Q:
+ case MO_UQ:
tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
break;
default:
[MO_LEUW] = helper_le_lduw_mmu,
[MO_LESW] = helper_le_ldsw_mmu,
[MO_LEUL] = helper_le_ldul_mmu,
- [MO_LEQ] = helper_le_ldq_mmu,
+ [MO_LEUQ] = helper_le_ldq_mmu,
[MO_BEUW] = helper_be_lduw_mmu,
[MO_BESW] = helper_be_ldsw_mmu,
[MO_BEUL] = helper_be_ldul_mmu,
- [MO_BEQ] = helper_be_ldq_mmu,
+ [MO_BEUQ] = helper_be_ldq_mmu,
};
static void * const qemu_st_helpers[] = {
[MO_UB] = helper_ret_stb_mmu,
[MO_LEUW] = helper_le_stw_mmu,
[MO_LEUL] = helper_le_stl_mmu,
- [MO_LEQ] = helper_le_stq_mmu,
+ [MO_LEUQ] = helper_le_stq_mmu,
[MO_BEUW] = helper_be_stw_mmu,
[MO_BEUL] = helper_be_stl_mmu,
- [MO_BEQ] = helper_be_stq_mmu,
+ [MO_BEUQ] = helper_be_stq_mmu,
};
int i;
[MO_BESW] = LDSH,
[MO_BEUL] = LDUW,
[MO_BESL] = LDSW,
- [MO_BEQ] = LDX,
+ [MO_BEUQ] = LDX,
[MO_LEUW] = LDUH_LE,
[MO_LESW] = LDSH_LE,
[MO_LEUL] = LDUW_LE,
[MO_LESL] = LDSW_LE,
- [MO_LEQ] = LDX_LE,
+ [MO_LEUQ] = LDX_LE,
};
static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
[MO_BEUW] = STH,
[MO_BEUL] = STW,
- [MO_BEQ] = STX,
+ [MO_BEUQ] = STX,
[MO_LEUW] = STH_LE,
[MO_LEUL] = STW_LE,
- [MO_LEQ] = STX_LE,
+ [MO_LEUQ] = STX_LE,
};
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
[MO_LESW] = "lesw",
[MO_LEUL] = "leul",
[MO_LESL] = "lesl",
- [MO_LEQ] = "leq",
+ [MO_LEUQ] = "leq",
[MO_BEUW] = "beuw",
[MO_BESW] = "besw",
[MO_BEUL] = "beul",
[MO_BESL] = "besl",
- [MO_BEQ] = "beq",
+ [MO_BEUQ] = "beq",
};
static const char * const alignment_name[(MO_AMASK >> MO_ASHIFT) + 1] = {
return helper_le_ldul_mmu(env, taddr, oi, ra);
case MO_LESL:
return helper_le_ldsl_mmu(env, taddr, oi, ra);
- case MO_LEQ:
+ case MO_LEUQ:
return helper_le_ldq_mmu(env, taddr, oi, ra);
case MO_BEUW:
return helper_be_lduw_mmu(env, taddr, oi, ra);
return helper_be_ldul_mmu(env, taddr, oi, ra);
case MO_BESL:
return helper_be_ldsl_mmu(env, taddr, oi, ra);
- case MO_BEQ:
+ case MO_BEUQ:
return helper_be_ldq_mmu(env, taddr, oi, ra);
default:
g_assert_not_reached();
case MO_LESL:
ret = (int32_t)ldl_le_p(haddr);
break;
- case MO_LEQ:
+ case MO_LEUQ:
ret = ldq_le_p(haddr);
break;
case MO_BEUW:
case MO_BESL:
ret = (int32_t)ldl_be_p(haddr);
break;
- case MO_BEQ:
+ case MO_BEUQ:
ret = ldq_be_p(haddr);
break;
default:
case MO_LEUL:
helper_le_stl_mmu(env, taddr, val, oi, ra);
break;
- case MO_LEQ:
+ case MO_LEUQ:
helper_le_stq_mmu(env, taddr, val, oi, ra);
break;
case MO_BEUW:
case MO_BEUL:
helper_be_stl_mmu(env, taddr, val, oi, ra);
break;
- case MO_BEQ:
+ case MO_BEUQ:
helper_be_stq_mmu(env, taddr, val, oi, ra);
break;
default:
case MO_LEUL:
stl_le_p(haddr, val);
break;
- case MO_LEQ:
+ case MO_LEUQ:
stq_le_p(haddr, val);
break;
case MO_BEUW:
case MO_BEUL:
stl_be_p(haddr, val);
break;
- case MO_BEQ:
+ case MO_BEUQ:
stq_be_p(haddr, val);
break;
default:
free_test_data(&data);
}
+static void test_acpi_q35_slic(void)
+{
+ test_data data = {
+ .machine = MACHINE_Q35,
+ .variant = ".slic",
+ };
+
+ test_acpi_one("-acpitable sig=SLIC,oem_id='CRASH ',oem_table_id='ME',"
+ "oem_rev=00002210,asl_compiler_id='qemu',"
+ "asl_compiler_rev=00000000,data=/dev/null",
+ &data);
+ free_test_data(&data);
+}
+
static void test_oem_fields(test_data *data)
{
int i;
qtest_add_func("acpi/q35/kvm/dmar", test_acpi_q35_kvm_dmar);
}
qtest_add_func("acpi/q35/viot", test_acpi_q35_viot);
+ qtest_add_func("acpi/q35/slic", test_acpi_q35_slic);
} else if (strcmp(arch, "aarch64") == 0) {
if (has_tcg) {
qtest_add_func("acpi/virt", test_acpi_virt_tcg);
qtest_add_func("hd-geo/ide/device/user/chst", test_ide_device_user_chst);
if (have_qemu_img()) {
qtest_add_func("hd-geo/override/ide", test_override_ide);
- qtest_add_func("hd-geo/override/scsi", test_override_scsi);
- qtest_add_func("hd-geo/override/scsi_2_controllers",
- test_override_scsi_2_controllers);
+ if (qtest_has_device("lsi53c895a")) {
+ qtest_add_func("hd-geo/override/scsi", test_override_scsi);
+ qtest_add_func("hd-geo/override/scsi_2_controllers",
+ test_override_scsi_2_controllers);
+ }
qtest_add_func("hd-geo/override/virtio_blk", test_override_virtio_blk);
qtest_add_func("hd-geo/override/zero_chs", test_override_zero_chs);
qtest_add_func("hd-geo/override/scsi_hot_unplug",
/* Check compatibility of old machine-types that didn't
* auto-increase level/xlevel/xlevel2: */
-
- add_cpuid_test("x86/cpuid/auto-level/pc-2.7",
- "-machine pc-i440fx-2.7 -cpu 486,arat=on,avx512vbmi=on,xsaveopt=on",
- "level", 1);
- add_cpuid_test("x86/cpuid/auto-xlevel/pc-2.7",
- "-machine pc-i440fx-2.7 -cpu 486,3dnow=on,sse4a=on,invtsc=on,npt=on,svm=on",
- "xlevel", 0);
- add_cpuid_test("x86/cpuid/auto-xlevel2/pc-2.7",
- "-machine pc-i440fx-2.7 -cpu 486,xstore=on",
- "xlevel2", 0);
+ if (qtest_has_machine("pc-i440fx-2.7")) {
+ add_cpuid_test("x86/cpuid/auto-level/pc-2.7",
+ "-machine pc-i440fx-2.7 -cpu 486,arat=on,avx512vbmi=on,xsaveopt=on",
+ "level", 1);
+ add_cpuid_test("x86/cpuid/auto-xlevel/pc-2.7",
+ "-machine pc-i440fx-2.7 -cpu 486,3dnow=on,sse4a=on,invtsc=on,npt=on,svm=on",
+ "xlevel", 0);
+ add_cpuid_test("x86/cpuid/auto-xlevel2/pc-2.7",
+ "-machine pc-i440fx-2.7 -cpu 486,xstore=on",
+ "xlevel2", 0);
+ }
/*
* QEMU 1.4.0 had auto-level enabled for CPUID[7], already,
* and the compat code that sets default level shouldn't
* disable the auto-level=7 code:
*/
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.4/off",
- "-machine pc-i440fx-1.4 -cpu Nehalem",
- "level", 2);
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.5/on",
- "-machine pc-i440fx-1.4 -cpu Nehalem,smap=on",
- "level", 7);
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/off",
- "-machine pc-i440fx-2.3 -cpu Penryn",
- "level", 4);
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/on",
- "-machine pc-i440fx-2.3 -cpu Penryn,erms=on",
- "level", 7);
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/off",
- "-machine pc-i440fx-2.9 -cpu Conroe",
- "level", 10);
- add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/on",
- "-machine pc-i440fx-2.9 -cpu Conroe,erms=on",
- "level", 10);
+ if (qtest_has_machine("pc-i440fx-1.4")) {
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.4/off",
+ "-machine pc-i440fx-1.4 -cpu Nehalem",
+ "level", 2);
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-1.5/on",
+ "-machine pc-i440fx-1.4 -cpu Nehalem,smap=on",
+ "level", 7);
+ }
+ if (qtest_has_machine("pc-i440fx-2.3")) {
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/off",
+ "-machine pc-i440fx-2.3 -cpu Penryn",
+ "level", 4);
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.3/on",
+ "-machine pc-i440fx-2.3 -cpu Penryn,erms=on",
+ "level", 7);
+ }
+ if (qtest_has_machine("pc-i440fx-2.9")) {
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/off",
+ "-machine pc-i440fx-2.9 -cpu Conroe",
+ "level", 10);
+ add_cpuid_test("x86/cpuid/auto-level7/pc-i440fx-2.9/on",
+ "-machine pc-i440fx-2.9 -cpu Conroe,erms=on",
+ "level", 10);
+ }
/*
* xlevel doesn't have any feature that triggers auto-level
* code on old machine-types. Just check that the compat code
* is working correctly:
*/
- add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.3",
- "-machine pc-i440fx-2.3 -cpu SandyBridge",
- "xlevel", 0x8000000a);
- add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-off",
- "-machine pc-i440fx-2.4 -cpu SandyBridge,",
- "xlevel", 0x80000008);
- add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-on",
- "-machine pc-i440fx-2.4 -cpu SandyBridge,svm=on,npt=on",
- "xlevel", 0x80000008);
+ if (qtest_has_machine("pc-i440fx-2.3")) {
+ add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.3",
+ "-machine pc-i440fx-2.3 -cpu SandyBridge",
+ "xlevel", 0x8000000a);
+ }
+ if (qtest_has_machine("pc-i440fx-2.4")) {
+ add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-off",
+ "-machine pc-i440fx-2.4 -cpu SandyBridge,",
+ "xlevel", 0x80000008);
+ add_cpuid_test("x86/cpuid/xlevel-compat/pc-i440fx-2.4/npt-on",
+ "-machine pc-i440fx-2.4 -cpu SandyBridge,svm=on,npt=on",
+ "xlevel", 0x80000008);
+ }
/* Test feature parsing */
add_feature_test("x86/cpuid/features/plus",
VPATH += $(SRC_PATH)/tests/tcg/ppc64le
ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_POWER8_VECTOR),)
-PPC64_TESTS=bcdsub
+PPC64_TESTS=bcdsub non_signalling_xscv
endif
-bcdsub: CFLAGS += -mpower8-vector
+$(PPC64_TESTS): CFLAGS += -mpower8-vector
PPC64_TESTS += byte_reverse
PPC64_TESTS += mtfsf
VPATH += $(SRC_PATH)/tests/tcg/ppc64le
ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_POWER8_VECTOR),)
-PPC64LE_TESTS=bcdsub
+PPC64LE_TESTS=bcdsub non_signalling_xscv
endif
-bcdsub: CFLAGS += -mpower8-vector
+$(PPC64LE_TESTS): CFLAGS += -mpower8-vector
ifneq ($(DOCKER_IMAGE)$(CROSS_CC_HAS_POWER10),)
PPC64LE_TESTS += byte_reverse
--- /dev/null
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#define TEST(INSN, B_HI, B_LO, T_HI, T_LO) \
+ do { \
+ uint64_t th, tl, bh = B_HI, bl = B_LO; \
+ asm("mtvsrd 0, %2\n\t" \
+ "mtvsrd 1, %3\n\t" \
+ "xxmrghd 0, 0, 1\n\t" \
+ INSN " 0, 0\n\t" \
+ "mfvsrd %0, 0\n\t" \
+ "xxswapd 0, 0\n\t" \
+ "mfvsrd %1, 0\n\t" \
+ : "=r" (th), "=r" (tl) \
+ : "r" (bh), "r" (bl) \
+ : "vs0", "vs1"); \
+ printf(INSN "(0x%016" PRIx64 "%016" PRIx64 ") = 0x%016" PRIx64 \
+ "%016" PRIx64 "\n", bh, bl, th, tl); \
+ assert(th == T_HI && tl == T_LO); \
+ } while (0)
+
+int main(void)
+{
+ /* SNaN shouldn't be silenced */
+ TEST("xscvspdpn", 0x7fbfffff00000000ULL, 0x0, 0x7ff7ffffe0000000ULL, 0x0);
+ TEST("xscvdpspn", 0x7ff7ffffffffffffULL, 0x0, 0x7fbfffff7fbfffffULL, 0x0);
+
+ /*
+     * SNaN inputs having no bits set in the upper 23 bits of the
+     * significand will return Infinity as the result.
+ */
+ TEST("xscvdpspn", 0x7ff000001fffffffULL, 0x0, 0x7f8000007f800000ULL, 0x0);
+
+ return 0;
+}
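The expected values in the first TEST line follow directly from the non-signalling conversion: the exponent field is widened and the fraction shifted, while the quiet bit is left untouched. A host-side check of that arithmetic (illustrative only, not part of the test):

    uint32_t s = 0x7fbfffff;                         /* single-precision SNaN */
    uint64_t frac = (uint64_t)(s & 0x7fffff) << 29;  /* 23-bit fraction -> top of 52 bits */
    uint64_t d = (0x7ffULL << 52) | frac;            /* all-ones exponent, same payload */
    assert(d == 0x7ff7ffffe0000000ULL);              /* the value TEST expects */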
};
int i;
+ i = g_file_open_tmp("unix-XXXXXX", &addr.u.q_unix.path, NULL);
+ g_assert_true(i >= 0);
+ close(i);
+
addr.type = SOCKET_ADDRESS_TYPE_UNIX;
- addr.u.q_unix.path = g_strdup_printf("unix-%d-%u",
- getpid(), g_random_int());
addr.u.q_unix.has_abstract = true;
addr.u.q_unix.abstract = true;
addr.u.q_unix.has_tight = false;
--- /dev/null
+/*
+ * 128-bit division and remainder for compilers not supporting __int128
+ *
+ * Copyright (c) 2021 Frédéric Pétrot <frederic.petrot@univ-grenoble-alpes.fr>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/host-utils.h"
+#include "qemu/int128.h"
+
+#ifndef CONFIG_INT128
+
+/*
+ * Division and remainder algorithms for 128-bit values, due to Stefan Kanthak,
+ * https://skanthak.homepage.t-online.de/integer.html#udivmodti4
+ * Preconditions:
+ * - the function must never be called with v equal to 0; that case has to
+ *   be dealt with beforehand
+ * - the quotient pointer must be valid
+ */
+static Int128 divrem128(Int128 u, Int128 v, Int128 *q)
+{
+ Int128 qq;
+ uint64_t hi, lo, tmp;
+ int s = clz64(v.hi);
+
+ if (s == 64) {
+ /* v.hi == 0 (the "uu÷0v" case): the divisor fits in 64 bits, use divu128 */
+ hi = u.hi;
+ lo = u.lo;
+ tmp = divu128(&lo, &hi, v.lo);
+ *q = int128_make128(lo, hi);
+ return int128_make128(tmp, 0);
+ } else {
+ hi = int128_gethi(int128_lshift(v, s));
+
+ if (hi > u.hi) {
+ lo = u.lo;
+ tmp = u.hi;
+ divu128(&lo, &tmp, hi);
+ lo = int128_gethi(int128_lshift(int128_make128(lo, 0), s));
+ } else { /* prevent overflow */
+ lo = u.lo;
+ tmp = u.hi - hi;
+ divu128(&lo, &tmp, hi);
+ lo = int128_gethi(int128_lshift(int128_make128(lo, 1), s));
+ }
+
+ qq = int128_make64(lo);
+
+ tmp = lo * v.hi;
+ mulu64(&lo, &hi, lo, v.lo);
+ hi += tmp;
+
+ if (hi < tmp /* quotient * divisor >= 2**128 > dividend */
+ || hi > u.hi /* quotient * divisor > dividend */
+ || (hi == u.hi && lo > u.lo)) {
+ qq.lo -= 1;
+ mulu64(&lo, &hi, qq.lo, v.lo);
+ hi += qq.lo * v.hi;
+ }
+
+ *q = qq;
+ u.hi -= hi + (u.lo < lo);
+ u.lo -= lo;
+ return u;
+ }
+}
+
+Int128 int128_divu(Int128 a, Int128 b)
+{
+ Int128 q;
+ divrem128(a, b, &q);
+ return q;
+}
+
+Int128 int128_remu(Int128 a, Int128 b)
+{
+ Int128 q;
+ return divrem128(a, b, &q);
+}
+
+Int128 int128_divs(Int128 a, Int128 b)
+{
+ Int128 q;
+ bool sgna = !int128_nonneg(a);
+ bool sgnb = !int128_nonneg(b);
+
+ if (sgna) {
+ a = int128_neg(a);
+ }
+
+ if (sgnb) {
+ b = int128_neg(b);
+ }
+
+ divrem128(a, b, &q);
+
+ if (sgna != sgnb) {
+ q = int128_neg(q);
+ }
+
+ return q;
+}
+
+Int128 int128_rems(Int128 a, Int128 b)
+{
+ Int128 q, r;
+ bool sgna = !int128_nonneg(a);
+ bool sgnb = !int128_nonneg(b);
+
+ if (sgna) {
+ a = int128_neg(a);
+ }
+
+ if (sgnb) {
+ b = int128_neg(b);
+ }
+
+ r = divrem128(a, b, &q);
+
+ if (sgna) {
+ r = int128_neg(r);
+ }
+
+ return r;
+}
+
+#endif
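A quick usage sketch for this fallback path (these helpers are only compiled when CONFIG_INT128 is not defined; the values below are worked out by hand): dividing 2^64 by 3 yields 0x5555555555555555 with a remainder of 1.

    Int128 n = int128_make128(0, 1);   /* lo = 0, hi = 1  =>  n = 2^64 */
    Int128 d = int128_make64(3);
    Int128 q = int128_divu(n, d);      /* 0x5555555555555555 */
    Int128 r = int128_remu(n, d);      /* 1 */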
util_ss.add(when: 'CONFIG_POSIX', if_true: files('drm.c'))
util_ss.add(files('guest-random.c'))
util_ss.add(files('yank.c'))
+util_ss.add(files('int128.c'))
if have_user
util_ss.add(files('selfmap.c'))
#include "sysemu/sysemu.h"
#include "trace.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#include "qemu/sockets.h"
#include "qemu/thread.h"
#include <libgen.h>
#include "qemu/cutils.h"
#include "qemu/compiler.h"
+#include "qemu/units.h"
#ifdef CONFIG_LINUX
#include <sys/syscall.h>
#define MAX_MEM_PREALLOC_THREAD_COUNT 16
+struct MemsetThread;
+
+typedef struct MemsetContext {
+ bool all_threads_created;
+ bool any_thread_failed;
+ struct MemsetThread *threads;
+ int num_threads;
+} MemsetContext;
+
struct MemsetThread {
char *addr;
size_t numpages;
size_t hpagesize;
QemuThread pgthread;
sigjmp_buf env;
+ MemsetContext *context;
};
typedef struct MemsetThread MemsetThread;
-static MemsetThread *memset_thread;
-static int memset_num_threads;
-static bool memset_thread_failed;
+/* used by sigbus_handler() */
+static MemsetContext *sigbus_memset_context;
+struct sigaction sigbus_oldact;
+static QemuMutex sigbus_mutex;
static QemuMutex page_mutex;
static QemuCond page_cond;
-static bool threads_created_flag;
int qemu_get_thread_id(void)
{
return exec_dir;
}
+#ifdef CONFIG_LINUX
+static void sigbus_handler(int signal, siginfo_t *siginfo, void *ctx)
+#else /* CONFIG_LINUX */
static void sigbus_handler(int signal)
+#endif /* CONFIG_LINUX */
{
int i;
- if (memset_thread) {
- for (i = 0; i < memset_num_threads; i++) {
- if (qemu_thread_is_self(&memset_thread[i].pgthread)) {
- siglongjmp(memset_thread[i].env, 1);
+
+ if (sigbus_memset_context) {
+ for (i = 0; i < sigbus_memset_context->num_threads; i++) {
+ MemsetThread *thread = &sigbus_memset_context->threads[i];
+
+ if (qemu_thread_is_self(&thread->pgthread)) {
+ siglongjmp(thread->env, 1);
}
}
}
+
+#ifdef CONFIG_LINUX
+ /*
+ * We assume that the MCE SIGBUS handler could have been registered. We
+ * should never receive BUS_MCEERR_AO on any of our threads, but only on
+ * the main thread registered for PR_MCE_KILL_EARLY. Further, we should not
+ * receive BUS_MCEERR_AR triggered by action of other threads on one of
+ * our threads. So, no need to check for unrelated SIGBUS when seeing one
+ * for our threads.
+ *
+ * We will forward to the MCE handler, which will either handle the SIGBUS
+ * or reinstall the default SIGBUS handler and reraise the SIGBUS. The
+ * default SIGBUS handler will crash the process, so we don't care.
+ */
+ if (sigbus_oldact.sa_flags & SA_SIGINFO) {
+ sigbus_oldact.sa_sigaction(signal, siginfo, ctx);
+ return;
+ }
+#endif /* CONFIG_LINUX */
+ warn_report("os_mem_prealloc: unrelated SIGBUS detected and ignored");
}
static void *do_touch_pages(void *arg)
{
MemsetThread *memset_args = (MemsetThread *)arg;
sigset_t set, oldset;
+ int ret = 0;
/*
* On Linux, the page faults from the loop below can cause mmap_sem
* clearing until all threads have been created.
*/
qemu_mutex_lock(&page_mutex);
- while(!threads_created_flag){
+ while (!memset_args->context->all_threads_created) {
qemu_cond_wait(&page_cond, &page_mutex);
}
qemu_mutex_unlock(&page_mutex);
pthread_sigmask(SIG_UNBLOCK, &set, &oldset);
if (sigsetjmp(memset_args->env, 1)) {
- memset_thread_failed = true;
+ ret = -EFAULT;
} else {
char *addr = memset_args->addr;
size_t numpages = memset_args->numpages;
*
* 'volatile' to stop compiler optimizing this away
* to a no-op
- *
- * TODO: get a better solution from kernel so we
- * don't need to write at all so we don't cause
- * wear on the storage backing the region...
*/
*(volatile char *)addr = *addr;
addr += hpagesize;
}
}
pthread_sigmask(SIG_SETMASK, &oldset, NULL);
- return NULL;
+ return (void *)(uintptr_t)ret;
}
-static inline int get_memset_num_threads(int smp_cpus)
+static void *do_madv_populate_write_pages(void *arg)
+{
+ MemsetThread *memset_args = (MemsetThread *)arg;
+ const size_t size = memset_args->numpages * memset_args->hpagesize;
+ char * const addr = memset_args->addr;
+ int ret = 0;
+
+ /* See do_touch_pages(). */
+ qemu_mutex_lock(&page_mutex);
+ while (!memset_args->context->all_threads_created) {
+ qemu_cond_wait(&page_cond, &page_mutex);
+ }
+ qemu_mutex_unlock(&page_mutex);
+
+ if (size && qemu_madvise(addr, size, QEMU_MADV_POPULATE_WRITE)) {
+ ret = -errno;
+ }
+ return (void *)(uintptr_t)ret;
+}
+
+static inline int get_memset_num_threads(size_t hpagesize, size_t numpages,
+ int smp_cpus)
{
long host_procs = sysconf(_SC_NPROCESSORS_ONLN);
int ret = 1;
if (host_procs > 0) {
ret = MIN(MIN(host_procs, MAX_MEM_PREALLOC_THREAD_COUNT), smp_cpus);
}
+
+ /* Especially with gigantic pages, don't create more threads than pages. */
+ ret = MIN(ret, numpages);
+ /* Don't start threads to prealloc comparatively little memory. */
+ ret = MIN(ret, MAX(1, hpagesize * numpages / (64 * MiB)));
+
/* In case sysconf() fails, we fall back to single threaded */
return ret;
}
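Worked through with concrete numbers, the new caps interact as follows (illustrative values only):

    /*
     * Preallocating 1 GiB of 2 MiB pages (numpages = 512) on a 32-CPU host
     * with smp_cpus = 16 and MAX_MEM_PREALLOC_THREAD_COUNT = 16:
     *   MIN(MIN(32, 16), 16)             -> 16   (host/guest CPU limits)
     *   MIN(16, 512)                     -> 16   (more pages than threads)
     *   MIN(16, MAX(1, 1 GiB / 64 MiB))  -> 16   (enough memory to justify them)
     * Preallocating only 32 MiB instead caps the result at a single thread,
     * since MAX(1, 32 MiB / 64 MiB) evaluates to 1.
     */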
-static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages,
- int smp_cpus)
+static int touch_all_pages(char *area, size_t hpagesize, size_t numpages,
+ int smp_cpus, bool use_madv_populate_write)
{
static gsize initialized = 0;
+ MemsetContext context = {
+ .num_threads = get_memset_num_threads(hpagesize, numpages, smp_cpus),
+ };
size_t numpages_per_thread, leftover;
+ void *(*touch_fn)(void *);
+ int ret = 0, i = 0;
char *addr = area;
- int i = 0;
if (g_once_init_enter(&initialized)) {
qemu_mutex_init(&page_mutex);
g_once_init_leave(&initialized, 1);
}
- memset_thread_failed = false;
- threads_created_flag = false;
- memset_num_threads = get_memset_num_threads(smp_cpus);
- memset_thread = g_new0(MemsetThread, memset_num_threads);
- numpages_per_thread = numpages / memset_num_threads;
- leftover = numpages % memset_num_threads;
- for (i = 0; i < memset_num_threads; i++) {
- memset_thread[i].addr = addr;
- memset_thread[i].numpages = numpages_per_thread + (i < leftover);
- memset_thread[i].hpagesize = hpagesize;
- qemu_thread_create(&memset_thread[i].pgthread, "touch_pages",
- do_touch_pages, &memset_thread[i],
+ if (use_madv_populate_write) {
+ /* Avoid creating a single thread for MADV_POPULATE_WRITE */
+ if (context.num_threads == 1) {
+ if (qemu_madvise(area, hpagesize * numpages,
+ QEMU_MADV_POPULATE_WRITE)) {
+ return -errno;
+ }
+ return 0;
+ }
+ touch_fn = do_madv_populate_write_pages;
+ } else {
+ touch_fn = do_touch_pages;
+ }
+
+ context.threads = g_new0(MemsetThread, context.num_threads);
+ numpages_per_thread = numpages / context.num_threads;
+ leftover = numpages % context.num_threads;
+ for (i = 0; i < context.num_threads; i++) {
+ context.threads[i].addr = addr;
+ context.threads[i].numpages = numpages_per_thread + (i < leftover);
+ context.threads[i].hpagesize = hpagesize;
+ context.threads[i].context = &context;
+ qemu_thread_create(&context.threads[i].pgthread, "touch_pages",
+ touch_fn, &context.threads[i],
QEMU_THREAD_JOINABLE);
- addr += memset_thread[i].numpages * hpagesize;
+ addr += context.threads[i].numpages * hpagesize;
+ }
+
+ if (!use_madv_populate_write) {
+ sigbus_memset_context = &context;
}
qemu_mutex_lock(&page_mutex);
- threads_created_flag = true;
+ context.all_threads_created = true;
qemu_cond_broadcast(&page_cond);
qemu_mutex_unlock(&page_mutex);
- for (i = 0; i < memset_num_threads; i++) {
- qemu_thread_join(&memset_thread[i].pgthread);
+ for (i = 0; i < context.num_threads; i++) {
+ int tmp = (uintptr_t)qemu_thread_join(&context.threads[i].pgthread);
+
+ if (tmp) {
+ ret = tmp;
+ }
}
- g_free(memset_thread);
- memset_thread = NULL;
- return memset_thread_failed;
+ if (!use_madv_populate_write) {
+ sigbus_memset_context = NULL;
+ }
+ g_free(context.threads);
+
+ return ret;
+}
+
+static bool madv_populate_write_possible(char *area, size_t pagesize)
+{
+ return !qemu_madvise(area, pagesize, QEMU_MADV_POPULATE_WRITE) ||
+ errno != EINVAL;
}
void os_mem_prealloc(int fd, char *area, size_t memory, int smp_cpus,
Error **errp)
{
+ static gsize initialized;
int ret;
- struct sigaction act, oldact;
size_t hpagesize = qemu_fd_getpagesize(fd);
size_t numpages = DIV_ROUND_UP(memory, hpagesize);
+ bool use_madv_populate_write;
+ struct sigaction act;
- memset(&act, 0, sizeof(act));
- act.sa_handler = &sigbus_handler;
- act.sa_flags = 0;
+ /*
+ * Sense on every invocation, as MADV_POPULATE_WRITE cannot be used for
+ * some special mappings, such as mapping /dev/mem.
+ */
+ use_madv_populate_write = madv_populate_write_possible(area, hpagesize);
- ret = sigaction(SIGBUS, &act, &oldact);
- if (ret) {
- error_setg_errno(errp, errno,
- "os_mem_prealloc: failed to install signal handler");
- return;
+ if (!use_madv_populate_write) {
+ if (g_once_init_enter(&initialized)) {
+ qemu_mutex_init(&sigbus_mutex);
+ g_once_init_leave(&initialized, 1);
+ }
+
+ qemu_mutex_lock(&sigbus_mutex);
+ memset(&act, 0, sizeof(act));
+#ifdef CONFIG_LINUX
+ act.sa_sigaction = &sigbus_handler;
+ act.sa_flags = SA_SIGINFO;
+#else /* CONFIG_LINUX */
+ act.sa_handler = &sigbus_handler;
+ act.sa_flags = 0;
+#endif /* CONFIG_LINUX */
+
+ ret = sigaction(SIGBUS, &act, &sigbus_oldact);
+ if (ret) {
+ error_setg_errno(errp, errno,
+ "os_mem_prealloc: failed to install signal handler");
+ return;
+ }
}
/* touch pages simultaneously */
- if (touch_all_pages(area, hpagesize, numpages, smp_cpus)) {
- error_setg(errp, "os_mem_prealloc: Insufficient free host memory "
- "pages available to allocate guest RAM");
+ ret = touch_all_pages(area, hpagesize, numpages, smp_cpus,
+ use_madv_populate_write);
+ if (ret) {
+ error_setg_errno(errp, -ret,
+ "os_mem_prealloc: preallocating memory failed");
}
- ret = sigaction(SIGBUS, &oldact, NULL);
- if (ret) {
- /* Terminate QEMU since it can't recover from error */
- perror("os_mem_prealloc: failed to reinstall signal handler");
- exit(1);
+ if (!use_madv_populate_write) {
+ ret = sigaction(SIGBUS, &sigbus_oldact, NULL);
+ if (ret) {
+ /* Terminate QEMU since it can't recover from error */
+ perror("os_mem_prealloc: failed to reinstall signal handler");
+ exit(1);
+ }
+ qemu_mutex_unlock(&sigbus_mutex);
}
}
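A hedged caller sketch; the names ram_fd, ram_ptr, ram_size and the smp count are placeholders, not taken from the patch:

    Error *local_err = NULL;

    os_mem_prealloc(ram_fd, ram_ptr, ram_size, smp_cpus, &local_err);
    if (local_err) {
        /* covers both the SIGBUS path and -errno from MADV_POPULATE_WRITE */
        error_report_err(local_err);
    }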