#include "hw/core/tcg-cpu-ops.h"
#endif /* CONFIG_TCG */
#include "internals.h"
+#include "cpu-features.h"
#include "exec/exec-all.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
env->cp15.sctlr_el[1] |= SCTLR_TSCXT;
/* Disable access to Debug Communication Channel (DCC). */
env->cp15.mdscr_el1 |= 1 << 12;
+ /* Enable FEAT_MOPS: SCTLR_EL1.MSCEN permits the memcpy/memset insns at EL0 */
+ env->cp15.sctlr_el[1] |= SCTLR_MSCEN;
#else
/* Reset into the highest available EL */
if (arm_feature(env, ARM_FEATURE_EL3)) {
}
}
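+
+/*
+ * Emulate the CPU state that firmware would normally set up before
+ * entering a guest kernel at @target_el; cpu_reset() is assumed to
+ * have run already. For example, a board model that wants the guest
+ * to start at EL2 would call arm_emulate_firmware_reset(cpu, 2).
+ */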
+void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
+{
+ ARMCPU *cpu = ARM_CPU(cpustate);
+ CPUARMState *env = &cpu->env;
+ bool have_el3 = arm_feature(env, ARM_FEATURE_EL3);
+ bool have_el2 = arm_feature(env, ARM_FEATURE_EL2);
+
+ /*
+ * Check we have the EL we're aiming for. If that is the
+ * highest implemented EL, then cpu_reset has already done
+ * all the work.
+ */
+ switch (target_el) {
+ case 3:
+ assert(have_el3);
+ return;
+ case 2:
+ assert(have_el2);
+ if (!have_el3) {
+ return;
+ }
+ break;
+ case 1:
+ if (!have_el3 && !have_el2) {
+ return;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (have_el3) {
+ /*
+ * Set the EL3 state so code can run at EL2. This should match
+ * the requirements set by Linux in its booting spec.
+ */
+ if (env->aarch64) {
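+ /* RW: the next lower ELs are AArch64 */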
+ env->cp15.scr_el3 |= SCR_RW;
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
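+ /* API/APK: don't trap pointer-auth insns or key registers to EL3 */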
+ env->cp15.scr_el3 |= SCR_API | SCR_APK;
+ }
+ if (cpu_isar_feature(aa64_mte, cpu)) {
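+ /* ATA: allow access to MTE allocation tags at lower ELs */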
+ env->cp15.scr_el3 |= SCR_ATA;
+ }
+ if (cpu_isar_feature(aa64_sve, cpu)) {
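+ /* EZ: don't trap SVE to EL3; ZCR_EL3 = 0xf requests the max vector length */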
+ env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
+ env->vfp.zcr_el[3] = 0xf;
+ }
+ if (cpu_isar_feature(aa64_sme, cpu)) {
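+ /*
+ * ESM/EnTP2: don't trap SME insns or TPIDR2_EL0 to EL3;
+ * SMCR_EL3 = 0xf requests the maximum streaming vector length.
+ */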
+ env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
+ env->cp15.scr_el3 |= SCR_ENTP2;
+ env->vfp.smcr_el[3] = 0xf;
+ }
+ if (cpu_isar_feature(aa64_hcx, cpu)) {
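+ /* HXEN: don't trap HCRX_EL2 accesses to EL3 */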
+ env->cp15.scr_el3 |= SCR_HXEN;
+ }
+ if (cpu_isar_feature(aa64_fgt, cpu)) {
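+ /* FGTEN: don't trap accesses to the fine-grained trap registers */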
+ env->cp15.scr_el3 |= SCR_FGTEN;
+ }
+ }
+
+ if (target_el == 2) {
+ /* If the guest is at EL2 then Linux expects the HVC insn to work */
+ env->cp15.scr_el3 |= SCR_HCE;
+ }
+
+ /* Put CPU into non-secure state */
+ env->cp15.scr_el3 |= SCR_NS;
+ /* Set NSACR.{CP11,CP10} so NS can access the FPU */
+ env->cp15.nsacr |= 3 << 10;
+ }
+
+ if (have_el2 && target_el < 2) {
+ /* Set EL2 state so code can run at EL1. */
+ if (env->aarch64) {
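+ /* RW: EL1 is AArch64 */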
+ env->cp15.hcr_el2 |= HCR_RW;
+ }
+ }
+
+ /* Set the CPU to the desired state */
+ if (env->aarch64) {
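+ /* the second argument selects PSTATE.SP = 1, i.e. use SP_ELx */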
+ env->pstate = aarch64_pstate_mode(target_el, true);
+ } else {
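+ /*
+ * AArch32 modes corresponding to each EL; when EL3 is
+ * AArch32, Secure SVC runs at EL3, hence SVC for index 3.
+ */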
+ static const uint32_t mode_for_el[] = {
+ 0,
+ ARM_CPU_MODE_SVC,
+ ARM_CPU_MODE_HYP,
+ ARM_CPU_MODE_SVC,
+ };
+
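+ /* Raw write: skip the usual mode-switch checks and banked-register swap */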
+ cpsr_write(env, mode_for_el[target_el], CPSR_M, CPSRWriteRaw);
+ }
+}
+
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
uint32_t psr = pstate_read(env);
int i, j;
int el = arm_current_el(env);
+ uint64_t hcr = arm_hcr_el2_eff(env);
const char *ns_status;
bool sve;
if (cpu_isar_feature(aa64_bti, cpu)) {
qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
}
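+ /* Show any FEAT_NV / FEAT_NV2 controls currently in effect */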
+ qemu_fprintf(f, "%s%s%s",
+ (hcr & HCR_NV) ? " NV" : "",
+ (hcr & HCR_NV1) ? " NV1" : "",
+ (hcr & HCR_NV2) ? " NV2" : "");
if (!(flags & CPU_DUMP_FPU)) {
qemu_fprintf(f, "\n");
return;
}
if (kvm_enabled()) {
- kvm_arm_add_vcpu_properties(obj);
+ kvm_arm_add_vcpu_properties(cpu);
}
#ifndef CONFIG_USER_ONLY
return;
}
+ /*
+ * FEAT_SME is not architecturally dependent on FEAT_SVE (unless
+ * FEAT_SME_FA64 is present). However our implementation currently
+ * assumes it, so if the user asked for sve=off then turn off SME also.
+ * (KVM doesn't currently support SME at all.)
+ */
+ if (cpu_isar_feature(aa64_sme, cpu) && !cpu_isar_feature(aa64_sve, cpu)) {
+ object_property_set_bool(OBJECT(cpu), "sme", false, &error_abort);
+ }
+
arm_cpu_sme_finalize(cpu, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
int pagebits;
Error *local_err = NULL;
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
/* Use pc-relative instructions in system-mode */
-#ifndef CONFIG_USER_ONLY
cs->tcg_cflags |= CF_PCREL;
#endif
/* FEAT_MPAM (Memory Partitioning and Monitoring Extension) */
cpu->isar.id_aa64pfr0 =
FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, MPAM, 0);
- /* FEAT_NV (Nested Virtualization) */
- cpu->isar.id_aa64mmfr2 =
- FIELD_DP64(cpu->isar.id_aa64mmfr2, ID_AA64MMFR2, NV, 0);
}
/* MPU can be configured out of a PMSA CPU either by setting has-mpu
oc = object_class_by_name(typename);
g_strfreev(cpuname);
g_free(typename);
- if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
- object_class_is_abstract(oc)) {
- return NULL;
- }
+
return oc;
}
DEFINE_PROP_END_OF_LIST()
};
-static gchar *arm_gdb_arch_name(CPUState *cs)
+static const gchar *arm_gdb_arch_name(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
- return g_strdup("iwmmxt");
+ return "iwmmxt";
}
- return g_strdup("arm");
+ return "arm";
}
#ifndef CONFIG_USER_ONLY
cc->sysemu_ops = &arm_sysemu_ops;
#endif
cc->gdb_num_core_regs = 26;
- cc->gdb_core_xml_file = "arm-core.xml";
cc->gdb_arch_name = arm_gdb_arch_name;
cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
cc->gdb_stop_before_watchpoint = true;
static void cpu_register_class_init(ObjectClass *oc, void *data)
{
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(acc);
acc->info = data;
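+ /* Default core-regs XML; specific CPU classes (e.g. M-profile) may override */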
+ cc->gdb_core_xml_file = "arm-core.xml";
}
void arm_cpu_register(const ARMCPUInfo *info)