#define PROC_DEVTREE_CPU "/proc/device-tree/cpus/"
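+/*
+ * Return values used by the debug-exit handlers below: resume the
+ * guest, or hand control back to the gdbstub.
+ */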
+#define DEBUG_RETURN_GUEST 0
+#define DEBUG_RETURN_GDB 1
+
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
static int cap_ppc_count_cache_flush_assist;
static int cap_ppc_nested_kvm_hv;
static int cap_large_decr;
+static int cap_fwnmi;
static uint32_t debug_inst_opcode;
kvmppc_get_cpu_characteristics(s);
cap_ppc_nested_kvm_hv = kvm_vm_check_extension(s, KVM_CAP_PPC_NESTED_HV);
cap_large_decr = kvmppc_get_dec_bits();
+ cap_fwnmi = kvm_vm_check_extension(s, KVM_CAP_PPC_FWNMI);
/*
* Note: setting it to false because there is no such capability
* in KVM at this moment.
return 0;
}
-int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
+int kvm_arch_irqchip_create(KVMState *s)
{
return 0;
}
struct ppc_radix_page_info *kvm_get_radix_page_info(void)
{
- KVMState *s = KVM_STATE(current_machine->accelerator);
+ KVMState *s = KVM_STATE(current_accel());
struct ppc_radix_page_info *radix_page_info;
struct kvm_ppc_rmmu_info rmmu_info;
int i;
/*
* KVM-HV has transactional memory on POWER8 also without
* the KVM_CAP_PPC_HTM extension, so enable it here
- * instead as long as it's availble to userspace on the
+ * instead as long as it's available to userspace on the
* host.
*/
if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
return 0;
}
-#if defined(TARGET_PPC64)
-#define PPC_INPUT_INT PPC970_INPUT_INT
-#else
-#define PPC_INPUT_INT PPC6xx_INPUT_INT
-#endif
-
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
return;
static int kvm_handle_hw_breakpoint(CPUState *cs,
struct kvm_debug_exit_arch *arch_info)
{
- int handle = 0;
+ int handle = DEBUG_RETURN_GUEST;
int n;
int flag = 0;
if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
if (n >= 0) {
- handle = 1;
+ handle = DEBUG_RETURN_GDB;
}
} else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
KVMPPC_DEBUG_WATCH_WRITE)) {
n = find_hw_watchpoint(arch_info->address, &flag);
if (n >= 0) {
- handle = 1;
+ handle = DEBUG_RETURN_GDB;
cs->watchpoint_hit = &hw_watchpoint;
hw_watchpoint.vaddr = hw_debug_points[n].addr;
hw_watchpoint.flags = flag;
static int kvm_handle_singlestep(void)
{
- return 1;
+ return DEBUG_RETURN_GDB;
}
static int kvm_handle_sw_breakpoint(void)
{
- return 1;
+ return DEBUG_RETURN_GDB;
}
static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
env->error_code = POWERPC_EXCP_INVAL;
ppc_cpu_do_interrupt(cs);
- return 0;
+ return DEBUG_RETURN_GUEST;
}
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
ret = 0;
break;
+#if defined(TARGET_PPC64)
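+ /* Machine check raised in the guest, handled via kvm_handle_nmi() */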
+ case KVM_EXIT_NMI:
+ trace_kvm_handle_nmi_exception();
+ ret = kvm_handle_nmi(cpu, run);
+ break;
+#endif
+
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
ret = -1;
}
}
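+/* Report whether the host kernel advertises KVM_CAP_PPC_FWNMI machine check delivery */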
+bool kvmppc_get_fwnmi(void)
+{
+ return cap_fwnmi;
+}
+
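+/* Enable FWNMI machine check handling for this vCPU */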
+int kvmppc_set_fwnmi(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+
+ return kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_FWNMI, 0);
+}
+
int kvmppc_smt_threads(void)
{
return cap_ppc_smt ? cap_ppc_smt : 1;
return ret;
}
-void kvmppc_error_append_smt_possible_hint(Error **errp_in)
+void kvmppc_error_append_smt_possible_hint(Error *const *errp)
{
int i;
GString *g;
}
}
s = g_string_free(g, false);
- error_append_hint(errp_in, "%s.\n", s);
+ error_append_hint(errp, "%s.\n", s);
g_free(s);
} else {
- error_append_hint(errp_in,
+ error_append_hint(errp,
"This KVM seems to be too old to support VSMT.\n");
}
}
#ifdef TARGET_PPC64
-uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
+uint64_t kvmppc_vrma_limit(unsigned int hash_shift)
{
struct kvm_ppc_smmu_info info;
long rampagesize, best_page_shift;
}
}
- return MIN(current_size,
- 1ULL << (best_page_shift + hash_shift - 7));
+ return 1ULL << (best_page_shift + hash_shift - 7);
}
#endif
}
int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
- uint16_t n_valid, uint16_t n_invalid)
+ uint16_t n_valid, uint16_t n_invalid, Error **errp)
{
struct kvm_get_htab_header *buf;
size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
rc = write(fd, buf, chunksize);
if (rc < 0) {
- fprintf(stderr, "Error writing KVM hash table: %s\n",
- strerror(errno));
- return rc;
+ error_setg_errno(errp, errno, "Error writing the KVM hash table");
+ return -errno;
}
if (rc != chunksize) {
/* We should never get a short write on a single chunk */
- fprintf(stderr, "Short write, restoring KVM hash table\n");
- return -1;
+ error_setg(errp, "Short write while restoring the KVM hash table");
+ return -ENOSPC;
}
return 0;
}
return data & 0xffff;
}
+#if defined(TARGET_PPC64)
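+/*
+ * Called on KVM_EXIT_NMI: synchronize register state and forward the
+ * machine check to the sPAPR FWNMI handler, indicating whether the
+ * error was fully recovered.
+ */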
+int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run)
+{
+ uint16_t flags = run->flags & KVM_RUN_PPC_NMI_DISP_MASK;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ spapr_mce_req_event(cpu, flags == KVM_RUN_PPC_NMI_DISP_FULLY_RECOV);
+
+ return 0;
+}
+#endif
+
int kvmppc_enable_hwrng(void)
{
if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &tb_offset);
}
}
+
+/*
+ * Don't set an error if the KVM_PPC_SVM_OFF ioctl is invoked on
+ * kernels that don't support it.
+ */
+void kvmppc_svm_off(Error **errp)
+{
+ int rc;
+
+ if (!kvm_enabled()) {
+ return;
+ }
+
+ rc = kvm_vm_ioctl(KVM_STATE(current_accel()), KVM_PPC_SVM_OFF);
+ if (rc && rc != -ENOTTY) {
+ error_setg_errno(errp, -rc, "KVM_PPC_SVM_OFF ioctl failed");
+ }
+}