diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index 0b5d391403b6d118bbf4c85c8d40842a3dff4d1d..9c23c6ba0d8cfd9df800043a85e67022fd5a0163 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
 #include "hw/sysbus.h"
 #include "hw/ppc/spapr.h"
 #include "hw/ppc/spapr_vio.h"
+#include "hw/ppc/ppc.h"
 #include "sysemu/watchdog.h"
+#include "trace.h"
+#include "exec/gdbstub.h"
 
 //#define DEBUG_KVM
 
@@ -60,12 +63,17 @@ static int cap_booke_sregs;
 static int cap_ppc_smt;
 static int cap_ppc_rma;
 static int cap_spapr_tce;
+static int cap_spapr_multitce;
+static int cap_spapr_vfio;
 static int cap_hior;
 static int cap_one_reg;
 static int cap_epr;
 static int cap_ppc_watchdog;
 static int cap_papr;
 static int cap_htab_fd;
+static int cap_fixup_hcalls;
+
+static uint32_t debug_inst_opcode;
 
 /* XXX We have a race condition where we actually have a level triggered
  *     interrupt, but the infrastructure can't expose that yet, so the guest
@@ -96,6 +104,8 @@ int kvm_arch_init(KVMState *s)
     cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
     cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
     cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
+    cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
+    cap_spapr_vfio = false;
     cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
     cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
     cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
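All of the cap_* flags above are filled in by kvm_check_extension(), a one-time probe of the host kernel at init; a minimal sketch of the probe, assuming QEMU's usual convention of clamping errors to 0 (a failed ioctl and a missing extension then look the same):

    int kvm_check_extension(KVMState *s, unsigned int extension)
    {
        int ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);

        /* < 0 means the ioctl itself failed; treat as "not supported" */
        return ret < 0 ? 0 : ret;
    }
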
@@ -103,6 +113,7 @@ int kvm_arch_init(KVMState *s)
     /* Note: we don't set cap_papr here, because this capability is
      * only activated after this by kvmppc_set_papr() */
     cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
+    cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
 
     if (!cap_interrupt_level) {
         fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
@@ -150,7 +161,6 @@ static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
     CPUState *cs = CPU(cpu);
     struct kvm_book3e_206_tlb_params params = {};
     struct kvm_config_tlb cfg = {};
-    struct kvm_enable_cap encap = {};
     unsigned int entries = 0;
     int ret, i;
 
@@ -177,10 +187,7 @@ static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
     cfg.params = (uintptr_t)&params;
     cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
 
-    encap.cap = KVM_CAP_SW_TLB;
-    encap.args[0] = (uintptr_t)&cfg;
-
-    ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &encap);
+    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
     if (ret < 0) {
         fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
                 __func__, strerror(-ret));
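The new kvm_vcpu_enable_cap() helper folds the struct kvm_enable_cap boilerplate (removed above) into one call; upstream implements this as a variadic macro, but a fixed-arity sketch of the equivalent behavior looks like this (the name vcpu_enable_cap_sketch is illustrative):

    static int vcpu_enable_cap_sketch(CPUState *cs, uint32_t capability,
                                      uint32_t flags, uint64_t arg0)
    {
        struct kvm_enable_cap enable = {
            .cap = capability,
            .flags = flags,
        };

        enable.args[0] = arg0;          /* extra ioctl argument, if any */
        return kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &enable);
    }

So the call above matches the removed encap code, with flags == 0.
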
@@ -359,6 +366,10 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
     /* Convert to QEMU form */
     memset(&env->sps, 0, sizeof(env->sps));
 
+    /*
+     * XXX This loop should be an entry wide AND of the capabilities that
+     *     the selected CPU has with the capabilities that KVM supports.
+     */
     for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
         struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
         struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
@@ -385,9 +396,7 @@ static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
         }
     }
     env->slb_nr = smmu_info.slb_size;
-    if (smmu_info.flags & KVM_PPC_1T_SEGMENTS) {
-        env->mmu_model |= POWERPC_MMU_1TSEG;
-    } else {
+    if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
         env->mmu_model &= ~POWERPC_MMU_1TSEG;
     }
 }
@@ -401,7 +410,39 @@ static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
 
 unsigned long kvm_arch_vcpu_id(CPUState *cpu)
 {
-    return cpu->cpu_index;
+    return ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
+}
+
+/* e500 supports 2 h/w breakpoints and 2 watchpoints.
+ * book3s supports only 1 watchpoint, so an array size
+ * of 4 is sufficient for now.
+ */
+#define MAX_HW_BKPTS 4
+
+static struct HWBreakpoint {
+    target_ulong addr;
+    int type;
+} hw_debug_points[MAX_HW_BKPTS];
+
+static CPUWatchpoint hw_watchpoint;
+
+/* By default, no h/w breakpoint or watchpoint is supported */
+static int max_hw_breakpoint;
+static int max_hw_watchpoint;
+static int nb_hw_breakpoint;
+static int nb_hw_watchpoint;
+
+static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
+{
+    if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
+        max_hw_breakpoint = 2;
+        max_hw_watchpoint = 2;
+    }
+
+    if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
+        fprintf(stderr, "Error initializing h/w breakpoints\n");
+        return;
+    }
 }
 
 int kvm_arch_init_vcpu(CPUState *cs)
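kvm_arch_vcpu_id() now hands KVM the device-tree CPU id instead of the dense cpu_index, which matters on hosts whose SMT width differs from the guest's. The arithmetic mirrors what the removed kvmppc_fixup_cpu() (further down in this patch) used to fold into cpu_index itself; a sketch with illustrative parameters:

    /* e.g. host SMT 8, guest smp_threads 2: cpu_index 2 maps to dt id 8 */
    static int vcpu_dt_id_sketch(int cpu_index, int smp_threads, int smt)
    {
        return (cpu_index / smp_threads) * smt + (cpu_index % smp_threads);
    }
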
@@ -430,11 +471,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
         break;
     }
 
-    return ret;
-}
+    kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
+    kvmppc_hw_debug_points_init(cenv);
 
-void kvm_arch_reset_vcpu(CPUState *cpu)
-{
+    return ret;
 }
 
 static void kvm_sw_tlb_put(PowerPCCPU *cpu)
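kvm_arch_init_vcpu() now also caches KVM_REG_PPC_DEBUG_INST, the opcode KVM wants planted for software breakpoints (used by the debug hooks added later in this patch). kvm_get_one_reg() is the generic ONE_REG accessor; a sketch of its assumed shape:

    int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
    {
        struct kvm_one_reg reg = {
            .id = id,
            .addr = (uintptr_t)target,   /* kernel writes the value here */
        };

        return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    }
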
@@ -480,8 +520,7 @@ static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
 
     ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
     if (ret != 0) {
-        fprintf(stderr, "Warning: Unable to retrieve SPR %d from KVM: %s\n",
-                spr, strerror(errno));
+        trace_kvm_failed_spr_get(spr, strerror(errno));
     } else {
         switch (id & KVM_REG_SIZE_MASK) {
         case KVM_REG_SIZE_U32:
@@ -529,8 +568,7 @@ static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
 
     ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
     if (ret != 0) {
-        fprintf(stderr, "Warning: Unable to set SPR %d to KVM: %s\n",
-                spr, strerror(errno));
+        trace_kvm_failed_spr_set(spr, strerror(errno));
     }
 }
 
@@ -818,8 +856,11 @@ int kvm_arch_put_registers(CPUState *cs, int level)
 
         /* Sync SLB */
 #ifdef TARGET_PPC64
-        for (i = 0; i < 64; i++) {
+        for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
             sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
+            if (env->slb[i].esid & SLB_ESID_V) {
+                sregs.u.s.ppc64.slb[i].slbe |= i;
+            }
             sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
         }
 #endif
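The put side now ORs the slot number into slbe for valid entries: like the slbmte instruction, KVM takes the target slot from the low-order bits of the RB value, so without the index every valid entry would claim slot 0. A hedged illustration (bit layout assumed):

    /* illustration only: EA 0x10000000, 256MB segments, slot 5 */
    uint64_t slbe = 0x10000000ULL    /* ESID bits of the EA (assumed) */
                  | SLB_ESID_V       /* valid bit                     */
                  | 5;               /* slot index, as slbmte expects */
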
@@ -864,17 +905,43 @@ int kvm_arch_put_registers(CPUState *cs, int level)
         }
 
 #ifdef TARGET_PPC64
+        if (msr_ts) {
+            for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
+                kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
+            }
+            for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
+                kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
+            }
+            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
+            kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
+            kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
+            kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
+            kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
+            kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
+            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
+            kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
+            kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
+            kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
+        }
+
         if (cap_papr) {
             if (kvm_put_vpa(cs) < 0) {
                 DPRINTF("Warning: Unable to set VPA information to KVM\n");
             }
         }
+
+        kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
 #endif /* TARGET_PPC64 */
     }
 
     return ret;
 }
 
+static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
+{
+    env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
+}
+
 int kvm_arch_get_registers(CPUState *cs)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -957,35 +1024,57 @@ int kvm_arch_get_registers(CPUState *cs)
 
         if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
             env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
+            kvm_sync_excp(env, POWERPC_EXCP_CRITICAL,  SPR_BOOKE_IVOR0);
             env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
+            kvm_sync_excp(env, POWERPC_EXCP_MCHECK,  SPR_BOOKE_IVOR1);
             env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
+            kvm_sync_excp(env, POWERPC_EXCP_DSI,  SPR_BOOKE_IVOR2);
             env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
+            kvm_sync_excp(env, POWERPC_EXCP_ISI,  SPR_BOOKE_IVOR3);
             env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
+            kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL,  SPR_BOOKE_IVOR4);
             env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
+            kvm_sync_excp(env, POWERPC_EXCP_ALIGN,  SPR_BOOKE_IVOR5);
             env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
+            kvm_sync_excp(env, POWERPC_EXCP_PROGRAM,  SPR_BOOKE_IVOR6);
             env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
+            kvm_sync_excp(env, POWERPC_EXCP_FPU,  SPR_BOOKE_IVOR7);
             env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
+            kvm_sync_excp(env, POWERPC_EXCP_SYSCALL,  SPR_BOOKE_IVOR8);
             env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
+            kvm_sync_excp(env, POWERPC_EXCP_APU,  SPR_BOOKE_IVOR9);
             env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
+            kvm_sync_excp(env, POWERPC_EXCP_DECR,  SPR_BOOKE_IVOR10);
             env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
+            kvm_sync_excp(env, POWERPC_EXCP_FIT,  SPR_BOOKE_IVOR11);
             env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
+            kvm_sync_excp(env, POWERPC_EXCP_WDT,  SPR_BOOKE_IVOR12);
             env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
+            kvm_sync_excp(env, POWERPC_EXCP_DTLB,  SPR_BOOKE_IVOR13);
             env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
+            kvm_sync_excp(env, POWERPC_EXCP_ITLB,  SPR_BOOKE_IVOR14);
             env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
+            kvm_sync_excp(env, POWERPC_EXCP_DEBUG,  SPR_BOOKE_IVOR15);
 
             if (sregs.u.e.features & KVM_SREGS_E_SPE) {
                 env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
+                kvm_sync_excp(env, POWERPC_EXCP_SPEU,  SPR_BOOKE_IVOR32);
                 env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
+                kvm_sync_excp(env, POWERPC_EXCP_EFPDI,  SPR_BOOKE_IVOR33);
                 env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
+                kvm_sync_excp(env, POWERPC_EXCP_EFPRI,  SPR_BOOKE_IVOR34);
             }
 
             if (sregs.u.e.features & KVM_SREGS_E_PM) {
                 env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
+                kvm_sync_excp(env, POWERPC_EXCP_EPERFM,  SPR_BOOKE_IVOR35);
             }
 
             if (sregs.u.e.features & KVM_SREGS_E_PC) {
                 env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
+                kvm_sync_excp(env, POWERPC_EXCP_DOORI,  SPR_BOOKE_IVOR36);
                 env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
+                kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
             }
         }
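Each kvm_sync_excp() call above re-derives the QEMU-side exception vector from the BookE vector prefix (IVPR) plus the per-source offset (IVOR), keeping env->excp_vectors coherent with what KVM reports. A worked example with made-up register values:

    /* illustration only */
    env->spr[SPR_BOOKE_IVPR]  = 0xfff00000;   /* vector base         */
    env->spr[SPR_BOOKE_IVOR2] = 0x00000060;   /* data storage offset */
    kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
    /* env->excp_vectors[POWERPC_EXCP_DSI] is now 0xfff00060 */
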
 
@@ -1029,13 +1118,28 @@ int kvm_arch_get_registers(CPUState *cs)
             return ret;
         }
 
-        ppc_store_sdr1(env, sregs.u.s.sdr1);
+        if (!env->external_htab) {
+            ppc_store_sdr1(env, sregs.u.s.sdr1);
+        }
 
         /* Sync SLB */
 #ifdef TARGET_PPC64
-        for (i = 0; i < 64; i++) {
-            ppc_store_slb(env, sregs.u.s.ppc64.slb[i].slbe,
-                               sregs.u.s.ppc64.slb[i].slbv);
+        /*
+         * The packed SLB array we get from KVM_GET_SREGS only contains
+         * information about valid entries. So we flush our internal
+         * copy to get rid of stale ones, then put all valid SLB entries
+         * back in.
+         */
+        memset(env->slb, 0, sizeof(env->slb));
+        for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
+            target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
+            target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
+            /*
+             * Only restore valid entries
+             */
+            if (rb & SLB_ESID_V) {
+                ppc_store_slb(env, rb, rs);
+            }
         }
 #endif
 
@@ -1073,11 +1177,32 @@ int kvm_arch_get_registers(CPUState *cs)
         }
 
 #ifdef TARGET_PPC64
+        if (msr_ts) {
+            for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
+                kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
+            }
+            for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
+                kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
+            }
+            kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
+            kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
+            kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
+            kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
+            kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
+            kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
+            kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
+            kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
+            kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
+            kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
+        }
+
         if (cap_papr) {
             if (kvm_get_vpa(cs) < 0) {
                 DPRINTF("Warning: Unable to get VPA information from KVM\n");
             }
         }
+
+        kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
 #endif
     }
 
@@ -1161,7 +1286,7 @@ static int kvmppc_handle_halt(PowerPCCPU *cpu)
 
     if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
         cs->halted = 1;
-        env->exception_index = EXCP_HLT;
+        cs->exception_index = EXCP_HLT;
     }
 
     return 0;
@@ -1184,6 +1309,259 @@ static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t dat
     return 0;
 }
 
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+    /* Mixed endian case is not handled */
+    uint32_t sc = debug_inst_opcode;
+
+    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+                            sizeof(sc), 0) ||
+        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+    uint32_t sc;
+
+    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
+        sc != debug_inst_opcode ||
+        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+                            sizeof(sc), 1)) {
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+static int find_hw_breakpoint(target_ulong addr, int type)
+{
+    int n;
+
+    assert((nb_hw_breakpoint + nb_hw_watchpoint)
+           <= ARRAY_SIZE(hw_debug_points));
+
+    for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
+        if (hw_debug_points[n].addr == addr &&
+             hw_debug_points[n].type == type) {
+            return n;
+        }
+    }
+
+    return -1;
+}
+
+static int find_hw_watchpoint(target_ulong addr, int *flag)
+{
+    int n;
+
+    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
+    if (n >= 0) {
+        *flag = BP_MEM_ACCESS;
+        return n;
+    }
+
+    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
+    if (n >= 0) {
+        *flag = BP_MEM_WRITE;
+        return n;
+    }
+
+    n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
+    if (n >= 0) {
+        *flag = BP_MEM_READ;
+        return n;
+    }
+
+    return -1;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+                                  target_ulong len, int type)
+{
+    if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
+        return -ENOBUFS;
+    }
+
+    hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
+    hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
+
+    switch (type) {
+    case GDB_BREAKPOINT_HW:
+        if (nb_hw_breakpoint >= max_hw_breakpoint) {
+            return -ENOBUFS;
+        }
+
+        if (find_hw_breakpoint(addr, type) >= 0) {
+            return -EEXIST;
+        }
+
+        nb_hw_breakpoint++;
+        break;
+
+    case GDB_WATCHPOINT_WRITE:
+    case GDB_WATCHPOINT_READ:
+    case GDB_WATCHPOINT_ACCESS:
+        if (nb_hw_watchpoint >= max_hw_watchpoint) {
+            return -ENOBUFS;
+        }
+
+        if (find_hw_breakpoint(addr, type) >= 0) {
+            return -EEXIST;
+        }
+
+        nb_hw_watchpoint++;
+        break;
+
+    default:
+        return -ENOSYS;
+    }
+
+    return 0;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+                                  target_ulong len, int type)
+{
+    int n;
+
+    n = find_hw_breakpoint(addr, type);
+    if (n < 0) {
+        return -ENOENT;
+    }
+
+    switch (type) {
+    case GDB_BREAKPOINT_HW:
+        nb_hw_breakpoint--;
+        break;
+
+    case GDB_WATCHPOINT_WRITE:
+    case GDB_WATCHPOINT_READ:
+    case GDB_WATCHPOINT_ACCESS:
+        nb_hw_watchpoint--;
+        break;
+
+    default:
+        return -ENOSYS;
+    }
+    hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
+
+    return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+    nb_hw_breakpoint = nb_hw_watchpoint = 0;
+}
+
+void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
+{
+    int n;
+
+    /* Software Breakpoint updates */
+    if (kvm_sw_breakpoints_active(cs)) {
+        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+    }
+
+    assert((nb_hw_breakpoint + nb_hw_watchpoint)
+           <= ARRAY_SIZE(hw_debug_points));
+    assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
+
+    if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
+        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+        memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
+        for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
+            switch (hw_debug_points[n].type) {
+            case GDB_BREAKPOINT_HW:
+                dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
+                break;
+            case GDB_WATCHPOINT_WRITE:
+                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
+                break;
+            case GDB_WATCHPOINT_READ:
+                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
+                break;
+            case GDB_WATCHPOINT_ACCESS:
+                dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
+                                        KVMPPC_DEBUG_WATCH_READ;
+                break;
+            default:
+                cpu_abort(cs, "Unsupported breakpoint type\n");
+            }
+            dbg->arch.bp[n].addr = hw_debug_points[n].addr;
+        }
+    }
+}
+
+static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
+{
+    CPUState *cs = CPU(cpu);
+    CPUPPCState *env = &cpu->env;
+    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
+    int handle = 0;
+    int n;
+    int flag = 0;
+
+    if (cs->singlestep_enabled) {
+        handle = 1;
+    } else if (arch_info->status) {
+        if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
+            if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
+                n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
+                if (n >= 0) {
+                    handle = 1;
+                }
+            } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
+                                            KVMPPC_DEBUG_WATCH_WRITE)) {
+                n = find_hw_watchpoint(arch_info->address,  &flag);
+                if (n >= 0) {
+                    handle = 1;
+                    cs->watchpoint_hit = &hw_watchpoint;
+                    hw_watchpoint.vaddr = hw_debug_points[n].addr;
+                    hw_watchpoint.flags = flag;
+                }
+            }
+        }
+    } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
+        handle = 1;
+    } else {
+        /* QEMU cannot handle this debug exception, so inject a program
+         * exception into the guest -- yes, a program exception, NOT a
+         * debug exception !!
+         * While QEMU owns the debug resources, debug exceptions must
+         * always be enabled; to achieve this we set MSR_DE and also
+         * MSRP_DEP so the guest cannot change MSR_DE.
+         * When instead emulating debug resources for the guest, we want
+         * the guest to control MSR_DE (enabling/disabling the debug
+         * interrupt on demand). Supporting both configurations at once
+         * is not possible, so debug resources cannot be shared between
+         * QEMU and the guest on the BookE architecture.
+         * In the current design QEMU gets priority over the guest: if
+         * QEMU is using the debug resources, the guest cannot use them.
+         * Since QEMU uses a privileged instruction for software
+         * breakpoints, we cannot be here because of a guest-configured
+         * debug exception; the only possibility is that the guest
+         * executed a privileged/illegal instruction, which is why we
+         * inject a program interrupt.
+         */
+
+        cpu_synchronize_state(cs);
+        /* env->nip is the PC, so increment it by 4 to use
+         * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
+         */
+        env->nip += 4;
+        cs->exception_index = POWERPC_EXCP_PROGRAM;
+        env->error_code = POWERPC_EXCP_INVAL;
+        ppc_cpu_do_interrupt(cs);
+    }
+
+    return handle;
+}
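Taken together, the hooks above implement QEMU's KVM guest-debug contract: inserting a software breakpoint saves the original instruction and writes debug_inst_opcode in its place, the vcpu then exits with KVM_EXIT_DEBUG, and kvm_handle_debug() decides whether the event belongs to QEMU (gdbstub) or must be reflected into the guest. A hedged caller-side sketch of the software-breakpoint round trip:

    struct kvm_sw_breakpoint bp = { .pc = pc };   /* pc: guest address */

    if (kvm_arch_insert_sw_breakpoint(cs, &bp) == 0) {
        /* ... vcpu runs; a hit surfaces as KVM_EXIT_DEBUG at bp.pc ... */
        kvm_arch_remove_sw_breakpoint(cs, &bp);   /* restores saved_insn */
    }
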
+
 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
@@ -1215,7 +1593,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
 #endif
     case KVM_EXIT_EPR:
         DPRINTF("handle epr\n");
-        run->epr.epr = ldl_phys(env->mpic_iack);
+        run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
         ret = 0;
         break;
     case KVM_EXIT_WATCHDOG:
@@ -1224,6 +1602,16 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
         ret = 0;
         break;
 
+    case KVM_EXIT_DEBUG:
+        DPRINTF("handle debug exception\n");
+        if (kvm_handle_debug(cpu, run)) {
+            ret = EXCP_DEBUG;
+            break;
+        }
+        /* re-enter, this exception was guest-internal */
+        ret = 0;
+        break;
+
     default:
         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
         ret = -1;
@@ -1275,7 +1663,6 @@ int kvmppc_set_tcr(PowerPCCPU *cpu)
 int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
 {
     CPUState *cs = CPU(cpu);
-    struct kvm_enable_cap encap = {};
     int ret;
 
     if (!kvm_enabled()) {
@@ -1287,8 +1674,7 @@ int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
         return -1;
     }
 
-    encap.cap = KVM_CAP_PPC_BOOKE_WATCHDOG;
-    ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &encap);
+    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
     if (ret < 0) {
         fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
                 __func__, strerror(-ret));
@@ -1311,7 +1697,7 @@ static int read_cpuinfo(const char *field, char *value, int len)
     }
 
     do {
-        if(!fgets(line, sizeof(line), f)) {
+        if (!fgets(line, sizeof(line), f)) {
             break;
         }
         if (!strncmp(line, field, field_len)) {
@@ -1346,6 +1732,17 @@ uint32_t kvmppc_get_tbfreq(void)
     return retval;
 }
 
+bool kvmppc_get_host_serial(char **value)
+{
+    return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
+                               NULL);
+}
+
+bool kvmppc_get_host_model(char **value)
+{
+    return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
+}
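Both helpers simply forward the g_file_get_contents() result, so on success the caller owns a NUL-terminated copy of the device-tree property and must free it; a usage sketch:

    char *model = NULL;

    if (kvmppc_get_host_model(&model)) {
        /* e.g. copy it into the guest device tree */
        g_free(model);
    }
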
+
 /* Try to find a device tree node for a CPU with clock-frequency property */
 static int kvmppc_find_cpu_dt(char *buf, int buf_len)
 {
@@ -1438,7 +1835,7 @@ static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
      PowerPCCPU *cpu = ppc_env_get_cpu(env);
      CPUState *cs = CPU(cpu);
 
-    if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
+    if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
         !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
         return 0;
     }
@@ -1469,34 +1866,30 @@ int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
     }
 
     /*
-     * Fallback to always fail hypercalls:
+     * Fall back to always-failing hypercalls, regardless of endianness:
      *
+     *     tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
      *     li r3, -1
-     *     nop
-     *     nop
-     *     nop
+     *     b .+8       (becomes nop in wrong endian)
+     *     bswap32(li r3, -1)
      */
 
-    hc[0] = 0x3860ffff;
-    hc[1] = 0x60000000;
-    hc[2] = 0x60000000;
-    hc[3] = 0x60000000;
+    hc[0] = cpu_to_be32(0x08000048);
+    hc[1] = cpu_to_be32(0x3860ffff);
+    hc[2] = cpu_to_be32(0x48000008);
+    hc[3] = cpu_to_be32(bswap32(0x3860ffff));
 
     return 0;
 }
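The byte-swapped pairs above make the fallback stub endian-agnostic: a same-endian guest executes tdi (never traps), li r3,-1, then b .+8 over the swapped copy, while a wrong-endian guest sees hc[0] as b .+8, lands on hc[2] (its tdi), and falls through to the swapped li r3,-1 in hc[3]. The swaps themselves can be sanity-checked:

    /* instruction words used by the stub and their byte swaps */
    assert(bswap32(0x08000048) == 0x48000008);  /* tdi 0,r0,72 <-> b .+8 */
    assert(bswap32(0x3860ffff) == 0xffff6038);  /* li r3,-1, byte-swapped */
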
 
 void kvmppc_set_papr(PowerPCCPU *cpu)
 {
-    CPUPPCState *env = &cpu->env;
     CPUState *cs = CPU(cpu);
-    struct kvm_enable_cap cap = {};
     int ret;
 
-    cap.cap = KVM_CAP_PPC_PAPR;
-    ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &cap);
-
+    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
     if (ret) {
-        cpu_abort(env, "This KVM version does not support PAPR\n");
+        cpu_abort(cs, "This KVM version does not support PAPR\n");
     }
 
     /* Update the capability flag so we sync the right information
@@ -1504,19 +1897,19 @@ void kvmppc_set_papr(PowerPCCPU *cpu)
     cap_papr = 1;
 }
 
+int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version)
+{
+    return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &cpu_version);
+}
+
 void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
 {
-    CPUPPCState *env = &cpu->env;
     CPUState *cs = CPU(cpu);
-    struct kvm_enable_cap cap = {};
     int ret;
 
-    cap.cap = KVM_CAP_PPC_EPR;
-    cap.args[0] = mpic_proxy;
-    ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &cap);
-
+    ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
     if (ret && mpic_proxy) {
-        cpu_abort(env, "This KVM version does not support EPR\n");
+        cpu_abort(cs, "This KVM version does not support EPR\n");
     }
 }
 
@@ -1526,13 +1919,11 @@ int kvmppc_smt_threads(void)
 }
 
 #ifdef TARGET_PPC64
-off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem)
+off_t kvmppc_alloc_rma(void **rma)
 {
-    void *rma;
     off_t size;
     int fd;
     struct kvm_allocate_rma ret;
-    MemoryRegion *rma_region;
 
     /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
      * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
@@ -1555,17 +1946,12 @@ off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem)
 
     size = MIN(ret.rma_size, 256ul << 20);
 
-    rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
-    if (rma == MAP_FAILED) {
+    *rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+    if (*rma == MAP_FAILED) {
         fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
         return -1;
     };
 
-    rma_region = g_new(MemoryRegion, 1);
-    memory_region_init_ram_ptr(rma_region, NULL, name, size, rma);
-    vmstate_register_ram_global(rma_region);
-    memory_region_add_subregion(sysmem, 0, rma_region);
-
     return size;
 }
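kvmppc_alloc_rma() now only allocates and maps the RMA, returning the host pointer through *rma; the MemoryRegion plumbing deleted above moves to the caller. A caller-side sketch mirroring the removed code (names illustrative):

    void *rma = NULL;
    off_t rma_size = kvmppc_alloc_rma(&rma);

    if (rma_size > 0) {
        MemoryRegion *rma_region = g_new(MemoryRegion, 1);

        memory_region_init_ram_ptr(rma_region, NULL, "ppc_spapr.rma",
                                   rma_size, rma);
        vmstate_register_ram_global(rma_region);
        memory_region_add_subregion(sysmem, 0, rma_region);
    }
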
 
@@ -1603,7 +1989,13 @@ uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
 }
 #endif
 
-void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
+bool kvmppc_spapr_use_multitce(void)
+{
+    return cap_spapr_multitce;
+}
+
+void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd,
+                              bool vfio_accel)
 {
     struct kvm_create_spapr_tce args = {
         .liobn = liobn,
@@ -1617,7 +2009,7 @@ void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
      * destroying the table, which the upper layers -will- do
      */
     *pfd = -1;
-    if (!cap_spapr_tce) {
+    if (!cap_spapr_tce || (vfio_accel && !cap_spapr_vfio)) {
         return NULL;
     }
 
@@ -1643,7 +2035,7 @@ void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
     return table;
 }
 
-int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t window_size)
+int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
 {
     long len;
 
@@ -1651,7 +2043,7 @@ int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t window_size)
         return -1;
     }
 
-    len = (window_size / SPAPR_TCE_PAGE_SIZE)*sizeof(uint64_t);
+    len = nb_table * sizeof(uint64_t);
     if ((munmap(table, len) < 0) ||
         (close(fd) < 0)) {
         fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
@@ -1732,6 +2124,7 @@ static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
     uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
 
     /* Now fix up the class with information we can query from the host */
+    pcc->pvr = mfpvr();
 
     if (vmx != -1) {
         /* Only override when we know what the host supports */
@@ -1752,22 +2145,31 @@ static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
     }
 }
 
-int kvmppc_fixup_cpu(PowerPCCPU *cpu)
+bool kvmppc_has_cap_epr(void)
 {
-    CPUState *cs = CPU(cpu);
-    int smt;
+    return cap_epr;
+}
 
-    /* Adjust cpu index for SMT */
-    smt = kvmppc_smt_threads();
-    cs->cpu_index = (cs->cpu_index / smp_threads) * smt
-        + (cs->cpu_index % smp_threads);
+bool kvmppc_has_cap_htab_fd(void)
+{
+    return cap_htab_fd;
+}
 
-    return 0;
+bool kvmppc_has_cap_fixup_hcalls(void)
+{
+    return cap_fixup_hcalls;
 }
 
-bool kvmppc_has_cap_epr(void)
+static PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
 {
-    return cap_epr;
+    ObjectClass *oc = OBJECT_CLASS(pcc);
+
+    while (oc && !object_class_is_abstract(oc)) {
+        oc = object_class_get_parent(oc);
+    }
+    assert(oc);
+
+    return POWERPC_CPU_CLASS(oc);
 }
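ppc_cpu_get_family_class() walks up the QOM hierarchy to the first abstract ancestor, i.e. the CPU family class. The registration code below uses its DeviceClass desc to compose the alias type name; a sketch of the composition (the desc value is assumed):

    DeviceClass *dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
    char *name = g_strdup_printf("%s-" TYPE_POWERPC_CPU, dc->desc);

    /* e.g. "POWER7-powerpc64-cpu" when dc->desc is "POWER7" */
    g_free(name);
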
 
 static int kvm_ppc_register_host_cpu_type(void)
@@ -1779,13 +2181,25 @@ static int kvm_ppc_register_host_cpu_type(void)
     };
     uint32_t host_pvr = mfpvr();
     PowerPCCPUClass *pvr_pcc;
+    DeviceClass *dc;
 
     pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
+    if (pvr_pcc == NULL) {
+        pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
+    }
     if (pvr_pcc == NULL) {
         return -1;
     }
     type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
     type_register(&type_info);
+
+    /* Register generic family CPU class for a family */
+    pvr_pcc = ppc_cpu_get_family_class(pvr_pcc);
+    dc = DEVICE_CLASS(pvr_pcc);
+    type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
+    type_info.name = g_strdup_printf("%s-"TYPE_POWERPC_CPU, dc->desc);
+    type_register(&type_info);
+
     return 0;
 }
 
@@ -1889,3 +2303,88 @@ int kvm_arch_on_sigbus(int code, void *addr)
 void kvm_arch_init_irq_routing(KVMState *s)
 {
 }
+
+struct kvm_get_htab_buf {
+    struct kvm_get_htab_header header;
+    /*
+     * We require one extra entry's worth of space for the read
+     */
+    target_ulong hpte[(HPTES_PER_GROUP * 2) + 1];
+};
+
+uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index)
+{
+    int htab_fd;
+    struct kvm_get_htab_fd ghf;
+    struct kvm_get_htab_buf  *hpte_buf;
+
+    ghf.flags = 0;
+    ghf.start_index = pte_index;
+    htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
+    if (htab_fd < 0) {
+        goto error_out;
+    }
+
+    hpte_buf = g_malloc0(sizeof(*hpte_buf));
+    /*
+     * Read the hpte group
+     */
+    if (read(htab_fd, hpte_buf, sizeof(*hpte_buf)) < 0) {
+        goto out_close;
+    }
+
+    close(htab_fd);
+    return (uint64_t)(uintptr_t) hpte_buf->hpte;
+
+out_close:
+    g_free(hpte_buf);
+    close(htab_fd);
+error_out:
+    return 0;
+}
+
+void kvmppc_hash64_free_pteg(uint64_t token)
+{
+    struct kvm_get_htab_buf *htab_buf;
+
+    htab_buf = container_of((void *)(uintptr_t) token, struct kvm_get_htab_buf,
+                            hpte);
+    g_free(htab_buf);
+    return;
+}
+
+void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
+                             target_ulong pte0, target_ulong pte1)
+{
+    int htab_fd;
+    struct kvm_get_htab_fd ghf;
+    struct kvm_get_htab_buf hpte_buf;
+
+    ghf.flags = 0;
+    ghf.start_index = 0;     /* Ignored */
+    htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
+    if (htab_fd < 0) {
+        goto error_out;
+    }
+
+    hpte_buf.header.n_valid = 1;
+    hpte_buf.header.n_invalid = 0;
+    hpte_buf.header.index = pte_index;
+    hpte_buf.hpte[0] = pte0;
+    hpte_buf.hpte[1] = pte1;
+    /*
+     * Write the hpte entry.
+     * CAUTION: write() has the warn_unused_result attribute, so we must
+     * check the return value even though we take no further action on it.
+     */
+    if (write(htab_fd, &hpte_buf, sizeof(hpte_buf)) < 0) {
+        goto out_close;
+    }
+
+out_close:
+    close(htab_fd);
+    return;
+
+error_out:
+    return;
+}
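
A hedged usage sketch tying the two halves together: read one PTE group through the returned token, rewrite a single entry, then release the buffer (the HPTE64_R_C tweak is purely illustrative):

    uint64_t token = kvmppc_hash64_read_pteg(cpu, pte_index);

    if (token) {
        target_ulong *hptes = (target_ulong *)(uintptr_t)token;

        kvmppc_hash64_write_pte(env, pte_index,
                                hptes[0],                 /* pte0 */
                                hptes[1] | HPTE64_R_C);   /* pte1 */
        kvmppc_hash64_free_pteg(token);
    }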