diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index 77b98c4d7..2546c577f 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -18,16 +18,24 @@
 #include <sys/types.h>
 #include <sys/ioctl.h>
 #include <sys/mman.h>
+#include <sys/vfs.h>
 
 #include <linux/kvm.h>
 
 #include "qemu-common.h"
-#include "qemu-timer.h"
+#include "qemu/timer.h"
 #include "sysemu.h"
 #include "kvm.h"
 #include "kvm_ppc.h"
 #include "cpu.h"
+#include "cpus.h"
 #include "device_tree.h"
+#include "hw/sysbus.h"
+#include "hw/spapr.h"
+
+#include "hw/sysbus.h"
+#include "hw/spapr.h"
+#include "hw/spapr_vio.h"
 
 //#define DEBUG_KVM
 
@@ -49,6 +57,10 @@ static int cap_interrupt_unset = false;
 static int cap_interrupt_level = false;
 static int cap_segstate;
 static int cap_booke_sregs;
+static int cap_ppc_smt;
+static int cap_ppc_rma;
+static int cap_spapr_tce;
+static int cap_hior;
 
 /* XXX We have a race condition where we actually have a level triggered
  *     interrupt, but the infrastructure can't expose that yet, so the guest
@@ -61,9 +73,11 @@ static int cap_booke_sregs;
  */
 static QEMUTimer *idle_timer;
 
-static void kvm_kick_env(void *env)
+static void kvm_kick_cpu(void *opaque)
 {
-    qemu_cpu_kick(env);
+    PowerPCCPU *cpu = opaque;
+
+    qemu_cpu_kick(CPU(cpu));
 }
 
 int kvm_arch_init(KVMState *s)
@@ -72,6 +86,10 @@ int kvm_arch_init(KVMState *s)
     cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
     cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
     cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
+    cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
+    cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
+    cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
+    cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
 
     if (!cap_interrupt_level) {
         fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
@@ -81,7 +99,7 @@ int kvm_arch_init(KVMState *s)
     return 0;
 }
 
-static int kvm_arch_sync_sregs(CPUState *cenv)
+static int kvm_arch_sync_sregs(CPUPPCState *cenv)
 {
     struct kvm_sregs sregs;
     int ret;
@@ -108,25 +126,313 @@ static int kvm_arch_sync_sregs(CPUState *cenv)
     return kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs);
 }
 
-int kvm_arch_init_vcpu(CPUState *cenv)
+/* Set up a shared TLB array with KVM */
+static int kvm_booke206_tlb_init(CPUPPCState *env)
+{
+    struct kvm_book3e_206_tlb_params params = {};
+    struct kvm_config_tlb cfg = {};
+    struct kvm_enable_cap encap = {};
+    unsigned int entries = 0;
+    int ret, i;
+
+    if (!kvm_enabled() ||
+        !kvm_check_extension(env->kvm_state, KVM_CAP_SW_TLB)) {
+        return 0;
+    }
+
+    assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
+
+    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+        params.tlb_sizes[i] = booke206_tlb_size(env, i);
+        params.tlb_ways[i] = booke206_tlb_ways(env, i);
+        entries += params.tlb_sizes[i];
+    }
+
+    assert(entries == env->nb_tlb);
+    assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
+
+    env->tlb_dirty = true;
+
+    cfg.array = (uintptr_t)env->tlb.tlbm;
+    cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
+    cfg.params = (uintptr_t)&params;
+    cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
+
+    encap.cap = KVM_CAP_SW_TLB;
+    encap.args[0] = (uintptr_t)&cfg;
+
+    ret = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &encap);
+    if (ret < 0) {
+        fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
+                __func__, strerror(-ret));
+        return ret;
+    }
+
+    env->kvm_sw_tlb = true;
+    return 0;
+}
+
+
+#if defined(TARGET_PPC64)
+static void kvm_get_fallback_smmu_info(CPUPPCState *env,
+                                       struct kvm_ppc_smmu_info *info)
+{
+    memset(info, 0, sizeof(*info));
+
+    /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
+     * need to "guess" what the supported page sizes are.
+     *
+     * For that to work we make a few assumptions:
+     *
+     * - If KVM_CAP_PPC_GET_PVINFO is supported we are running "PR"
+     *   KVM which only supports 4K and 16M pages, but supports them
+     *   regardless of the backing store characteritics. We also don't
+     *   support 1T segments.
+     *
+     *   This is safe as if HV KVM ever supports that capability or PR
+     *   KVM grows supports for more page/segment sizes, those versions
+     *   will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
+     *   will not hit this fallback
+     *
+     * - Else we are running HV KVM. This means we only support page
+     *   sizes that fit in the backing store. Additionally we only
+     *   advertize 64K pages if the processor is ARCH 2.06 and we assume
+     *   P7 encodings for the SLB and hash table. Here too, we assume
+     *   support for any newer processor will mean a kernel that
+     *   implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
+     *   this fallback.
+     */
+    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
+        /* No flags */
+        info->flags = 0;
+        info->slb_size = 64;
+
+        /* Standard 4k base page size segment */
+        info->sps[0].page_shift = 12;
+        info->sps[0].slb_enc = 0;
+        info->sps[0].enc[0].page_shift = 12;
+        info->sps[0].enc[0].pte_enc = 0;
+
+        /* Standard 16M large page size segment */
+        info->sps[1].page_shift = 24;
+        info->sps[1].slb_enc = SLB_VSID_L;
+        info->sps[1].enc[0].page_shift = 24;
+        info->sps[1].enc[0].pte_enc = 0;
+    } else {
+        int i = 0;
+
+        /* HV KVM has backing store size restrictions */
+        info->flags = KVM_PPC_PAGE_SIZES_REAL;
+
+        if (env->mmu_model & POWERPC_MMU_1TSEG) {
+            info->flags |= KVM_PPC_1T_SEGMENTS;
+        }
+
+        if (env->mmu_model == POWERPC_MMU_2_06) {
+            info->slb_size = 32;
+        } else {
+            info->slb_size = 64;
+        }
+
+        /* Standard 4k base page size segment */
+        info->sps[i].page_shift = 12;
+        info->sps[i].slb_enc = 0;
+        info->sps[i].enc[0].page_shift = 12;
+        info->sps[i].enc[0].pte_enc = 0;
+        i++;
+
+        /* 64K on MMU 2.06 */
+        if (env->mmu_model == POWERPC_MMU_2_06) {
+            info->sps[i].page_shift = 16;
+            info->sps[i].slb_enc = 0x110;
+            info->sps[i].enc[0].page_shift = 16;
+            info->sps[i].enc[0].pte_enc = 1;
+            i++;
+        }
+
+        /* Standard 16M large page size segment */
+        info->sps[i].page_shift = 24;
+        info->sps[i].slb_enc = SLB_VSID_L;
+        info->sps[i].enc[0].page_shift = 24;
+        info->sps[i].enc[0].pte_enc = 0;
+    }
+}
+
+static void kvm_get_smmu_info(CPUPPCState *env, struct kvm_ppc_smmu_info *info)
+{
+    int ret;
+
+    if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
+        ret = kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
+        if (ret == 0) {
+            return;
+        }
+    }
+
+    kvm_get_fallback_smmu_info(env, info);
+}
+
+static long getrampagesize(void)
+{
+    struct statfs fs;
+    int ret;
+
+    if (!mem_path) {
+        /* guest RAM is backed by normal anonymous pages */
+        return getpagesize();
+    }
+
+    do {
+        ret = statfs(mem_path, &fs);
+    } while (ret != 0 && errno == EINTR);
+
+    if (ret != 0) {
+        fprintf(stderr, "Couldn't statfs() memory path: %s\n",
+                strerror(errno));
+        exit(1);
+    }
+
+#define HUGETLBFS_MAGIC 0x958458f6
+
+    if (fs.f_type != HUGETLBFS_MAGIC) {
+        /* Explicit mempath, but it's ordinary pages */
+        return getpagesize();
+    }
+
+    /* It's hugepage, return the huge page size */
+    return fs.f_bsize;
+}
+
+static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
+{
+    if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
+        return true;
+    }
+
+    return (1ul << shift) <= rampgsize;
+}
+
+static void kvm_fixup_page_sizes(CPUPPCState *env)
+{
+    static struct kvm_ppc_smmu_info smmu_info;
+    static bool has_smmu_info;
+    long rampagesize;
+    int iq, ik, jq, jk;
+
+    /* We only handle page sizes for 64-bit server guests for now */
+    if (!(env->mmu_model & POWERPC_MMU_64)) {
+        return;
+    }
+
+    /* Collect MMU info from kernel if not already */
+    if (!has_smmu_info) {
+        kvm_get_smmu_info(env, &smmu_info);
+        has_smmu_info = true;
+    }
+
+    rampagesize = getrampagesize();
+
+    /* Convert to QEMU form */
+    memset(&env->sps, 0, sizeof(env->sps));
+
+    for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
+        struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
+        struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
+
+        if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+                                 ksps->page_shift)) {
+            continue;
+        }
+        qsps->page_shift = ksps->page_shift;
+        qsps->slb_enc = ksps->slb_enc;
+        for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
+            if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+                                     ksps->enc[jk].page_shift)) {
+                continue;
+            }
+            qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
+            qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
+            if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
+                break;
+            }
+        }
+        if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
+            break;
+        }
+    }
+    env->slb_nr = smmu_info.slb_size;
+    if (smmu_info.flags & KVM_PPC_1T_SEGMENTS) {
+        env->mmu_model |= POWERPC_MMU_1TSEG;
+    } else {
+        env->mmu_model &= ~POWERPC_MMU_1TSEG;
+    }
+}
+#else /* defined (TARGET_PPC64) */
+
+static inline void kvm_fixup_page_sizes(CPUPPCState *env)
+{
+}
+
+#endif /* !defined (TARGET_PPC64) */
+
+int kvm_arch_init_vcpu(CPUPPCState *cenv)
+{
+    PowerPCCPU *cpu = ppc_env_get_cpu(cenv);
     int ret;
 
+    /* Gather server mmu info from KVM and update the CPU state */
+    kvm_fixup_page_sizes(cenv);
+
+    /* Synchronize sregs with kvm */
     ret = kvm_arch_sync_sregs(cenv);
     if (ret) {
         return ret;
     }
 
-    idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_env, cenv);
+    idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_cpu, cpu);
+
+    /* Some targets support access to KVM's guest TLB. */
+    switch (cenv->mmu_model) {
+    case POWERPC_MMU_BOOKE206:
+        ret = kvm_booke206_tlb_init(cenv);
+        break;
+    default:
+        break;
+    }
 
     return ret;
 }
 
-void kvm_arch_reset_vcpu(CPUState *env)
+void kvm_arch_reset_vcpu(CPUPPCState *env)
 {
 }
 
-int kvm_arch_put_registers(CPUState *env, int level)
+static void kvm_sw_tlb_put(CPUPPCState *env)
+{
+    struct kvm_dirty_tlb dirty_tlb;
+    unsigned char *bitmap;
+    int ret;
+
+    if (!env->kvm_sw_tlb) {
+        return;
+    }
+
+    bitmap = g_malloc((env->nb_tlb + 7) / 8);
+    memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
+
+    dirty_tlb.bitmap = (uintptr_t)bitmap;
+    dirty_tlb.num_dirty = env->nb_tlb;
+
+    ret = kvm_vcpu_ioctl(env, KVM_DIRTY_TLB, &dirty_tlb);
+    if (ret) {
+        fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
+                __func__, strerror(-ret));
+    }
+
+    g_free(bitmap);
+}
+
+int kvm_arch_put_registers(CPUPPCState *env, int level)
 {
     struct kvm_regs regs;
     int ret;
@@ -163,10 +469,63 @@ int kvm_arch_put_registers(CPUState *env, int level)
     if (ret < 0)
         return ret;
 
+    if (env->tlb_dirty) {
+        kvm_sw_tlb_put(env);
+        env->tlb_dirty = false;
+    }
+
+    if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
+        struct kvm_sregs sregs;
+
+        sregs.pvr = env->spr[SPR_PVR];
+
+        sregs.u.s.sdr1 = env->spr[SPR_SDR1];
+
+        /* Sync SLB */
+#ifdef TARGET_PPC64
+        for (i = 0; i < 64; i++) {
+            sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
+            sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
+        }
+#endif
+
+        /* Sync SRs */
+        for (i = 0; i < 16; i++) {
+            sregs.u.s.ppc32.sr[i] = env->sr[i];
+        }
+
+        /* Sync BATs */
+        for (i = 0; i < 8; i++) {
+            /* Beware. We have to swap upper and lower bits here */
+            sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
+                | env->DBAT[1][i];
+            sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
+                | env->IBAT[1][i];
+        }
+
+        ret = kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
+        if (ret) {
+            return ret;
+        }
+    }
+
+    if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
+        uint64_t hior = env->spr[SPR_HIOR];
+        struct kvm_one_reg reg = {
+            .id = KVM_REG_PPC_HIOR,
+            .addr = (uintptr_t) &hior,
+        };
+
+        ret = kvm_vcpu_ioctl(env, KVM_SET_ONE_REG, &reg);
+        if (ret) {
+            return ret;
+        }
+    }
+
     return ret;
 }
 
-int kvm_arch_get_registers(CPUState *env)
+int kvm_arch_get_registers(CPUPPCState *env)
 {
     struct kvm_regs regs;
     struct kvm_sregs sregs;
@@ -343,7 +702,7 @@ int kvm_arch_get_registers(CPUState *env)
     return 0;
 }
 
-int kvmppc_set_interrupt(CPUState *env, int irq, int level)
+int kvmppc_set_interrupt(CPUPPCState *env, int irq, int level)
 {
     unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
 
@@ -368,12 +727,12 @@ int kvmppc_set_interrupt(CPUState *env, int irq, int level)
 #define PPC_INPUT_INT PPC6xx_INPUT_INT
 #endif
 
-void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
+void kvm_arch_pre_run(CPUPPCState *env, struct kvm_run *run)
 {
     int r;
     unsigned irq;
 
-    /* PowerPC Qemu tracks the various core input pins (interrupt, critical
+    /* PowerPC QEMU tracks the various core input pins (interrupt, critical
      * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
     if (!cap_interrupt_level &&
         run->ready_for_interrupt_injection &&
@@ -401,16 +760,16 @@ void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
      * anyways, so we will get a chance to deliver the rest. */
 }
 
-void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
+void kvm_arch_post_run(CPUPPCState *env, struct kvm_run *run)
 {
 }
 
-int kvm_arch_process_async_events(CPUState *env)
+int kvm_arch_process_async_events(CPUPPCState *env)
 {
-    return 0;
+    return env->halted;
 }
 
-static int kvmppc_handle_halt(CPUState *env)
+static int kvmppc_handle_halt(CPUPPCState *env)
 {
     if (!(env->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
         env->halted = 1;
@@ -421,7 +780,7 @@ static int kvmppc_handle_halt(CPUState *env)
 }
 
 /* map dcr access to existing qemu dcr emulation */
-static int kvmppc_handle_dcr_read(CPUState *env, uint32_t dcrn, uint32_t *data)
+static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
 {
     if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
         fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
@@ -429,7 +788,7 @@ static int kvmppc_handle_dcr_read(CPUState *env, uint32_t dcrn, uint32_t *data)
     return 0;
 }
 
-static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data)
+static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
 {
     if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
         fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
@@ -437,7 +796,7 @@ static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data)
     return 0;
 }
 
-int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUPPCState *env, struct kvm_run *run)
 {
     int ret;
 
@@ -455,6 +814,15 @@ int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
         dprintf("handle halt\n");
         ret = kvmppc_handle_halt(env);
         break;
+#ifdef CONFIG_PSERIES
+    case KVM_EXIT_PAPR_HCALL:
+        dprintf("handle PAPR hypercall\n");
+        run->papr_hcall.ret = spapr_hypercall(ppc_env_get_cpu(env),
+                                              run->papr_hcall.nr,
+                                              run->papr_hcall.args);
+        ret = 0;
+        break;
+#endif
     default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason); ret = -1; @@ -481,7 +849,7 @@ static int read_cpuinfo(const char *field, char *value, int len) break; } if (!strncmp(line, field, field_len)) { - strncpy(value, line, len); + pstrcpy(value, len, line); ret = 0; break; } @@ -545,38 +913,61 @@ static int kvmppc_find_cpu_dt(char *buf, int buf_len) return 0; } -uint64_t kvmppc_get_clockfreq(void) +/* Read a CPU node property from the host device tree that's a single + * integer (32-bit or 64-bit). Returns 0 if anything goes wrong + * (can't find or open the property, or doesn't understand the + * format) */ +static uint64_t kvmppc_read_int_cpu_dt(const char *propname) { - char buf[512]; - uint32_t tb[2]; + char buf[PATH_MAX]; + union { + uint32_t v32; + uint64_t v64; + } u; FILE *f; int len; if (kvmppc_find_cpu_dt(buf, sizeof(buf))) { - return 0; + return -1; } - strncat(buf, "/clock-frequency", sizeof(buf) - strlen(buf)); + strncat(buf, "/", sizeof(buf) - strlen(buf)); + strncat(buf, propname, sizeof(buf) - strlen(buf)); f = fopen(buf, "rb"); if (!f) { return -1; } - len = fread(tb, sizeof(tb[0]), 2, f); + len = fread(&u, 1, sizeof(u), f); fclose(f); switch (len) { - case 1: - /* freq is only a single cell */ - return tb[0]; - case 2: - return *(uint64_t*)tb; + case 4: + /* property is a 32-bit quantity */ + return be32_to_cpu(u.v32); + case 8: + return be64_to_cpu(u.v64); } return 0; } -int kvmppc_get_hypercall(CPUState *env, uint8_t *buf, int buf_len) +uint64_t kvmppc_get_clockfreq(void) +{ + return kvmppc_read_int_cpu_dt("clock-frequency"); +} + +uint32_t kvmppc_get_vmx(void) +{ + return kvmppc_read_int_cpu_dt("ibm,vmx"); +} + +uint32_t kvmppc_get_dfp(void) +{ + return kvmppc_read_int_cpu_dt("ibm,dfp"); +} + +int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len) { uint32_t *hc = (uint32_t*)buf; @@ -606,12 +997,240 @@ int kvmppc_get_hypercall(CPUState *env, uint8_t *buf, int buf_len) return 0; } -bool kvm_arch_stop_on_emulation_error(CPUState *env) +void kvmppc_set_papr(CPUPPCState *env) +{ + struct kvm_enable_cap cap = {}; + int ret; + + cap.cap = KVM_CAP_PPC_PAPR; + ret = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &cap); + + if (ret) { + cpu_abort(env, "This KVM version does not support PAPR\n"); + } +} + +int kvmppc_smt_threads(void) +{ + return cap_ppc_smt ? cap_ppc_smt : 1; +} + +#ifdef TARGET_PPC64 +off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem) +{ + void *rma; + off_t size; + int fd; + struct kvm_allocate_rma ret; + MemoryRegion *rma_region; + + /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported + * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but + * not necessary on this hardware + * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware + * + * FIXME: We should allow the user to force contiguous RMA + * allocation in the cap_ppc_rma==1 case. 
+     */
+    if (cap_ppc_rma < 2) {
+        return 0;
+    }
+
+    fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
+    if (fd < 0) {
+        fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
+                strerror(errno));
+        return -1;
+    }
+
+    size = MIN(ret.rma_size, 256ul << 20);
+
+    rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+    if (rma == MAP_FAILED) {
+        fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
+        return -1;
+    };
+
+    rma_region = g_new(MemoryRegion, 1);
+    memory_region_init_ram_ptr(rma_region, name, size, rma);
+    vmstate_register_ram_global(rma_region);
+    memory_region_add_subregion(sysmem, 0, rma_region);
+
+    return size;
+}
+
+uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
+{
+    if (cap_ppc_rma >= 2) {
+        return current_size;
+    }
+    return MIN(current_size,
+               getrampagesize() << (hash_shift - 7));
+}
+#endif
+
+void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
+{
+    struct kvm_create_spapr_tce args = {
+        .liobn = liobn,
+        .window_size = window_size,
+    };
+    long len;
+    int fd;
+    void *table;
+
+    /* Must set fd to -1 so we don't try to munmap when called for
+     * destroying the table, which the upper layers -will- do
+     */
+    *pfd = -1;
+    if (!cap_spapr_tce) {
+        return NULL;
+    }
+
+    fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
+    if (fd < 0) {
+        fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
+                liobn);
+        return NULL;
+    }
+
+    len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(sPAPRTCE);
+    /* FIXME: round this up to page size */
+
+    table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+    if (table == MAP_FAILED) {
+        fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
+                liobn);
+        close(fd);
+        return NULL;
+    }
+
+    *pfd = fd;
+    return table;
+}
+
+int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t window_size)
+{
+    long len;
+
+    if (fd < 0) {
+        return -1;
+    }
+
+    len = (window_size / SPAPR_TCE_PAGE_SIZE)*sizeof(sPAPRTCE);
+    if ((munmap(table, len) < 0) ||
+        (close(fd) < 0)) {
+        fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
+                strerror(errno));
+        /* Leak the table */
+    }
+
+    return 0;
+}
+
+int kvmppc_reset_htab(int shift_hint)
+{
+    uint32_t shift = shift_hint;
+
+    if (!kvm_enabled()) {
+        /* Full emulation, tell caller to allocate htab itself */
+        return 0;
+    }
+    if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
+        int ret;
+        ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
+        if (ret == -ENOTTY) {
+            /* At least some versions of PR KVM advertise the
+             * capability, but don't implement the ioctl(). Oops.
+             * Return 0 so that we allocate the htab in qemu, as is
+             * correct for PR. */
+            return 0;
+        } else if (ret < 0) {
+            return ret;
+        }
+        return shift;
+    }
+
+    /* We have a kernel that predates the htab reset calls. For PR
+     * KVM, we need to allocate the htab ourselves, for an HV KVM of
+     * this era, it has allocated a 16MB fixed size hash table
+     * already. Kernels of this era have the GET_PVINFO capability
+     * only on PR, so we use this hack to determine the right
+     * answer */
+    if (kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
+        /* PR - tell caller to allocate htab */
+        return 0;
+    } else {
+        /* HV - assume 16MB kernel allocated htab */
+        return 24;
+    }
+}
+
+static inline uint32_t mfpvr(void)
+{
+    uint32_t pvr;
+
+    asm ("mfpvr %0"
+         : "=r"(pvr));
+    return pvr;
+}
+
+static void alter_insns(uint64_t *word, uint64_t flags, bool on)
+{
+    if (on) {
+        *word |= flags;
+    } else {
+        *word &= ~flags;
+    }
+}
+
+const ppc_def_t *kvmppc_host_cpu_def(void)
+{
+    uint32_t host_pvr = mfpvr();
+    const ppc_def_t *base_spec;
+    ppc_def_t *spec;
+    uint32_t vmx = kvmppc_get_vmx();
+    uint32_t dfp = kvmppc_get_dfp();
+
+    base_spec = ppc_find_by_pvr(host_pvr);
+
+    spec = g_malloc0(sizeof(*spec));
+    memcpy(spec, base_spec, sizeof(*spec));
+
+    /* Now fix up the spec with information we can query from the host */
+
+    if (vmx != -1) {
+        /* Only override when we know what the host supports */
+        alter_insns(&spec->insns_flags, PPC_ALTIVEC, vmx > 0);
+        alter_insns(&spec->insns_flags2, PPC2_VSX, vmx > 1);
+    }
+    if (dfp != -1) {
+        /* Only override when we know what the host supports */
+        alter_insns(&spec->insns_flags2, PPC2_DFP, dfp);
+    }
+
+    return spec;
+}
+
+int kvmppc_fixup_cpu(CPUPPCState *env)
+{
+    int smt;
+
+    /* Adjust cpu index for SMT */
+    smt = kvmppc_smt_threads();
+    env->cpu_index = (env->cpu_index / smp_threads) * smt
+        + (env->cpu_index % smp_threads);
+
+    return 0;
+}
+
+
+bool kvm_arch_stop_on_emulation_error(CPUPPCState *env)
 {
     return true;
 }
 
-int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
+int kvm_arch_on_sigbus_vcpu(CPUPPCState *env, int code, void *addr)
 {
     return 1;
 }