KVM: PPC: Book3S HV: Store LPCR value for each virtual core

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 62a2b5ab08eda0399bf009b81eebd36d587763d8..36eb95cc48ae7516ed981c8713119cd888966747 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -195,7 +195,7 @@ void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
                pr_err("  ESID = %.16llx VSID = %.16llx\n",
                       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
        pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
-              vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
+              vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
               vcpu->arch.last_inst);
 }
 
@@ -489,7 +489,7 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
        memset(dt, 0, sizeof(struct dtl_entry));
        dt->dispatch_reason = 7;
        dt->processor_id = vc->pcpu + vcpu->arch.ptid;
-       dt->timebase = now;
+       dt->timebase = now + vc->tb_offset;
        dt->enqueue_to_dispatch_time = stolen;
        dt->srr0 = kvmppc_get_pc(vcpu);
        dt->srr1 = vcpu->arch.shregs.msr;
@@ -538,6 +538,15 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                }
                break;
        case H_CONFER:
+               target = kvmppc_get_gpr(vcpu, 4);
+               if (target == -1)
+                       break;
+               tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+               if (!tvcpu) {
+                       ret = H_PARAMETER;
+                       break;
+               }
+               kvm_vcpu_yield_to(tvcpu);
                break;
        case H_REGISTER_VPA:
                ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
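
With this hunk H_CONFER becomes a real directed yield instead of a silent no-op: the guest passes a target vCPU number in GPR4 (-1, meaning "confer to all partners", is accepted and simply succeeds), the host looks the target up with kvmppc_find_vcpu() and calls kvm_vcpu_yield_to() on it. For context, the usual guest-side caller on pseries is the shared-processor spinlock code; a minimal sketch of that pattern (not part of this patch, and the helper name is illustrative):

	/*
	 * Guest-side sketch: confer our remaining timeslice to the
	 * vCPU currently holding a contended lock.  Modeled on the
	 * powerpc splpar spinlock yield path.
	 */
	#include <linux/types.h>
	#include <asm/hvcall.h>
	#include <asm/plpar_wrappers.h>
	#include <asm/smp.h>

	static void yield_to_lock_holder(int holder_cpu, u32 yield_count)
	{
		/* The host validates the target and returns H_PARAMETER
		 * if no such vCPU exists, per the hunk above. */
		plpar_hcall_norets(H_CONFER,
				   get_hard_smp_processor_id(holder_cpu),
				   yield_count);
	}
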
@@ -714,6 +723,21 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
+{
+       struct kvmppc_vcore *vc = vcpu->arch.vcore;
+       u64 mask;
+
+       spin_lock(&vc->lock);
+       /*
+        * Userspace can only modify DPFD (default prefetch depth),
+        * ILE (interrupt little-endian) and TC (translation control).
+        */
+       mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
+       vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
+       spin_unlock(&vc->lock);
+}
+
 int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
 {
        int r = 0;
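
kvmppc_set_lpcr() is deliberately conservative: whatever 64-bit value userspace supplies, only the DPFD, ILE and TC fields are merged into the vcore's LPCR under vc->lock, so hypervisor-critical bits can never be clobbered through this path. It is reached via the KVM_REG_PPC_LPCR one-reg case wired up later in this diff; a hedged userspace sketch (vcpu_fd and the helper name are illustrative, error handling omitted):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Write the guest LPCR through the one-reg API.  Bits outside
	 * DPFD|ILE|TC are silently dropped by kvmppc_set_lpcr(). */
	static int set_guest_lpcr(int vcpu_fd, uint64_t lpcr_val)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_PPC_LPCR,
			.addr = (uintptr_t)&lpcr_val,
		};
		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
	}
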
@@ -749,6 +773,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
                i = id - KVM_REG_PPC_PMC1;
                *val = get_reg_val(id, vcpu->arch.pmc[i]);
                break;
+       case KVM_REG_PPC_SIAR:
+               *val = get_reg_val(id, vcpu->arch.siar);
+               break;
+       case KVM_REG_PPC_SDAR:
+               *val = get_reg_val(id, vcpu->arch.sdar);
+               break;
 #ifdef CONFIG_VSX
        case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -787,6 +817,12 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
                val->vpaval.length = vcpu->arch.dtl.len;
                spin_unlock(&vcpu->arch.vpa_update_lock);
                break;
+       case KVM_REG_PPC_TB_OFFSET:
+               *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
+               break;
+       case KVM_REG_PPC_LPCR:
+               *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
+               break;
        default:
                r = -EINVAL;
                break;
@@ -833,6 +869,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
                i = id - KVM_REG_PPC_PMC1;
                vcpu->arch.pmc[i] = set_reg_val(id, *val);
                break;
+       case KVM_REG_PPC_SIAR:
+               vcpu->arch.siar = set_reg_val(id, *val);
+               break;
+       case KVM_REG_PPC_SDAR:
+               vcpu->arch.sdar = set_reg_val(id, *val);
+               break;
 #ifdef CONFIG_VSX
        case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
                if (cpu_has_feature(CPU_FTR_VSX)) {
@@ -880,6 +922,14 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
                len -= len % sizeof(struct dtl_entry);
                r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
                break;
+       case KVM_REG_PPC_TB_OFFSET:
+               /* round up to multiple of 2^24 */
+               vcpu->arch.vcore->tb_offset =
+                       ALIGN(set_reg_val(id, *val), 1UL << 24);
+               break;
+       case KVM_REG_PPC_LPCR:
+               kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
+               break;
        default:
                r = -EINVAL;
                break;
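
Two points are worth noting in the new set-side cases. KVM_REG_PPC_TB_OFFSET rounds the requested guest timebase offset up to a multiple of 2^24, presumably because the offset is applied through the TBU40 register, which can only set the upper 40 bits of the 64-bit timebase. ALIGN() is the usual kernel round-up macro for power-of-two alignments; a standalone illustration of the rounding (values arbitrary):

	#include <stdio.h>
	#include <stdint.h>

	/* Same semantics as the kernel's ALIGN() for power-of-two a:
	 * round x up to the next multiple of a. */
	#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		uint64_t req = 0x0123456789abcdefULL;

		/* 0x0123456789abcdef rounds up to 0x012345678a000000:
		 * the low 24 bits end up zero. */
		printf("%#llx -> %#llx\n",
		       (unsigned long long)req,
		       (unsigned long long)ALIGN(req, 1ULL << 24));
		return 0;
	}

KVM_REG_PPC_LPCR, by contrast, does not store the raw value but funnels it through kvmppc_set_lpcr() above, so the userspace-writable mask is enforced in one place.
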
@@ -940,6 +990,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
                        spin_lock_init(&vcore->lock);
                        init_waitqueue_head(&vcore->wq);
                        vcore->preempt_tb = TB_NIL;
+                       vcore->lpcr = kvm->arch.lpcr;
                }
                kvm->arch.vcores[core] = vcore;
                kvm->arch.online_vcores++;
@@ -1729,6 +1780,32 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
        }
 }
 
+/*
+ * Update LPCR values in kvm->arch and in vcores.
+ * Caller must hold kvm->lock.
+ */
+void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
+{
+       long int i;
+       u32 cores_done = 0;
+
+       if ((kvm->arch.lpcr & mask) == lpcr)
+               return;
+
+       kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
+
+       for (i = 0; i < KVM_MAX_VCORES; ++i) {
+               struct kvmppc_vcore *vc = kvm->arch.vcores[i];
+               if (!vc)
+                       continue;
+               spin_lock(&vc->lock);
+               vc->lpcr = (vc->lpcr & ~mask) | lpcr;
+               spin_unlock(&vc->lock);
+               if (++cores_done >= kvm->arch.online_vcores)
+                       break;
+       }
+}
+
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
        int err = 0;
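
kvmppc_update_lpcr() is the bulk-update counterpart of kvmppc_set_lpcr(): it refreshes the cached kvm->arch.lpcr, then walks the vcore array taking each vc->lock in turn, and stops early once every online vcore has been visited. Holding kvm->lock, as the comment demands, is what serializes this walk against kvmppc_core_vcpu_create() instantiating new vcores, which now inherit kvm->arch.lpcr at creation time. A hypothetical caller, purely for illustration:

	/* Hypothetical use (not in this patch): flip the guest's
	 * interrupt-little-endian bit in every virtual core. */
	mutex_lock(&kvm->lock);
	kvmppc_update_lpcr(kvm, LPCR_ILE, LPCR_ILE);
	mutex_unlock(&kvm->lock);
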
@@ -1737,7 +1814,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
        unsigned long hva;
        struct kvm_memory_slot *memslot;
        struct vm_area_struct *vma;
-       unsigned long lpcr, senc;
+       unsigned long lpcr = 0, senc;
+       unsigned long lpcr_mask = 0;
        unsigned long psize, porder;
        unsigned long rma_size;
        unsigned long rmls;
@@ -1802,9 +1880,9 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
                senc = slb_pgsize_encoding(psize);
                kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
                        (VRMA_VSID << SLB_VSID_SHIFT_1T);
-               lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
-               lpcr |= senc << (LPCR_VRMASD_SH - 4);
-               kvm->arch.lpcr = lpcr;
+               lpcr_mask = LPCR_VRMASD;
+               /* the -4 is to account for senc values starting at 0x10 */
+               lpcr = senc << (LPCR_VRMASD_SH - 4);
 
                /* Create HPTEs in the hash page table for the VRMA */
                kvmppc_map_vrma(vcpu, memslot, porder);
@@ -1825,23 +1903,21 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
                kvm->arch.rma = ri;
 
                /* Update LPCR and RMOR */
-               lpcr = kvm->arch.lpcr;
                if (cpu_has_feature(CPU_FTR_ARCH_201)) {
                        /* PPC970; insert RMLS value (split field) in HID4 */
-                       lpcr &= ~((1ul << HID4_RMLS0_SH) |
-                                 (3ul << HID4_RMLS2_SH));
-                       lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
+                       lpcr_mask = (1ul << HID4_RMLS0_SH) |
+                               (3ul << HID4_RMLS2_SH) | HID4_RMOR;
+                       lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
                                ((rmls & 3) << HID4_RMLS2_SH);
                        /* RMOR is also in HID4 */
                        lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
                                << HID4_RMOR_SH;
                } else {
                        /* POWER7 */
-                       lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
-                       lpcr |= rmls << LPCR_RMLS_SH;
+                       lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
+                       lpcr = rmls << LPCR_RMLS_SH;
                        kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
                }
-               kvm->arch.lpcr = lpcr;
                pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
                        ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
 
@@ -1860,6 +1936,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
                }
        }
 
+       kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
        /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
        smp_wmb();
        kvm->arch.rma_setup_done = 1;