arm64: KVM: Add a new feature bit for PMUv3
author     Shannon Zhao <shannon.zhao@linaro.org>
           Mon, 11 Jan 2016 14:46:15 +0000 (22:46 +0800)
committer  Marc Zyngier <marc.zyngier@arm.com>
           Mon, 29 Feb 2016 18:34:21 +0000 (18:34 +0000)
To support a guest PMUv3, use one bit of the VCPU INIT feature array.
The PMU is initialized when the vcpu is initialized with that bit set
and the PMU overflow interrupt configured.

Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Acked-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
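
For illustration only (not part of this commit): a minimal userspace sketch of how a VMM could request the new feature. The helper name vcpu_enable_pmu_v3() is hypothetical; it queries the preferred target, sets the KVM_ARM_VCPU_PMU_V3 bit in the feature array and issues KVM_ARM_VCPU_INIT. Configuring the PMU overflow interrupt is a separate step and is not shown here.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: request PMUv3 emulation for one vcpu. */
static int vcpu_enable_pmu_v3(int vm_fd, int vcpu_fd)
{
        struct kvm_vcpu_init init;

        /* Let KVM pick the preferred target; features[] comes back zeroed. */
        if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
                return -1;

        /* Ask for guest PMUv3 emulation on top of the default features. */
        init.features[0] |= 1U << KVM_ARM_VCPU_PMU_V3;

        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}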
Documentation/virtual/kvm/api.txt
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/uapi/asm/kvm.h
arch/arm64/kvm/reset.c
include/kvm/arm_pmu.h
include/uapi/linux/kvm.h
virt/kvm/arm/pmu.c

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 07e4cdf024073033e2a52213bcbed4d562111a33..9684f8dc6bb2414934427adee6f7169ed0bf3239 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2577,6 +2577,8 @@ Possible features:
          Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
        - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
          Depends on KVM_CAP_ARM_PSCI_0_2.
+       - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
+         Depends on KVM_CAP_ARM_PMU_V3.
 
 
 4.83 KVM_ARM_PREFERRED_TARGET
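
As a hedged usage sketch (not part of this patch): before setting the feature bit, userspace would typically probe the new capability; kvm_fd below is assumed to be an open /dev/kvm (or VM) file descriptor.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns non-zero when the kernel advertises KVM_CAP_ARM_PMU_V3. */
static int host_has_pmu_v3(int kvm_fd)
{
        return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_PMU_V3) > 0;
}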
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a819c6debce40fead71b3f9507a1b3c1484be6a4..b02ef0828f220dca7e09984acb2218f4130f0caf 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -42,7 +42,7 @@
 
 #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
 
-#define KVM_VCPU_MAX_FEATURES 3
+#define KVM_VCPU_MAX_FEATURES 4
 
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 2d4ca4bb0dd34a18ed6bfde2d5fa4e4319402903..6aedbe3144320cd48301545bbf91745635195026 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -94,6 +94,7 @@ struct kvm_regs {
 #define KVM_ARM_VCPU_POWER_OFF         0 /* CPU is started in OFF state */
 #define KVM_ARM_VCPU_EL1_32BIT         1 /* CPU running a 32bit VM */
 #define KVM_ARM_VCPU_PSCI_0_2          2 /* CPU uses PSCI v0.2 */
+#define KVM_ARM_VCPU_PMU_V3            3 /* Support guest PMUv3 */
 
 struct kvm_vcpu_init {
        __u32 target;
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index dfbce781d284d6a92ef71f88cd3c54305c1e3bbf..cf4f28a7a5144ccaa79e92194801797c943aeaf1 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -77,6 +77,9 @@ int kvm_arch_dev_ioctl_check_extension(long ext)
        case KVM_CAP_GUEST_DEBUG_HW_WPS:
                r = get_num_wrps();
                break;
+       case KVM_CAP_ARM_PMU_V3:
+               r = kvm_arm_support_pmu_v3();
+               break;
        case KVM_CAP_SET_GUEST_DEBUG:
                r = 1;
                break;
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 9f87d717ef84234ea22f3e725a4c5b4b161d725d..ee62497d46f7559f275e0ff3a8811c18ce026d9e 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -53,6 +53,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
 void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx);
+bool kvm_arm_support_pmu_v3(void);
 #else
 struct kvm_pmu {
 };
@@ -80,6 +81,7 @@ static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
 static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
                                                  u64 data, u64 select_idx) {}
+static inline bool kvm_arm_support_pmu_v3(void) { return false; }
 #endif
 
 #endif
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 9da905157ceeebcb5afbd9e73b8ac56195954929..dc16d3084d4a4f706b7d3dd5a95c4d29a34e6a51 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -850,6 +850,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_IOEVENTFD_ANY_LENGTH 122
 #define KVM_CAP_HYPERV_SYNIC 123
 #define KVM_CAP_S390_RI 124
+#define KVM_CAP_ARM_PMU_V3 125
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 9b83857da195e11107b3939f1e3d0b4c0012fb9e..6e28f4f86cc6775e836f28f5a9d7f19f523ea33a 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -405,3 +405,13 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
 
        pmc->perf_event = event;
 }
+
+bool kvm_arm_support_pmu_v3(void)
+{
+       /*
+        * Check if HW_PERF_EVENTS are supported by checking the number of
+        * hardware performance counters. This could ensure the presence of
+        * a physical PMU and CONFIG_PERF_EVENT is selected.
+        */
+       return (perf_num_counters() > 0);
+}
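
For context, a sketch (not in this diff) of how in-kernel code can consume the new feature bit: vcpu->arch.features is the bitmap filled from the KVM_ARM_VCPU_INIT feature array, so PMU code can gate itself on it. The helper name kvm_vcpu_requested_pmu_v3() is hypothetical; the actual consumers are added by later patches in this series.

/* Hypothetical helper: did userspace request an emulated PMUv3 for this vcpu? */
static inline bool kvm_vcpu_requested_pmu_v3(struct kvm_vcpu *vcpu)
{
        return test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features);
}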