KVM: arm64: Wrapper for getting pmu_events
author     Fuad Tabba <tabba@google.com>
           Tue, 10 May 2022 09:57:07 +0000 (09:57 +0000)
committer  Marc Zyngier <maz@kernel.org>
           Sun, 15 May 2022 10:24:17 +0000 (11:24 +0100)
Eases migrating away from using hyp data and simplifies the code.

No functional change intended.

Reviewed-by: Oliver Upton <oupton@google.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220510095710.148178-2-tabba@google.com
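For context, the wrapper added below keys off the per-CPU kvm_host_data and the kvm_pmu_events embedded in it. A rough sketch of the relevant definitions, trimmed to the fields this diff touches (see arch/arm64/include/asm/kvm_host.h for the authoritative layout at this series' base):

	/* Sketch for orientation only; trimmed to what the diff uses. */
	struct kvm_pmu_events {
		u32 events_host;	/* events to enable while running the host */
		u32 events_guest;	/* events to enable while running the guest */
	};

	struct kvm_host_data {
		struct kvm_cpu_context host_ctxt;
		struct kvm_pmu_events pmu_events;	/* reached via this_cpu_ptr_hyp_sym() */
	};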
arch/arm64/kvm/pmu.c

index 03a6c1f4a09af0a3e2321054e31640163cf79650..4bd38ff3422167d76e42b90e2f1a19b27c9d3ed2 100644
@@ -25,21 +25,31 @@ static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
        return (attr->exclude_host != attr->exclude_guest);
 }
 
+static struct kvm_pmu_events *kvm_get_pmu_events(void)
+{
+       struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+
+       if (!ctx)
+               return NULL;
+
+       return &ctx->pmu_events;
+}
+
 /*
  * Add events to track that we may want to switch at guest entry/exit
  * time.
  */
 void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
 {
-       struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+       struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-       if (!kvm_arm_support_pmu_v3() || !ctx || !kvm_pmu_switch_needed(attr))
+       if (!kvm_arm_support_pmu_v3() || !pmu || !kvm_pmu_switch_needed(attr))
                return;
 
        if (!attr->exclude_host)
-               ctx->pmu_events.events_host |= set;
+               pmu->events_host |= set;
        if (!attr->exclude_guest)
-               ctx->pmu_events.events_guest |= set;
+               pmu->events_guest |= set;
 }
 
 /*
@@ -47,13 +57,13 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
  */
 void kvm_clr_pmu_events(u32 clr)
 {
-       struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
+       struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-       if (!kvm_arm_support_pmu_v3() || !ctx)
+       if (!kvm_arm_support_pmu_v3() || !pmu)
                return;
 
-       ctx->pmu_events.events_host &= ~clr;
-       ctx->pmu_events.events_guest &= ~clr;
+       pmu->events_host &= ~clr;
+       pmu->events_guest &= ~clr;
 }
 
 #define PMEVTYPER_READ_CASE(idx)                               \
@@ -169,16 +179,16 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
-       struct kvm_host_data *host;
+       struct kvm_pmu_events *pmu;
        u32 events_guest, events_host;
 
        if (!kvm_arm_support_pmu_v3() || !has_vhe())
                return;
 
        preempt_disable();
-       host = this_cpu_ptr_hyp_sym(kvm_host_data);
-       events_guest = host->pmu_events.events_guest;
-       events_host = host->pmu_events.events_host;
+       pmu = kvm_get_pmu_events();
+       events_guest = pmu->events_guest;
+       events_host = pmu->events_host;
 
        kvm_vcpu_pmu_enable_el0(events_guest);
        kvm_vcpu_pmu_disable_el0(events_host);
@@ -190,15 +200,15 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
  */
 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
 {
-       struct kvm_host_data *host;
+       struct kvm_pmu_events *pmu;
        u32 events_guest, events_host;
 
        if (!kvm_arm_support_pmu_v3() || !has_vhe())
                return;
 
-       host = this_cpu_ptr_hyp_sym(kvm_host_data);
-       events_guest = host->pmu_events.events_guest;
-       events_host = host->pmu_events.events_host;
+       pmu = kvm_get_pmu_events();
+       events_guest = pmu->events_guest;
+       events_host = pmu->events_host;
 
        kvm_vcpu_pmu_enable_el0(events_host);
        kvm_vcpu_pmu_disable_el0(events_guest);
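
On the commit message's point about easing migration: since all call sites now go through kvm_get_pmu_events(), a later change can stop dereferencing the hyp-mapped kvm_host_data without touching the callers. A hypothetical sketch of what such a follow-up could look like; the per-CPU variable and its name are assumptions for illustration, not part of this patch:

	/* Hypothetical follow-up sketch: keep the event masks in a host-only
	 * per-CPU variable so the accessor no longer reaches into hyp data.
	 * Variable name and placement are assumed here for illustration.
	 */
	static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);

	static struct kvm_pmu_events *kvm_get_pmu_events(void)
	{
		return this_cpu_ptr(&kvm_pmu_events);
	}

With the wrapper in place, only this one function body would need to change; kvm_set_pmu_events(), kvm_clr_pmu_events() and the restore paths keep reading pmu->events_host / pmu->events_guest unchanged.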