Merge branch 'kvm-amd-pmu-fixes' into HEAD
author    Paolo Bonzini <pbonzini@redhat.com>  Tue, 3 May 2022 11:57:40 +0000 (07:57 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>  Tue, 3 May 2022 12:07:54 +0000 (08:07 -0400)
arch/x86/kvm/cpuid.c
arch/x86/kvm/svm/pmu.c

diff --combined arch/x86/kvm/cpuid.c
index 598334ed5fbc8db78fb6ee5e2492d6a47884a339,732724ea5b100936ec46639df6e236858a354d7f..0c1ba6aa07651f4d2698b004c35c27aa1b406118
@@@ -887,6 -887,11 +887,11 @@@ static inline int __do_cpuid_func(struc
                union cpuid10_eax eax;
                union cpuid10_edx edx;
  
+               if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
+                       entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+                       break;
+               }
+
                perf_get_x86_pmu_capability(&cap);
  
                /*
        case 0x80000000:
                entry->eax = min(entry->eax, 0x80000021);
                /*
 -               * Serializing LFENCE is reported in a multitude of ways,
 -               * and NullSegClearsBase is not reported in CPUID on Zen2;
 -               * help userspace by providing the CPUID leaf ourselves.
 +               * Serializing LFENCE is reported in a multitude of ways, and
 +               * NullSegClearsBase is not reported in CPUID on Zen2; help
 +               * userspace by providing the CPUID leaf ourselves.
 +               *
 +               * However, only do it if the host has CPUID leaf 0x8000001d.
 +               * QEMU thinks that it can query the host blindly for that
 +               * CPUID leaf if KVM reports that it supports 0x8000001d or
 +               * above.  The processor merrily returns values from the
 +               * highest Intel leaf which QEMU tries to use as the guest's
 +               * 0x8000001d.  Even worse, this can result in an infinite
 +               * loop if said highest leaf has no subleaves indexed by ECX.
                 */
 -              if (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
 -                  || !static_cpu_has_bug(X86_BUG_NULL_SEG))
 +              if (entry->eax >= 0x8000001d &&
 +                  (static_cpu_has(X86_FEATURE_LFENCE_RDTSC)
 +                   || !static_cpu_has_bug(X86_BUG_NULL_SEG)))
                        entry->eax = max(entry->eax, 0x80000021);
                break;
        case 0x80000001:
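The failure mode described in the comment above deserves a concrete illustration. The following is a minimal, hypothetical userspace sketch (not QEMU's actual code) of the kind of CPUID 0x8000001d cache-topology walk the comment warns about: if the host's highest extended leaf is below 0x8000001d, the processor answers the out-of-range query with the contents of its highest basic (Intel) leaf, so the cache-type field in EAX may never read zero and the loop never terminates. Only the bit layout of leaf 0x8000001d (cache type in EAX[4:0], cache level in EAX[7:5]) is architectural; everything else is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <cpuid.h>	/* GCC/clang helpers for the CPUID instruction */

int main(void)
{
	uint32_t eax, ebx, ecx, edx;
	unsigned int subleaf;

	/* Walk cache-topology subleaves until one reports type 0 (null). */
	for (subleaf = 0; ; subleaf++) {
		__cpuid_count(0x8000001d, subleaf, eax, ebx, ecx, edx);
		if ((eax & 0x1f) == 0)	/* cache type 0: no more caches */
			break;
		printf("L%u cache, type %u\n", (eax >> 5) & 0x7, eax & 0x1f);
	}
	return 0;
}

The hunk above closes off this trap on the KVM side: the maximum extended leaf is raised to 0x80000021 only when the host itself reports at least 0x8000001d, so any intermediate leaf a VMM queries blindly actually exists.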
diff --combined arch/x86/kvm/svm/pmu.c
index b14860863c39417e3ab196c5e54c9b6e392eac19,311cbaa0c3ddfbc8874f5c920a7d12eab6130c6b..16a5ebb420cfe428fd451c086f7e72be30a2ef9a
@@@ -45,6 -45,22 +45,22 @@@ static struct kvm_event_hw_type_mappin
        [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
  };
  
+ /* duplicated from amd_f17h_perfmon_event_map. */
+ static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
+       [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
+       [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
+       [2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
+       [3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
+       [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+       [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+       [6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
+       [7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
+ };
+
+ /* amd_pmc_perf_hw_id depends on these being the same size */
+ static_assert(ARRAY_SIZE(amd_event_mapping) ==
+            ARRAY_SIZE(amd_f17h_event_mapping));
+
  static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
  {
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
@@@ -140,6 -156,7 +156,7 @@@ static inline struct kvm_pmc *get_gp_pm
  
  static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
  {
+       struct kvm_event_hw_type_mapping *event_mapping;
        u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
        u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
        int i;
        if (WARN_ON(pmc_is_fixed(pmc)))
                return PERF_COUNT_HW_MAX;
  
+       if (guest_cpuid_family(pmc->vcpu) >= 0x17)
+               event_mapping = amd_f17h_event_mapping;
+       else
+               event_mapping = amd_event_mapping;
+
        for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
-               if (amd_event_mapping[i].eventsel == event_select
-                   && amd_event_mapping[i].unit_mask == unit_mask)
+               if (event_mapping[i].eventsel == event_select
+                   && event_mapping[i].unit_mask == unit_mask)
                        break;
  
        if (i == ARRAY_SIZE(amd_event_mapping))
                return PERF_COUNT_HW_MAX;
  
-       return amd_event_mapping[i].event_type;
+       return event_mapping[i].event_type;
  }
  
  /* check if a PMC is enabled by comparing it against global_ctrl bits. Because
@@@ -257,7 -279,6 +279,7 @@@ static int amd_pmu_set_msr(struct kvm_v
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                pmc->counter += data - pmc_read_counter(pmc);
 +              pmc_update_sample_period(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
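As a standalone illustration of the lookup amd_pmc_perf_hw_id now performs, the hypothetical userspace sketch below splits a raw event-select value into its event-select (bits 7:0) and unit-mask (bits 15:8) fields, mirroring the ARCH_PERFMON_EVENTSEL_* masking in the patch, and searches a copy of the family-17h table from the hunk above; string names stand in for the kernel's PERF_COUNT_HW_* ids.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct event_map {
	uint8_t eventsel;
	uint8_t unit_mask;
	const char *name;	/* stand-in for a PERF_COUNT_HW_* id */
};

/* Family 17h (Zen) encodings, copied from amd_f17h_event_mapping. */
static const struct event_map f17h_map[] = {
	{ 0x76, 0x00, "cpu-cycles" },
	{ 0xc0, 0x00, "instructions" },
	{ 0x60, 0xff, "cache-references" },
	{ 0x64, 0x09, "cache-misses" },
	{ 0xc2, 0x00, "branch-instructions" },
	{ 0xc3, 0x00, "branch-misses" },
	{ 0x87, 0x02, "stalled-cycles-frontend" },
	{ 0x87, 0x01, "stalled-cycles-backend" },
};

static const char *perf_hw_id(uint64_t evtsel)
{
	uint8_t event_select = evtsel & 0xff;		/* bits 7:0 */
	uint8_t unit_mask = (evtsel >> 8) & 0xff;	/* bits 15:8 */
	size_t i;

	for (i = 0; i < sizeof(f17h_map) / sizeof(f17h_map[0]); i++)
		if (f17h_map[i].eventsel == event_select &&
		    f17h_map[i].unit_mask == unit_mask)
			return f17h_map[i].name;
	return "unmapped";	/* the kernel returns PERF_COUNT_HW_MAX */
}

int main(void)
{
	/* Event 0x87, unit mask 0x02: frontend stalls on family 17h. */
	printf("%s\n", perf_hw_id(0x0287));
	return 0;
}

Note the invariant the static_assert enforces: the two kernel tables must have the same number of entries, because the lookup loop is bounded by ARRAY_SIZE(amd_event_mapping) even when the family-17h table has been selected.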