x86/cpu: Sanitize FAM6_ATOM naming
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 731153a4681e73f761dea8c0c15ce6757b89860e..7e12ce4cda6707d546117990dafc3434083177bf 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2201,9 +2201,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        int bit, loops;
        u64 status;
        int handled;
+       int pmu_enabled;
 
        cpuc = this_cpu_ptr(&cpu_hw_events);
 
+       /*
+        * Save the PMU state.
+        * It needs to be restored when leaving the handler.
+        */
+       pmu_enabled = cpuc->enabled;
        /*
         * No known reason to not always do late ACK,
         * but just in case do it opt-in.
@@ -2211,6 +2217,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        if (!x86_pmu.late_ack)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        intel_bts_disable_local();
+       cpuc->enabled = 0;
        __intel_pmu_disable_all();
        handled = intel_pmu_drain_bts_buffer();
        handled += intel_bts_interrupt();
@@ -2320,7 +2327,8 @@ again:
 
 done:
        /* Only restore PMU state when it's active. See x86_pmu_disable(). */
-       if (cpuc->enabled)
+       cpuc->enabled = pmu_enabled;
+       if (pmu_enabled)
                __intel_pmu_enable_all(0, true);
        intel_bts_enable_local();
 
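The three hunks above implement a save/disable/restore pattern for the software PMU state: cpuc->enabled is captured on entry to the NMI handler, forced to 0 while the counters are disabled, and written back before the conditional re-enable, presumably so that code running inside the handler sees a consistent "disabled" state. A minimal standalone sketch of the same pattern, using a hypothetical pmu_state struct and stub enable/disable helpers rather than the kernel API:

#include <stdio.h>

struct pmu_state {
        int enabled;            /* software view: 1 if counters should be running */
};

static void hw_disable_all(void) { puts("counters disabled"); }
static void hw_enable_all(void)  { puts("counters enabled"); }

/*
 * Sketch of the handler pattern: save the flag, clear it while the
 * hardware is disabled, then restore it and only re-enable if the PMU
 * was active when the handler was entered.
 */
static void handle_irq(struct pmu_state *st)
{
        int pmu_enabled = st->enabled;  /* save on entry */

        st->enabled = 0;                /* software state matches the disabled hardware */
        hw_disable_all();

        /* ... drain buffers, process overflow status ... */

        st->enabled = pmu_enabled;      /* restore before leaving */
        if (pmu_enabled)
                hw_enable_all();
}

int main(void)
{
        struct pmu_state st = { .enabled = 1 };

        handle_irq(&st);
        return 0;
}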
@@ -3188,13 +3196,13 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
  * Therefore the effective (average) period matches the requested period,
  * despite coarser hardware granularity.
  */
-static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
+static u64 bdw_limit_period(struct perf_event *event, u64 left)
 {
        if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
                        X86_CONFIG(.event=0xc0, .umask=0x01)) {
                if (left < 128)
                        left = 128;
-               left &= ~0x3fu;
+               left &= ~0x3fULL;
        }
        return left;
 }
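The switch from unsigned to u64 is not cosmetic: with the old prototype a 64-bit period is truncated when passed in, and ~0x3fu is a 32-bit constant, so zero-extending it for a 64-bit AND would also clear bits 32-63 of the period. A small standalone C illustration of that masking difference (ordinary userspace C, not the kernel callback):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t left = 0x100000080ULL; /* a period that needs more than 32 bits */

        /*
         * ~0x3fu is only 32 bits wide (0xffffffc0); zero-extending it for the
         * 64-bit AND wipes out bits 32..63 of the period.
         */
        uint64_t wrong = left & ~0x3fu;

        /* ~0x3fULL keeps the upper half intact and only aligns down to 64. */
        uint64_t right = left & ~0x3fULL;

        printf("wrong: 0x%" PRIx64 "\n", wrong);        /* prints 0x80 */
        printf("right: 0x%" PRIx64 "\n", right);        /* prints 0x100000080 */
        return 0;
}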
@@ -3323,7 +3331,8 @@ static void intel_pmu_cpu_starting(int cpu)
 
        cpuc->lbr_sel = NULL;
 
-       flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
+       if (x86_pmu.version > 1)
+               flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
 
        if (!cpuc->shared_regs)
                return;
@@ -3486,6 +3495,8 @@ static __initconst const struct x86_pmu core_pmu = {
        .cpu_dying              = intel_pmu_cpu_dying,
 };
 
+static struct attribute *intel_pmu_attrs[];
+
 static __initconst const struct x86_pmu intel_pmu = {
        .name                   = "Intel",
        .handle_irq             = intel_pmu_handle_irq,
@@ -3516,6 +3527,8 @@ static __initconst const struct x86_pmu intel_pmu = {
        .format_attrs           = intel_arch3_formats_attr,
        .events_sysfs_show      = intel_event_sysfs_show,
 
+       .attrs                  = intel_pmu_attrs,
+
        .cpu_prepare            = intel_pmu_cpu_prepare,
        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
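The new .attrs entry references an array that is defined later in the file, which is why the preceding hunk adds a forward declaration of intel_pmu_attrs[]; the old runtime assignment in intel_pmu_init() is removed in a later hunk. A small standalone sketch of the same idiom, with made-up names (demo, default_attrs) and assuming the usual GCC handling of a tentative static array declaration:

#include <stdio.h>

struct thing {
        const char *name;
        const char **attrs;     /* filled in by the initializer below */
};

/*
 * Forward declaration so the const initializer can reference an array
 * that is defined further down in the file.
 */
static const char *default_attrs[];

static const struct thing demo = {
        .name  = "demo",
        .attrs = default_attrs,
};

static const char *default_attrs[] = {
        "freeze_on_smi",
        NULL,
};

int main(void)
{
        for (const char **a = demo.attrs; *a; a++)
                printf("%s\n", *a);
        return 0;
}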
@@ -3559,7 +3572,7 @@ static int intel_snb_pebs_broken(int cpu)
                break;
 
        case INTEL_FAM6_SANDYBRIDGE_X:
-               switch (cpu_data(cpu).x86_mask) {
+               switch (cpu_data(cpu).x86_stepping) {
                case 6: rev = 0x618; break;
                case 7: rev = 0x70c; break;
                }
@@ -3894,8 +3907,6 @@ __init int intel_pmu_init(void)
 
        x86_pmu.max_pebs_events         = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
 
-
-       x86_pmu.attrs                   = intel_pmu_attrs;
        /*
         * Quirk: v2 perfmon does not report fixed-purpose events, so
         * assume at least 3 events, when not running in a hypervisor:
@@ -3976,11 +3987,11 @@ __init int intel_pmu_init(void)
                name = "nehalem";
                break;
 
-       case INTEL_FAM6_ATOM_PINEVIEW:
-       case INTEL_FAM6_ATOM_LINCROFT:
-       case INTEL_FAM6_ATOM_PENWELL:
-       case INTEL_FAM6_ATOM_CLOVERVIEW:
-       case INTEL_FAM6_ATOM_CEDARVIEW:
+       case INTEL_FAM6_ATOM_BONNELL:
+       case INTEL_FAM6_ATOM_BONNELL_MID:
+       case INTEL_FAM6_ATOM_SALTWELL:
+       case INTEL_FAM6_ATOM_SALTWELL_MID:
+       case INTEL_FAM6_ATOM_SALTWELL_TABLET:
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
@@ -3993,9 +4004,11 @@ __init int intel_pmu_init(void)
                name = "bonnell";
                break;
 
-       case INTEL_FAM6_ATOM_SILVERMONT1:
-       case INTEL_FAM6_ATOM_SILVERMONT2:
+       case INTEL_FAM6_ATOM_SILVERMONT:
+       case INTEL_FAM6_ATOM_SILVERMONT_X:
+       case INTEL_FAM6_ATOM_SILVERMONT_MID:
        case INTEL_FAM6_ATOM_AIRMONT:
+       case INTEL_FAM6_ATOM_AIRMONT_MID:
                memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
                        sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -4014,7 +4027,7 @@ __init int intel_pmu_init(void)
                break;
 
        case INTEL_FAM6_ATOM_GOLDMONT:
-       case INTEL_FAM6_ATOM_DENVERTON:
+       case INTEL_FAM6_ATOM_GOLDMONT_X:
                memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -4040,7 +4053,7 @@ __init int intel_pmu_init(void)
                name = "goldmont";
                break;
 
-       case INTEL_FAM6_ATOM_GEMINI_LAKE:
+       case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
                memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,