diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 0da0eb0d875d97384e3b3eb48329c0324c7e5100..707d358e0dff59ed5871bc49ceda0462c4f818be 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -284,6 +284,50 @@ inline u64 lbr_from_signext_quirk_wr(u64 val)
        return val;
 }
 
+/*
+ * If quirk is needed, ensure sign extension is 61 bits:
+ */
+u64 lbr_from_signext_quirk_rd(u64 val)
+{
+       if (static_branch_unlikely(&lbr_from_quirk_key)) {
+               /*
+                * Quirk is on when TSX is not enabled. Therefore TSX
+                * flags must be read as OFF.
+                */
+               val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
+       }
+       return val;
+}
+
+static inline void wrlbr_from(unsigned int idx, u64 val)
+{
+       val = lbr_from_signext_quirk_wr(val);
+       wrmsrl(x86_pmu.lbr_from + idx, val);
+}
+
+static inline void wrlbr_to(unsigned int idx, u64 val)
+{
+       wrmsrl(x86_pmu.lbr_to + idx, val);
+}
+
+static inline u64 rdlbr_from(unsigned int idx)
+{
+       u64 val;
+
+       rdmsrl(x86_pmu.lbr_from + idx, val);
+
+       return lbr_from_signext_quirk_rd(val);
+}
+
+static inline u64 rdlbr_to(unsigned int idx)
+{
+       u64 val;
+
+       rdmsrl(x86_pmu.lbr_to + idx, val);
+
+       return val;
+}
+
 static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 {
        int i;
@@ -300,8 +344,9 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
        tos = task_ctx->tos;
        for (i = 0; i < tos; i++) {
                lbr_idx = (tos - i) & mask;
-               wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
-               wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+               wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
+               wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);
+
                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
        }
@@ -311,9 +356,9 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 
 static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 {
-       int i;
        unsigned lbr_idx, mask;
        u64 tos;
+       int i;
 
        if (task_ctx->lbr_callstack_users == 0) {
                task_ctx->lbr_stack_state = LBR_NONE;
@@ -324,8 +369,8 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
        tos = intel_pmu_lbr_tos();
        for (i = 0; i < tos; i++) {
                lbr_idx = (tos - i) & mask;
-               rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
-               rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+               task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+               task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
        }
@@ -501,8 +546,8 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
                u16 cycles = 0;
                int lbr_flags = lbr_desc[lbr_format];
 
-               rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
-               rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);
+               from = rdlbr_from(lbr_idx);
+               to   = rdlbr_to(lbr_idx);
 
                if (lbr_format == LBR_FORMAT_INFO && need_info) {
                        u64 info;