1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * x86 APERF/MPERF KHz calculation for
 * /sys/.../cpufreq/scaling_cur_freq
 *
 * Copyright (C) 2017 Intel Corp.
 * Author: Len Brown <len.brown@intel.com>
 */
10 #include <linux/delay.h>
11 #include <linux/ktime.h>
12 #include <linux/math64.h>
13 #include <linux/percpu.h>
14 #include <linux/cpufreq.h>
15 #include <linux/smp.h>
16 #include <linux/sched/isolation.h>
17 #include <linux/rcupdate.h>
21 struct aperfmperf_sample
{
29 static DEFINE_PER_CPU(struct aperfmperf_sample
, samples
);
/* Reuse a cached sample if it is newer than this many milliseconds. */
#define APERFMPERF_CACHE_THRESHOLD_MS	10
/* How long to sleep while waiting for remote CPUs to refresh (ms). */
#define APERFMPERF_REFRESH_DELAY_MS	10
/* A sample older than this (ms) no longer reflects current frequency. */
#define APERFMPERF_STALE_THRESHOLD_MS	1000
36 * aperfmperf_snapshot_khz()
37 * On the current CPU, snapshot APERF, MPERF, and jiffies
38 * unless we already did it within 10ms
39 * calculate kHz, save snapshot
41 static void aperfmperf_snapshot_khz(void *dummy
)
43 u64 aperf
, aperf_delta
;
44 u64 mperf
, mperf_delta
;
45 struct aperfmperf_sample
*s
= this_cpu_ptr(&samples
);
48 local_irq_save(flags
);
49 rdmsrl(MSR_IA32_APERF
, aperf
);
50 rdmsrl(MSR_IA32_MPERF
, mperf
);
51 local_irq_restore(flags
);
53 aperf_delta
= aperf
- s
->aperf
;
54 mperf_delta
= mperf
- s
->mperf
;
57 * There is no architectural guarantee that MPERF
58 * increments faster than we can read it.
63 s
->time
= ktime_get();
66 s
->khz
= div64_u64((cpu_khz
* aperf_delta
), mperf_delta
);
67 atomic_set_release(&s
->scfpending
, 0);
70 static bool aperfmperf_snapshot_cpu(int cpu
, ktime_t now
, bool wait
)
72 s64 time_delta
= ktime_ms_delta(now
, per_cpu(samples
.time
, cpu
));
73 struct aperfmperf_sample
*s
= per_cpu_ptr(&samples
, cpu
);
75 /* Don't bother re-computing within the cache threshold time. */
76 if (time_delta
< APERFMPERF_CACHE_THRESHOLD_MS
)
79 if (!atomic_xchg(&s
->scfpending
, 1) || wait
)
80 smp_call_function_single(cpu
, aperfmperf_snapshot_khz
, NULL
, wait
);
82 /* Return false if the previous iteration was too long ago. */
83 return time_delta
<= APERFMPERF_STALE_THRESHOLD_MS
;
86 unsigned int aperfmperf_get_khz(int cpu
)
91 if (!boot_cpu_has(X86_FEATURE_APERFMPERF
))
94 if (!housekeeping_cpu(cpu
, HK_FLAG_MISC
))
97 if (rcu_is_idle_cpu(cpu
))
98 return 0; /* Idle CPUs are completely uninteresting. */
100 aperfmperf_snapshot_cpu(cpu
, ktime_get(), true);
101 return per_cpu(samples
.khz
, cpu
);
104 void arch_freq_prepare_all(void)
106 ktime_t now
= ktime_get();
113 if (!boot_cpu_has(X86_FEATURE_APERFMPERF
))
116 for_each_online_cpu(cpu
) {
117 if (!housekeeping_cpu(cpu
, HK_FLAG_MISC
))
119 if (rcu_is_idle_cpu(cpu
))
120 continue; /* Idle CPUs are completely uninteresting. */
121 if (!aperfmperf_snapshot_cpu(cpu
, now
, false))
126 msleep(APERFMPERF_REFRESH_DELAY_MS
);
129 unsigned int arch_freq_get_on_cpu(int cpu
)
131 struct aperfmperf_sample
*s
= per_cpu_ptr(&samples
, cpu
);
136 if (!boot_cpu_has(X86_FEATURE_APERFMPERF
))
139 if (!housekeeping_cpu(cpu
, HK_FLAG_MISC
))
142 if (aperfmperf_snapshot_cpu(cpu
, ktime_get(), true))
143 return per_cpu(samples
.khz
, cpu
);
145 msleep(APERFMPERF_REFRESH_DELAY_MS
);
146 atomic_set(&s
->scfpending
, 1);
147 smp_mb(); /* ->scfpending before smp_call_function_single(). */
148 smp_call_function_single(cpu
, aperfmperf_snapshot_khz
, NULL
, 1);
150 return per_cpu(samples
.khz
, cpu
);