static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
static DEFINE_PER_CPU(int, cpu_irq);
+static inline u64 arm_pmu_max_period(void)
+{
+ return (1ULL << 32) - 1;
+}
+
static int
armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
@@ ... @@ int armpmu_event_set_period(struct perf_event *event)
	struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
+ u64 max_period;
int ret = 0;
+ max_period = arm_pmu_max_period();
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
@@ ... @@ int armpmu_event_set_period(struct perf_event *event)
	 * effect we are reducing max_period to account for
* interrupt latency (and we are being very conservative).
*/
- if (left > (armpmu->max_period >> 1))
- left = armpmu->max_period >> 1;
+ if (left > (max_period >> 1))
+ left = (max_period >> 1);
local64_set(&hwc->prev_count, (u64)-left);
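
The clamp above keeps the programmed period in the lower half of the counter range so a delayed overflow interrupt cannot overtake the freshly written value. A rough user-space model of that arithmetic, assuming a 32-bit counter; MAX_PERIOD and program_period are invented here for illustration and are not part of the kernel API:

#include <stdint.h>
#include <stdio.h>

#define MAX_PERIOD	((1ULL << 32) - 1)	/* same value as arm_pmu_max_period() */

/*
 * Model of the logic above: the counter is programmed with the two's
 * complement of "left" (truncated to 32 bits), so it overflows after
 * exactly "left" further events.  "left" is first clamped to half the
 * counter range to leave room for interrupt latency.
 */
static uint32_t program_period(int64_t left)
{
	if (left > (int64_t)(MAX_PERIOD >> 1))
		left = MAX_PERIOD >> 1;

	return (uint32_t)(uint64_t)-left;
}

int main(void)
{
	/* A requested period of 1000 events loads the counter with 0xfffffc18,
	 * so it wraps to zero (and raises the overflow) after 1000 increments. */
	printf("start value = %#x\n", (unsigned)program_period(1000));
	return 0;
}
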
@@ ... @@ u64 armpmu_event_update(struct perf_event *event)
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u64 delta, prev_raw_count, new_raw_count;
+ u64 max_period = arm_pmu_max_period();
again:
prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
goto again;
- delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+ delta = (new_raw_count - prev_raw_count) & max_period;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
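
Masking the subtraction with the maximum period makes the delta come out right even when the 32-bit counter wrapped between the previous and the current read. A stand-alone sketch of the same expression, with arbitrary example values:

#include <stdint.h>
#include <stdio.h>

#define MAX_PERIOD	((1ULL << 32) - 1)

int main(void)
{
	/* The counter wrapped: the new raw value is numerically smaller
	 * than the previous one. */
	uint64_t prev_raw_count = 0xfffffff0;
	uint64_t new_raw_count  = 0x00000010;

	/* Same expression as above: the 64-bit subtraction underflows,
	 * and masking with the maximum period recovers the real number
	 * of events that occurred (32 here). */
	uint64_t delta = (new_raw_count - prev_raw_count) & MAX_PERIOD;

	printf("delta = %llu\n", (unsigned long long)delta);
	return 0;
}
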
@@ ... @@ __hw_perf_event_init(struct perf_event *event)
	 * is far less likely to overtake the previous one unless
* you have some serious IRQ latency issues.
*/
- hwc->sample_period = armpmu->max_period >> 1;
+ hwc->sample_period = arm_pmu_max_period() >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
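
For non-sampling events the hunk above falls back to half of the counter range as the implicit sample period, leaving the other half as headroom before the running counter could lap the value saved in prev_count. A trivial check of what that default evaluates to (user-space sketch, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Half of the 32-bit counter range, i.e. arm_pmu_max_period() >> 1:
	 * the implicit period used above for non-sampling events. */
	uint64_t sample_period = ((1ULL << 32) - 1) >> 1;

	printf("default sample_period = %#llx\n",	/* 0x7fffffff */
	       (unsigned long long)sample_period);
	return 0;
}
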