return count;
}
-static void armpmu_free_cpu_irq(int irq, int cpu)
+void armpmu_free_irq(int irq, int cpu)
{
if (per_cpu(cpu_irq, cpu) == 0)
return;
per_cpu(cpu_irq, cpu) = 0;
}
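
For reference, this hunk elides the middle of the renamed function. A minimal sketch of how the full free path plausibly reads, assuming the per-CPU cpu_irq bookkeeping visible above, a per-CPU cpu_armpmu pointer used as the dev_id cookie, and a user-counting helper (presumably the function whose `return count;` closes at the top of this section; armpmu_count_irq_users is an illustrative name):

	void armpmu_free_irq(int irq, int cpu)
	{
		if (per_cpu(cpu_irq, cpu) == 0)
			return;
		if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
			return;

		/* Percpu-devid IRQs are shared by all CPUs: only the last user frees. */
		if (!irq_is_percpu_devid(irq))
			free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
		else if (armpmu_count_irq_users(irq) == 1)
			free_percpu_irq(irq, &cpu_armpmu);

		per_cpu(cpu_irq, cpu) = 0;
	}
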
-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
-{
- struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
- int irq = per_cpu(hw_events->irq, cpu);
-
- armpmu_free_cpu_irq(irq, cpu);
-}
-
-static int armpmu_request_cpu_irq(int irq, int cpu)
+int armpmu_request_irq(int irq, int cpu)
{
int err = 0;
const irq_handler_t handler = armpmu_dispatch_irq;
return err;
}
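
The request side mirrors the free path. A hedged sketch of the elided body, under the same assumptions (cpu_irq/cpu_armpmu per-CPU state, an armpmu_count_irq_users helper): SPIs are forced onto the target CPU and requested one by one, while percpu-devid PPIs are requested once, by whichever CPU gets there first:

	int armpmu_request_irq(int irq, int cpu)
	{
		int err = 0;
		const irq_handler_t handler = armpmu_dispatch_irq;

		if (!irq)
			return 0;

		if (!irq_is_percpu_devid(irq)) {
			/* An SPI: pin it to this CPU, then request it outright. */
			err = irq_force_affinity(irq, cpumask_of(cpu));
			if (err && num_possible_cpus() > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, cpu);
				goto err_out;
			}
			err = request_irq(irq, handler,
					  IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD,
					  "arm-pmu", per_cpu_ptr(&cpu_armpmu, cpu));
		} else if (armpmu_count_irq_users(irq) == 0) {
			/* A percpu-devid PPI: the first requester covers all CPUs. */
			err = request_percpu_irq(irq, handler, "arm-pmu", &cpu_armpmu);
		}

		if (err)
			goto err_out;

		per_cpu(cpu_irq, cpu) = irq;
		return 0;

	err_out:
		pr_err("unable to request IRQ%d for ARM PMU counters\n", irq);
		return err;
	}
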
-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
-{
- struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
- int irq = per_cpu(hw_events->irq, cpu);
- if (!irq)
- return 0;
-
- return armpmu_request_cpu_irq(irq, cpu);
-}
-
static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
}
+ /*
+ * Log and request the IRQ so the core arm_pmu code can manage
+ * it. We'll have to sanity-check IRQs later when we associate
+ * them with their PMUs.
+ */
per_cpu(pmu_irqs, cpu) = irq;
+ armpmu_request_irq(irq, cpu);
}
return 0;
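
To place the added lines in context, a sketch of the surrounding ACPI parse loop, assuming a helper (arm_pmu_acpi_register_irq here, an illustrative name) that pulls each CPU's interrupt out of its MADT GICC entry:

	static int arm_pmu_acpi_parse_irqs(void)
	{
		int irq, cpu, err;

		for_each_possible_cpu(cpu) {
			irq = arm_pmu_acpi_register_irq(cpu);
			if (irq < 0) {
				err = irq;
				pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",
					cpu, err);
				goto out_err;
			} else if (irq == 0) {
				pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
			}

			/* The two added lines from the hunk above: */
			per_cpu(pmu_irqs, cpu) = irq;
			armpmu_request_irq(irq, cpu);
		}
		return 0;

	out_err:
		/* Unwind: free and unregister any IRQs noted so far (elided). */
		return err;
	}
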
cpumask_set_cpu(cpu, &pmu->supported_cpus);
- /*
- * Log and request the IRQ so the core arm_pmu code can manage it. In
- * some situations (e.g. mismatched PPIs), we may fail to request the
- * IRQ. However, it may be too late for us to do anything about it.
- * The common ARM PMU code will log a warning in this case.
- */
- armpmu_request_irq(pmu, cpu);
-
/*
* Ideally, we'd probe the PMU here when we find the first matching
* CPU. We can't do that for several reasons; see the comment in
if (acpi_disabled)
return 0;
- /*
- * We can't request IRQs yet, since we don't know the cookie value
- * until we know which CPUs share the same logical PMU. We'll handle
- * that in arm_pmu_acpi_cpu_starting().
- */
ret = arm_pmu_acpi_parse_irqs();
if (ret)
return ret;
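
Requesting the IRQs inside arm_pmu_acpi_parse_irqs() is what lets the removed caveat above go away: by the time any hotplug callback runs, every IRQ has already been requested. A sketch of the resulting init ordering, assuming the usual cpuhp registration (treat the exact state constant and callback names as illustrative):

	static int arm_pmu_acpi_init(void)
	{
		int ret;

		if (acpi_disabled)
			return 0;

		/* Parse and request every CPU's PMU IRQ up-front. */
		ret = arm_pmu_acpi_parse_irqs();
		if (ret)
			return ret;

		/* IRQs are sanity-checked against PMUs as CPUs come online. */
		ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_ACPI_STARTING,
					"perf/arm/pmu_acpi:starting",
					arm_pmu_acpi_cpu_starting, NULL);
		return ret;
	}
	subsys_initcall(arm_pmu_acpi_init);
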
static int armpmu_request_irqs(struct arm_pmu *armpmu)
{
+ struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
int cpu, err;
for_each_cpu(cpu, &armpmu->supported_cpus) {
- err = armpmu_request_irq(armpmu, cpu);
+ int irq = per_cpu(hw_events->irq, cpu);
+ if (!irq)
+ continue;
+
+ err = armpmu_request_irq(irq, cpu);
if (err)
break;
}
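
The loop breaks on the first failure instead of unwinding; that is safe because armpmu_free_irq() quietly ignores any CPU whose per_cpu(cpu_irq) slot was never filled, so a caller can roll back unconditionally. A hypothetical caller sketch (the function and label are illustrative, not part of this patch):

	static int example_probe_irqs(struct arm_pmu *pmu)
	{
		int ret;

		ret = armpmu_request_irqs(pmu);
		if (ret)
			goto out_free_irqs;

		return 0;

	out_free_irqs:
		/* Safe even when only some CPUs had their IRQ requested. */
		armpmu_free_irqs(pmu);
		return ret;
	}
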
static void armpmu_free_irqs(struct arm_pmu *armpmu)
{
int cpu;
+ struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
- for_each_cpu(cpu, &armpmu->supported_cpus)
- armpmu_free_irq(armpmu, cpu);
+ for_each_cpu(cpu, &armpmu->supported_cpus) {
+ int irq = per_cpu(hw_events->irq, cpu);
+
+ armpmu_free_irq(irq, cpu);
+ }
}
int arm_pmu_device_probe(struct platform_device *pdev,
#include <linux/interrupt.h>
#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/sysfs.h>
#include <asm/cputype.h>
struct arm_pmu *armpmu_alloc_atomic(void);
void armpmu_free(struct arm_pmu *pmu);
int armpmu_register(struct arm_pmu *pmu);
-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);
+int armpmu_request_irq(int irq, int cpu);
+void armpmu_free_irq(int irq, int cpu);
#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
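
With these declarations, the header now exposes a plain IRQ-number interface, decoupled from struct arm_pmu. A minimal, hypothetical user wiring one interrupt per CPU from a platform device (the platform_get_irq() usage and loop shape are illustrative, not from this patch):

	static int example_setup_irqs(struct platform_device *pdev,
				      struct arm_pmu *pmu)
	{
		struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
		int cpu, irq, err;

		for_each_cpu(cpu, &pmu->supported_cpus) {
			irq = platform_get_irq(pdev, cpu);
			if (irq <= 0)
				continue;

			/* Note the IRQ where the core arm_pmu code expects it... */
			per_cpu(hw_events->irq, cpu) = irq;
			/* ...then request it through the new interface. */
			err = armpmu_request_irq(irq, cpu);
			if (err)
				return err;
		}
		return 0;
	}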