diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3cd13a30f732921bec9ac0235613b02da5f91de9..26d6edab051a1e41c81d23b8c29ab097d6c4c71b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -436,18 +436,18 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
 {
-       int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-
-       if (ret || !write)
-               return ret;
-
+       int ret;
+       int perf_cpu = sysctl_perf_cpu_time_max_percent;
        /*
         * If throttling is disabled don't allow the write:
         */
-       if (sysctl_perf_cpu_time_max_percent == 100 ||
-           sysctl_perf_cpu_time_max_percent == 0)
+       if (write && (perf_cpu == 100 || perf_cpu == 0))
                return -EINVAL;
 
+       ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+       if (ret || !write)
+               return ret;
+
        max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
        perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
        update_perf_cpu_limits();
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
        }
 }
 
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+       return event->pmu->check_period(event, value);
+}
+
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
        u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
        if (event->attr.freq && value > sysctl_perf_event_sample_rate)
                return -EINVAL;
 
+       if (perf_event_check_period(event, value))
+               return -EINVAL;
+
        event_function_call(event, __perf_event_period, &value);
 
        return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
        return 0;
 }
 
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+       return 0;
+}
+
 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ got_cpu_context:
                pmu->pmu_disable = perf_pmu_nop_void;
        }
 
+       if (!pmu->check_period)
+               pmu->check_period = perf_event_nop_int;
+
        if (!pmu->event_idx)
                pmu->event_idx = perf_event_idx_default;