diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1538df9b2b65d57ebc30b9d156acc2dcb71e7fea..426c2ffba16d4ce1474a798a4c43d78d5abedb1e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1452,6 +1452,13 @@ static enum event_type_t get_event_type(struct perf_event *event)
 
        lockdep_assert_held(&ctx->lock);
 
+       /*
+        * It's 'group type', really, because if our group leader is
+        * pinned, so are we.
+        */
+       if (event->group_leader != event)
+               event = event->group_leader;
+
        event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
        if (!ctx->task)
                event_type |= EVENT_CPU;
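
This hunk makes get_event_type() classify an event by its group leader: siblings are scheduled together with their leader, so it is the leader's attr.pinned, not the sibling's own, that decides whether the whole group lands on the pinned or the flexible list. A minimal user-space sketch of that situation (illustrative, not part of this patch): only the leader sets attr.pinned, yet the sibling is effectively pinned because it is scheduled with its leader, which is exactly the case the new comment describes.

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int open_event(struct perf_event_attr *attr, int group_fd)
    {
            /* pid = 0: this task; cpu = -1: any CPU */
            return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
    }

    int main(void)
    {
            struct perf_event_attr leader = { 0 }, sibling = { 0 };
            int lfd, sfd;

            leader.type   = PERF_TYPE_HARDWARE;
            leader.size   = sizeof(leader);
            leader.config = PERF_COUNT_HW_CPU_CYCLES;
            leader.pinned = 1;              /* only the leader is marked pinned */

            sibling.type   = PERF_TYPE_HARDWARE;
            sibling.size   = sizeof(sibling);
            sibling.config = PERF_COUNT_HW_INSTRUCTIONS;
            /* sibling.pinned stays 0: it is pinned via its leader */

            lfd = open_event(&leader, -1);
            sfd = open_event(&sibling, lfd);
            /* ... run a workload, read the counters ... */
            close(sfd);
            close(lfd);
            return 0;
    }
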
@@ -4378,7 +4385,9 @@ EXPORT_SYMBOL_GPL(perf_event_read_value);
 static int __perf_read_group_add(struct perf_event *leader,
                                        u64 read_format, u64 *values)
 {
+       struct perf_event_context *ctx = leader->ctx;
        struct perf_event *sub;
+       unsigned long flags;
        int n = 1; /* skip @nr */
        int ret;
 
@@ -4408,12 +4417,15 @@ static int __perf_read_group_add(struct perf_event *leader,
        if (read_format & PERF_FORMAT_ID)
                values[n++] = primary_event_id(leader);
 
+       raw_spin_lock_irqsave(&ctx->lock, flags);
+
        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
                values[n++] += perf_event_count(sub);
                if (read_format & PERF_FORMAT_ID)
                        values[n++] = primary_event_id(sub);
        }
 
+       raw_spin_unlock_irqrestore(&ctx->lock, flags);
        return 0;
 }
 
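These two hunks take ctx->lock, IRQ-saving, around the sibling_list walk, so a concurrent change to the group (for instance a sibling being detached while another thread reads the leader) cannot race with the summing loop. For reference, a sketch of the buffer this function fills when the leader was opened with read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID; the optional TOTAL_TIME_ENABLED/RUNNING fields are omitted and the fixed array size is just for the sketch (see perf_event_open(2) for the full layout):

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    struct group_read {
            uint64_t nr;                    /* the @nr slot skipped by n = 1 */
            struct {
                    uint64_t value;         /* summed perf_event_count() */
                    uint64_t id;            /* primary_event_id() */
            } cnt[8];
    };

    static void dump_group(int leader_fd)
    {
            struct group_read buf;
            uint64_t i;

            if (read(leader_fd, &buf, sizeof(buf)) <= 0)
                    return;
            for (i = 0; i < buf.nr; i++)
                    printf("id %llu = %llu\n",
                           (unsigned long long)buf.cnt[i].id,
                           (unsigned long long)buf.cnt[i].value);
    }
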
@@ -7321,21 +7333,6 @@ int perf_event_account_interrupt(struct perf_event *event)
        return __perf_event_account_interrupt(event, 1);
 }
 
-static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
-{
-       /*
-        * Due to interrupt latency (AKA "skid"), we may enter the
-        * kernel before taking an overflow, even if the PMU is only
-        * counting user events.
-        * To avoid leaking information to userspace, we must always
-        * reject kernel samples when exclude_kernel is set.
-        */
-       if (event->attr.exclude_kernel && !user_mode(regs))
-               return false;
-
-       return true;
-}
-
 /*
  * Generic event overflow handling, sampling.
  */
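
This hunk removes sample_is_allowed() entirely, and the hunk below removes its only caller: skid samples that land in the kernel on an exclude_kernel event are delivered to userspace again instead of being silently dropped, backing out the earlier skid-filtering change.
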
@@ -7356,12 +7353,6 @@ static int __perf_event_overflow(struct perf_event *event,
 
        ret = __perf_event_account_interrupt(event, throttle);
 
-       /*
-        * For security, drop the skid kernel samples if necessary.
-        */
-       if (!sample_is_allowed(event, regs))
-               return ret;
-
        /*
         * XXX event_limit might not quite work as expected on inherited
         * events
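
With the filter gone, users who cannot tolerate skid-induced kernel samples have to tackle skid at event-setup time instead. A sketch of one such configuration (values are illustrative; precise_ip support depends on the PMU, e.g. PEBS on Intel):

    #include <linux/perf_event.h>
    #include <string.h>

    static void user_only_low_skid(struct perf_event_attr *attr)
    {
            memset(attr, 0, sizeof(*attr));
            attr->type = PERF_TYPE_HARDWARE;
            attr->size = sizeof(*attr);
            attr->config = PERF_COUNT_HW_CPU_CYCLES;
            attr->sample_period = 100000;   /* illustrative period */
            attr->exclude_kernel = 1;       /* sample user mode only */
            attr->precise_ip = 2;           /* ask the PMU for zero skid */
    }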