diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 534e20d14d631b44cefc136f99200b2257af378f..f274e19598858979a3d9bf32b75fe1f62fce426a 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1503,10 +1503,21 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
  */
 static void __perf_counter_read(void *info)
 {
+       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter *counter = info;
        struct perf_counter_context *ctx = counter->ctx;
        unsigned long flags;
 
+       /*
+        * If this is a task context, we need to check whether it is
+        * the current task context of this cpu.  If not it has been
+        * scheduled out before the smp call arrived.  In that case
+        * counter->count would have been updated to a recent sample
+        * when the counter was scheduled out.
+        */
+       if (ctx->task && cpuctx->task_ctx != ctx)
+               return;
+
        local_irq_save(flags);
        if (ctx->is_active)
                update_context_time(ctx);
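
The check added above guards the cross-CPU read path. Below is a minimal sketch of the assumed caller, not part of this diff, in which perf_counter_read() dispatches the read to the CPU the counter last ran on; if the task was scheduled out before the cross-call arrived, the early return is safe because counter->count was already refreshed at schedule-out:

    /*
     * Sketch of the assumed caller: run __perf_counter_read() on the
     * counter's CPU so it can pull the live hardware value into
     * counter->count before we read it.
     */
    static u64 perf_counter_read(struct perf_counter *counter)
    {
            /*
             * Only an ACTIVE counter needs the cross-call; otherwise
             * counter->count already holds the latest sample.
             */
            if (counter->state == PERF_COUNTER_STATE_ACTIVE)
                    smp_call_function_single(counter->oncpu,
                                             __perf_counter_read,
                                             counter, 1);

            return atomic64_read(&counter->count);
    }
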
@@ -1780,7 +1791,7 @@ static int perf_counter_read_group(struct perf_counter *counter,
        size += err;
 
        list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-               err = perf_counter_read_entry(counter, read_format,
+               err = perf_counter_read_entry(sub, read_format,
                                buf + size);
                if (err < 0)
                        return err;
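
The one-character fix above matters because each sibling in the group is supposed to contribute its own entry to the read buffer; before it, every iteration of the loop re-read the group leader. A hypothetical userspace-side sketch of the layout this loop fills, with the field order assumed from the PERF_FORMAT_* flags rather than quoted from this tree:

    /*
     * Hypothetical parse of a PERF_FORMAT_GROUP read; optional time
     * fields appear only if the matching PERF_FORMAT_* bits were
     * requested, and are omitted here for brevity.
     */
    struct group_read {
            u64 nr;          /* number of counters in the group */
            u64 values[];    /* leader first, then each sibling */
    };

Before the fix, values[1] through values[nr - 1] would all repeat the leader's count instead of reporting each sibling.
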
@@ -2008,6 +2019,10 @@ int perf_counter_task_disable(void)
        return 0;
 }
 
+#ifndef PERF_COUNTER_INDEX_OFFSET
+# define PERF_COUNTER_INDEX_OFFSET 0
+#endif
+
 static int perf_counter_index(struct perf_counter *counter)
 {
        if (counter->state != PERF_COUNTER_STATE_ACTIVE)
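
The fallback definition above exists because some architectures number their hardware counters from something other than zero, while the mmap self-monitoring interface wants a stable convention in which 0 means "no counter". A minimal sketch of an arch override, shown as an assumption rather than quoted from this tree, for powerpc-style hardware that numbers its PMCs from 1:

    /* In the arch's asm/perf_counter.h (assumed override): */
    #define PERF_COUNTER_INDEX_OFFSET 1

    /*
     * perf_counter_index() can then report a 1-based index to
     * userspace, with 0 reserved for "not active", e.g.:
     *
     *      return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
     */
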