Merge branch 'core' of git://amd64.org/linux/rric into perf/core
[mirror_ubuntu-zesty-kernel.git] / kernel / events / core.c
index d2e28bdd523a9b206b69b6e66ae4b684747ddd9a..924338bb489ce1f43a400e75a2c5f5df94491cb4 100644
 #include <linux/reboot.h>
 #include <linux/vmstat.h>
 #include <linux/device.h>
+#include <linux/export.h>
 #include <linux/vmalloc.h>
 #include <linux/hardirq.h>
 #include <linux/rculist.h>
 #include <linux/uaccess.h>
-#include <linux/suspend.h>
 #include <linux/syscalls.h>
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
@@ -2571,215 +2571,6 @@ static u64 perf_event_read(struct perf_event *event)
        return perf_event_count(event);
 }
 
-/*
- * Callchain support
- */
-
-struct callchain_cpus_entries {
-       struct rcu_head                 rcu_head;
-       struct perf_callchain_entry     *cpu_entries[0];
-};
-
-static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
-static atomic_t nr_callchain_events;
-static DEFINE_MUTEX(callchain_mutex);
-struct callchain_cpus_entries *callchain_cpus_entries;
-
-
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
-                                 struct pt_regs *regs)
-{
-}
-
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
-                               struct pt_regs *regs)
-{
-}
-
-static void release_callchain_buffers_rcu(struct rcu_head *head)
-{
-       struct callchain_cpus_entries *entries;
-       int cpu;
-
-       entries = container_of(head, struct callchain_cpus_entries, rcu_head);
-
-       for_each_possible_cpu(cpu)
-               kfree(entries->cpu_entries[cpu]);
-
-       kfree(entries);
-}
-
-static void release_callchain_buffers(void)
-{
-       struct callchain_cpus_entries *entries;
-
-       entries = callchain_cpus_entries;
-       rcu_assign_pointer(callchain_cpus_entries, NULL);
-       call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
-}
-
-static int alloc_callchain_buffers(void)
-{
-       int cpu;
-       int size;
-       struct callchain_cpus_entries *entries;
-
-       /*
-        * We can't use the percpu allocation API for data that can be
-        * accessed from NMI. Use a temporary manual per cpu allocation
-        * until that gets sorted out.
-        */
-       size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
-
-       entries = kzalloc(size, GFP_KERNEL);
-       if (!entries)
-               return -ENOMEM;
-
-       size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
-
-       for_each_possible_cpu(cpu) {
-               entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
-                                                        cpu_to_node(cpu));
-               if (!entries->cpu_entries[cpu])
-                       goto fail;
-       }
-
-       rcu_assign_pointer(callchain_cpus_entries, entries);
-
-       return 0;
-
-fail:
-       for_each_possible_cpu(cpu)
-               kfree(entries->cpu_entries[cpu]);
-       kfree(entries);
-
-       return -ENOMEM;
-}
-
-static int get_callchain_buffers(void)
-{
-       int err = 0;
-       int count;
-
-       mutex_lock(&callchain_mutex);
-
-       count = atomic_inc_return(&nr_callchain_events);
-       if (WARN_ON_ONCE(count < 1)) {
-               err = -EINVAL;
-               goto exit;
-       }
-
-       if (count > 1) {
-               /* If the allocation failed, give up */
-               if (!callchain_cpus_entries)
-                       err = -ENOMEM;
-               goto exit;
-       }
-
-       err = alloc_callchain_buffers();
-       if (err)
-               release_callchain_buffers();
-exit:
-       mutex_unlock(&callchain_mutex);
-
-       return err;
-}
-
-static void put_callchain_buffers(void)
-{
-       if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
-               release_callchain_buffers();
-               mutex_unlock(&callchain_mutex);
-       }
-}
-
-static int get_recursion_context(int *recursion)
-{
-       int rctx;
-
-       if (in_nmi())
-               rctx = 3;
-       else if (in_irq())
-               rctx = 2;
-       else if (in_softirq())
-               rctx = 1;
-       else
-               rctx = 0;
-
-       if (recursion[rctx])
-               return -1;
-
-       recursion[rctx]++;
-       barrier();
-
-       return rctx;
-}
-
-static inline void put_recursion_context(int *recursion, int rctx)
-{
-       barrier();
-       recursion[rctx]--;
-}
-
-static struct perf_callchain_entry *get_callchain_entry(int *rctx)
-{
-       int cpu;
-       struct callchain_cpus_entries *entries;
-
-       *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
-       if (*rctx == -1)
-               return NULL;
-
-       entries = rcu_dereference(callchain_cpus_entries);
-       if (!entries)
-               return NULL;
-
-       cpu = smp_processor_id();
-
-       return &entries->cpu_entries[cpu][*rctx];
-}
-
-static void
-put_callchain_entry(int rctx)
-{
-       put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
-}
-
-static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-       int rctx;
-       struct perf_callchain_entry *entry;
-
-
-       entry = get_callchain_entry(&rctx);
-       if (rctx == -1)
-               return NULL;
-
-       if (!entry)
-               goto exit_put;
-
-       entry->nr = 0;
-
-       if (!user_mode(regs)) {
-               perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-               perf_callchain_kernel(entry, regs);
-               if (current->mm)
-                       regs = task_pt_regs(current);
-               else
-                       regs = NULL;
-       }
-
-       if (regs) {
-               perf_callchain_store(entry, PERF_CONTEXT_USER);
-               perf_callchain_user(entry, regs);
-       }
-
-exit_put:
-       put_callchain_entry(rctx);
-
-       return entry;
-}
-
 /*
  * Initialize the perf_event context in a task_struct:
  */
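
The roughly 200 removed lines above are the software callchain machinery: per-cpu perf_callchain_entry buffers allocated outside the percpu API so they stay usable from NMI, a per-execution-context recursion guard, and the perf_callchain() entry point that records kernel frames and then user frames. In mainline this code was carved out of core.c (it lives on as kernel/events/callchain.c) rather than dropped. A minimal usage sketch follows, assuming the pre-move signatures shown above; the example_* functions are made up for illustration and are not part of this diff:

/*
 * Illustration only: the perf core takes a reference on the shared
 * per-cpu buffers when a sampling event asks for callchains, records
 * one at sample time, and drops the reference when the event dies.
 */
static int example_event_init(struct perf_event *event)
{
	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
		return get_callchain_buffers();		/* first user allocates */
	return 0;
}

static void example_sample(struct pt_regs *regs)
{
	struct perf_callchain_entry *callchain;

	/*
	 * NMI-safe: picks this CPU's pre-allocated entry for the current
	 * context (task/softirq/irq/nmi) via the recursion index.
	 */
	callchain = perf_callchain(regs);
	if (callchain) {
		/* ... copy callchain->ip[0 .. callchain->nr - 1] into the sample ... */
	}
}

static void example_event_destroy(struct perf_event *event)
{
	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
		put_callchain_buffers();		/* last user frees via RCU */
}
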
@@ -3546,7 +3337,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
                struct ring_buffer *rb = event->rb;
 
                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-               vma->vm_mm->locked_vm -= event->mmap_locked;
+               vma->vm_mm->pinned_vm -= event->mmap_locked;
                rcu_assign_pointer(event->rb, NULL);
                mutex_unlock(&event->mmap_mutex);
 
@@ -3627,7 +3418,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
        lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
-       locked = vma->vm_mm->locked_vm + extra;
+       locked = vma->vm_mm->pinned_vm + extra;
 
        if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
                !capable(CAP_IPC_LOCK)) {
@@ -3653,7 +3444,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
        atomic_long_add(user_extra, &user->locked_vm);
        event->mmap_locked = extra;
        event->mmap_user = get_current_user();
-       vma->vm_mm->locked_vm += event->mmap_locked;
+       vma->vm_mm->pinned_vm += event->mmap_locked;
 
 unlock:
        if (!ret)
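
The three one-line changes in the mmap paths above switch perf's buffer accounting from mm->locked_vm to mm->pinned_vm: the ring-buffer pages are pinned (they can be neither paged out nor migrated) rather than merely mlock()ed, so they are counted in the bucket introduced for that purpose, while the per-user user->locked_vm charge and the RLIMIT_MEMLOCK check keep working as before. A rough sketch of the same charge/uncharge pattern; my_account_pinned()/my_unaccount_pinned() are hypothetical helpers, not kernel API:

/*
 * Illustration only: charge nr_pages against RLIMIT_MEMLOCK and
 * account them as pinned rather than locked memory.
 */
static int my_account_pinned(struct mm_struct *mm, unsigned long nr_pages)
{
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	down_write(&mm->mmap_sem);
	if (mm->pinned_vm + nr_pages > limit && !capable(CAP_IPC_LOCK)) {
		up_write(&mm->mmap_sem);
		return -EPERM;
	}
	mm->pinned_vm += nr_pages;		/* pinned, not mlock()ed */
	up_write(&mm->mmap_sem);
	return 0;
}

static void my_unaccount_pinned(struct mm_struct *mm, unsigned long nr_pages)
{
	down_write(&mm->mmap_sem);
	mm->pinned_vm -= nr_pages;
	up_write(&mm->mmap_sem);
}

perf_mmap() itself can bump the counter without extra locking because ->mmap handlers are called with mm->mmap_sem already held for write.
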
@@ -4739,7 +4530,6 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
        struct hw_perf_event *hwc = &event->hw;
        int throttle = 0;
 
-       data->period = event->hw.last_period;
        if (!overflow)
                overflow = perf_swevent_set_period(event);
 
@@ -4773,6 +4563,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
        if (!is_sampling_event(event))
                return;
 
+       if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
+               data->period = nr;
+               return perf_swevent_overflow(event, 1, data, regs);
+       } else
+               data->period = event->hw.last_period;
+
        if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
                return perf_swevent_overflow(event, 1, data, regs);
 
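
Taken together, the two perf_swevent_* hunks above move the decision about what to report as the sample period: when a software event uses a fixed period (attr.freq == 0) and the sample type includes PERF_SAMPLE_PERIOD, the raw increment nr is reported as the period and an overflow is emitted directly; in every other case the old behaviour of stamping hw.last_period is kept, only now in perf_swevent_event() instead of perf_swevent_overflow(). A hedged userspace sketch of an attr setup that exercises the new branch; the choice of PERF_COUNT_SW_CONTEXT_SWITCHES is just an example:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/*
 * Illustration only: open a software event with a fixed period and
 * PERF_SAMPLE_PERIOD set, i.e. the case the new branch handles.
 */
static int open_sw_event(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
	attr.sample_period = 1;			/* fixed period => attr.freq stays 0 */
	attr.sample_type = PERF_SAMPLE_PERIOD;	/* report the period in each sample */

	/* this task, any CPU, no group leader, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
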
@@ -6855,7 +6651,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
        mutex_lock(&swhash->hlist_mutex);
-       if (swhash->hlist_refcount > 0 && !swhash->swevent_hlist) {
+       if (swhash->hlist_refcount > 0) {
                struct swevent_hlist *hlist;
 
                hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -6944,14 +6740,7 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
        unsigned int cpu = (long)hcpu;
 
-       /*
-        * Ignore suspend/resume action, the perf_pm_notifier will
-        * take care of that.
-        */
-       if (action & CPU_TASKS_FROZEN)
-               return NOTIFY_OK;
-
-       switch (action) {
+       switch (action & ~CPU_TASKS_FROZEN) {
 
        case CPU_UP_PREPARE:
        case CPU_DOWN_FAILED:
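
The hotplug callback now masks CPU_TASKS_FROZEN off the action instead of ignoring frozen transitions, so the same hotplug handling also runs for the _FROZEN variants delivered while user space is frozen during suspend, hibernation and resume; that is what makes the dedicated PM notifier removed further down (and its registration in perf_event_init()) unnecessary. For reference, the _FROZEN actions are simply the base actions with the flag or'ed in, as defined in include/linux/cpu.h of this era:

#define CPU_UP_PREPARE_FROZEN		(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN		(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN		(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN			(CPU_DEAD | CPU_TASKS_FROZEN)

so, for example, (CPU_UP_PREPARE_FROZEN & ~CPU_TASKS_FROZEN) == CPU_UP_PREPARE and the notifier takes the same branch it would for ordinary hotplug.
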
@@ -6970,90 +6759,6 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
        return NOTIFY_OK;
 }
 
-static void perf_pm_resume_cpu(void *unused)
-{
-       struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx;
-       struct pmu *pmu;
-       int idx;
-
-       idx = srcu_read_lock(&pmus_srcu);
-       list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-               ctx = cpuctx->task_ctx;
-
-               perf_ctx_lock(cpuctx, ctx);
-               perf_pmu_disable(cpuctx->ctx.pmu);
-
-               cpu_ctx_sched_out(cpuctx, EVENT_ALL);
-               if (ctx)
-                       ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-
-               perf_pmu_enable(cpuctx->ctx.pmu);
-               perf_ctx_unlock(cpuctx, ctx);
-       }
-       srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static void perf_pm_suspend_cpu(void *unused)
-{
-       struct perf_cpu_context *cpuctx;
-       struct perf_event_context *ctx;
-       struct pmu *pmu;
-       int idx;
-
-       idx = srcu_read_lock(&pmus_srcu);
-       list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-               ctx = cpuctx->task_ctx;
-
-               perf_ctx_lock(cpuctx, ctx);
-               perf_pmu_disable(cpuctx->ctx.pmu);
-
-               perf_event_sched_in(cpuctx, ctx, current);
-
-               perf_pmu_enable(cpuctx->ctx.pmu);
-               perf_ctx_unlock(cpuctx, ctx);
-       }
-       srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static int perf_resume(void)
-{
-       get_online_cpus();
-       smp_call_function(perf_pm_resume_cpu, NULL, 1);
-       put_online_cpus();
-
-       return NOTIFY_OK;
-}
-
-static int perf_suspend(void)
-{
-       get_online_cpus();
-       smp_call_function(perf_pm_suspend_cpu, NULL, 1);
-       put_online_cpus();
-
-       return NOTIFY_OK;
-}
-
-static int perf_pm(struct notifier_block *self, unsigned long action, void *ptr)
-{
-       switch (action) {
-       case PM_POST_HIBERNATION:
-       case PM_POST_SUSPEND:
-               return perf_resume();
-       case PM_HIBERNATION_PREPARE:
-       case PM_SUSPEND_PREPARE:
-               return perf_suspend();
-       default:
-               return NOTIFY_DONE;
-       }
-}
-
-static struct notifier_block perf_pm_notifier = {
-       .notifier_call = perf_pm,
-};
-
 void __init perf_event_init(void)
 {
        int ret;
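
With frozen transitions handled by the hotplug notifier, the suspend/resume special case above goes away: perf_pm_suspend_cpu()/perf_pm_resume_cpu() were run cross-CPU via smp_call_function() around hibernation and suspend, driven by the perf_pm notifier block, and the final hunk below drops the matching register_pm_notifier() call from perf_event_init() (the removed #include <linux/suspend.h> at the top of the file belongs to the same cleanup). A rough sequence sketch of what the remaining hotplug path sees across a suspend/resume cycle, assuming the PM core takes nonboot CPUs down and back up:

/*
 * suspend:  disable_nonboot_cpus() -> CPU_DOWN_PREPARE | CPU_TASKS_FROZEN,
 *                                     CPU_DEAD         | CPU_TASKS_FROZEN
 * resume:   enable_nonboot_cpus()  -> CPU_UP_PREPARE   | CPU_TASKS_FROZEN,
 *                                     CPU_ONLINE       | CPU_TASKS_FROZEN
 *
 * With the CPU_TASKS_FROZEN mask in perf_cpu_notify() these are treated
 * exactly like ordinary hotplug, so no separate PM hook is needed.
 */
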
@@ -7068,7 +6773,6 @@ void __init perf_event_init(void)
        perf_tp_register();
        perf_cpu_notifier(perf_cpu_notify);
        register_reboot_notifier(&perf_reboot_notifier);
-       register_pm_notifier(&perf_pm_notifier);
 
        ret = init_hw_breakpoint();
        WARN(ret, "hw_breakpoint initialization failed with: %d", ret);