kernel/events/core.c (mirror_ubuntu-bionic-kernel.git)
1 /*
2 * Performance events core code:
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 *
9 * For licensing details see kernel-base/COPYING
10 */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/idr.h>
17 #include <linux/file.h>
18 #include <linux/poll.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/tick.h>
22 #include <linux/sysfs.h>
23 #include <linux/dcache.h>
24 #include <linux/percpu.h>
25 #include <linux/ptrace.h>
26 #include <linux/reboot.h>
27 #include <linux/vmstat.h>
28 #include <linux/device.h>
29 #include <linux/export.h>
30 #include <linux/vmalloc.h>
31 #include <linux/hardirq.h>
32 #include <linux/rculist.h>
33 #include <linux/uaccess.h>
34 #include <linux/syscalls.h>
35 #include <linux/anon_inodes.h>
36 #include <linux/kernel_stat.h>
37 #include <linux/perf_event.h>
38 #include <linux/ftrace_event.h>
39 #include <linux/hw_breakpoint.h>
40 #include <linux/mm_types.h>
41 #include <linux/cgroup.h>
42 #include <linux/module.h>
43 #include <linux/mman.h>
44
45 #include "internal.h"
46
47 #include <asm/irq_regs.h>
48
49 struct remote_function_call {
50 struct task_struct *p;
51 int (*func)(void *info);
52 void *info;
53 int ret;
54 };
55
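/*
 * Helper run on the target CPU by the smp calls below: when a task is
 * specified, bail out with -EAGAIN unless that task is currently
 * running on this CPU; otherwise invoke the requested function.
 */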
56 static void remote_function(void *data)
57 {
58 struct remote_function_call *tfc = data;
59 struct task_struct *p = tfc->p;
60
61 if (p) {
62 tfc->ret = -EAGAIN;
63 if (task_cpu(p) != smp_processor_id() || !task_curr(p))
64 return;
65 }
66
67 tfc->ret = tfc->func(tfc->info);
68 }
69
70 /**
71 * task_function_call - call a function on the cpu on which a task runs
72 * @p: the task to evaluate
73 * @func: the function to be called
74 * @info: the function call argument
75 *
76 * Calls the function @func when the task is currently running. This might
77 * be on the current CPU, which just calls the function directly
78 *
79 * returns: @func return value, or
80 * -ESRCH - when the process isn't running
81 * -EAGAIN - when the process moved away
82 */
83 static int
84 task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
85 {
86 struct remote_function_call data = {
87 .p = p,
88 .func = func,
89 .info = info,
90 .ret = -ESRCH, /* No such (running) process */
91 };
92
93 if (task_curr(p))
94 smp_call_function_single(task_cpu(p), remote_function, &data, 1);
95
96 return data.ret;
97 }
98
99 /**
100 * cpu_function_call - call a function on the cpu
101 * @func: the function to be called
102 * @info: the function call argument
103 *
104 * Calls the function @func on the remote cpu.
105 *
106 * returns: @func return value or -ENXIO when the cpu is offline
107 */
108 static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
109 {
110 struct remote_function_call data = {
111 .p = NULL,
112 .func = func,
113 .info = info,
114 .ret = -ENXIO, /* No such CPU */
115 };
116
117 smp_call_function_single(cpu, remote_function, &data, 1);
118
119 return data.ret;
120 }
121
122 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
123 PERF_FLAG_FD_OUTPUT |\
124 PERF_FLAG_PID_CGROUP |\
125 PERF_FLAG_FD_CLOEXEC)
126
127 /*
128 * branch priv levels that need permission checks
129 */
130 #define PERF_SAMPLE_BRANCH_PERM_PLM \
131 (PERF_SAMPLE_BRANCH_KERNEL |\
132 PERF_SAMPLE_BRANCH_HV)
133
134 enum event_type_t {
135 EVENT_FLEXIBLE = 0x1,
136 EVENT_PINNED = 0x2,
137 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
138 };
139
140 /*
141 * perf_sched_events : >0 events exist
142 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
143 */
144 struct static_key_deferred perf_sched_events __read_mostly;
145 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
146 static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
147
148 static atomic_t nr_mmap_events __read_mostly;
149 static atomic_t nr_comm_events __read_mostly;
150 static atomic_t nr_task_events __read_mostly;
151 static atomic_t nr_freq_events __read_mostly;
152
153 static LIST_HEAD(pmus);
154 static DEFINE_MUTEX(pmus_lock);
155 static struct srcu_struct pmus_srcu;
156
157 /*
158 * perf event paranoia level:
159 * -1 - not paranoid at all
160 * 0 - disallow raw tracepoint access for unpriv
161 * 1 - disallow cpu events for unpriv
162 * 2 - disallow kernel profiling for unpriv
163 */
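/* Tunable at run time via /proc/sys/kernel/perf_event_paranoid. */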
164 int sysctl_perf_event_paranoid __read_mostly = 1;
165
166 /* Minimum for 512 kiB + 1 user control page */
167 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
168
169 /*
170 * max perf event sample rate
171 */
172 #define DEFAULT_MAX_SAMPLE_RATE 100000
173 #define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
174 #define DEFAULT_CPU_TIME_MAX_PERCENT 25
175
176 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
177
178 static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
179 static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
180
181 static int perf_sample_allowed_ns __read_mostly =
182 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
183
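/*
 * Recompute the per-sample time budget as a percentage of the sample
 * period. With the defaults (100000 samples/sec, 25% of CPU time) this
 * allows 10,000ns * 25 / 100 = 2,500ns per sample before throttling.
 */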
184 void update_perf_cpu_limits(void)
185 {
186 u64 tmp = perf_sample_period_ns;
187
188 tmp *= sysctl_perf_cpu_time_max_percent;
189 do_div(tmp, 100);
190 ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
191 }
192
193 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
194
195 int perf_proc_update_handler(struct ctl_table *table, int write,
196 void __user *buffer, size_t *lenp,
197 loff_t *ppos)
198 {
199 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
200
201 if (ret || !write)
202 return ret;
203
204 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
205 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
206 update_perf_cpu_limits();
207
208 return 0;
209 }
210
211 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
212
213 int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
214 void __user *buffer, size_t *lenp,
215 loff_t *ppos)
216 {
217 int ret = proc_dointvec(table, write, buffer, lenp, ppos);
218
219 if (ret || !write)
220 return ret;
221
222 update_perf_cpu_limits();
223
224 return 0;
225 }
226
227 /*
228 * perf samples are done in some very critical code paths (NMIs).
229 * If they take too much CPU time, the system can lock up and not
230 * get any real work done. This will drop the sample rate when
231 * we detect that events are taking too long.
232 */
233 #define NR_ACCUMULATED_SAMPLES 128
234 static DEFINE_PER_CPU(u64, running_sample_length);
235
236 static void perf_duration_warn(struct irq_work *w)
237 {
238 u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
239 u64 avg_local_sample_len;
240 u64 local_samples_len;
241
242 local_samples_len = __get_cpu_var(running_sample_length);
243 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
244
245 printk_ratelimited(KERN_WARNING
246 "perf interrupt took too long (%lld > %lld), lowering "
247 "kernel.perf_event_max_sample_rate to %d\n",
248 avg_local_sample_len, allowed_ns >> 1,
249 sysctl_perf_event_sample_rate);
250 }
251
252 static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
253
254 void perf_sample_event_took(u64 sample_len_ns)
255 {
256 u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
257 u64 avg_local_sample_len;
258 u64 local_samples_len;
259
260 if (allowed_ns == 0)
261 return;
262
263 /* decay the counter by 1 average sample */
264 local_samples_len = __get_cpu_var(running_sample_length);
265 local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
266 local_samples_len += sample_len_ns;
267 __get_cpu_var(running_sample_length) = local_samples_len;
268
269 /*
270 	 * note: this will be biased artificially low until we have
271 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
272 * from having to maintain a count.
273 */
274 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
275
276 if (avg_local_sample_len <= allowed_ns)
277 return;
278
279 if (max_samples_per_tick <= 1)
280 return;
281
282 max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
283 sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
284 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
285
286 update_perf_cpu_limits();
287
288 if (!irq_work_queue(&perf_duration_work)) {
289 early_printk("perf interrupt took too long (%lld > %lld), lowering "
290 "kernel.perf_event_max_sample_rate to %d\n",
291 avg_local_sample_len, allowed_ns >> 1,
292 sysctl_perf_event_sample_rate);
293 }
294 }
295
296 static atomic64_t perf_event_id;
297
298 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
299 enum event_type_t event_type);
300
301 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
302 enum event_type_t event_type,
303 struct task_struct *task);
304
305 static void update_context_time(struct perf_event_context *ctx);
306 static u64 perf_event_time(struct perf_event *event);
307
308 void __weak perf_event_print_debug(void) { }
309
310 extern __weak const char *perf_pmu_name(void)
311 {
312 return "pmu";
313 }
314
315 static inline u64 perf_clock(void)
316 {
317 return local_clock();
318 }
319
320 static inline struct perf_cpu_context *
321 __get_cpu_context(struct perf_event_context *ctx)
322 {
323 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
324 }
325
326 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
327 struct perf_event_context *ctx)
328 {
329 raw_spin_lock(&cpuctx->ctx.lock);
330 if (ctx)
331 raw_spin_lock(&ctx->lock);
332 }
333
334 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
335 struct perf_event_context *ctx)
336 {
337 if (ctx)
338 raw_spin_unlock(&ctx->lock);
339 raw_spin_unlock(&cpuctx->ctx.lock);
340 }
341
342 #ifdef CONFIG_CGROUP_PERF
343
344 /*
345 * perf_cgroup_info keeps track of time_enabled for a cgroup.
346 * This is a per-cpu dynamically allocated data structure.
347 */
348 struct perf_cgroup_info {
349 u64 time;
350 u64 timestamp;
351 };
352
353 struct perf_cgroup {
354 struct cgroup_subsys_state css;
355 struct perf_cgroup_info __percpu *info;
356 };
357
358 /*
359 * Must ensure cgroup is pinned (css_get) before calling
360 * this function. In other words, we cannot call this function
361 * if there is no cgroup event for the current CPU context.
362 */
363 static inline struct perf_cgroup *
364 perf_cgroup_from_task(struct task_struct *task)
365 {
366 return container_of(task_css(task, perf_event_cgrp_id),
367 struct perf_cgroup, css);
368 }
369
370 static inline bool
371 perf_cgroup_match(struct perf_event *event)
372 {
373 struct perf_event_context *ctx = event->ctx;
374 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
375
376 /* @event doesn't care about cgroup */
377 if (!event->cgrp)
378 return true;
379
380 /* wants specific cgroup scope but @cpuctx isn't associated with any */
381 if (!cpuctx->cgrp)
382 return false;
383
384 /*
385 * Cgroup scoping is recursive. An event enabled for a cgroup is
386 * also enabled for all its descendant cgroups. If @cpuctx's
387 * cgroup is a descendant of @event's (the test covers identity
388 * case), it's a match.
389 */
390 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
391 event->cgrp->css.cgroup);
392 }
393
394 static inline void perf_put_cgroup(struct perf_event *event)
395 {
396 css_put(&event->cgrp->css);
397 }
398
399 static inline void perf_detach_cgroup(struct perf_event *event)
400 {
401 perf_put_cgroup(event);
402 event->cgrp = NULL;
403 }
404
405 static inline int is_cgroup_event(struct perf_event *event)
406 {
407 return event->cgrp != NULL;
408 }
409
410 static inline u64 perf_cgroup_event_time(struct perf_event *event)
411 {
412 struct perf_cgroup_info *t;
413
414 t = per_cpu_ptr(event->cgrp->info, event->cpu);
415 return t->time;
416 }
417
418 static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
419 {
420 struct perf_cgroup_info *info;
421 u64 now;
422
423 now = perf_clock();
424
425 info = this_cpu_ptr(cgrp->info);
426
427 info->time += now - info->timestamp;
428 info->timestamp = now;
429 }
430
431 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
432 {
433 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
434 if (cgrp_out)
435 __update_cgrp_time(cgrp_out);
436 }
437
438 static inline void update_cgrp_time_from_event(struct perf_event *event)
439 {
440 struct perf_cgroup *cgrp;
441
442 /*
443 * ensure we access cgroup data only when needed and
444 * when we know the cgroup is pinned (css_get)
445 */
446 if (!is_cgroup_event(event))
447 return;
448
449 cgrp = perf_cgroup_from_task(current);
450 /*
451 * Do not update time when cgroup is not active
452 */
453 if (cgrp == event->cgrp)
454 __update_cgrp_time(event->cgrp);
455 }
456
457 static inline void
458 perf_cgroup_set_timestamp(struct task_struct *task,
459 struct perf_event_context *ctx)
460 {
461 struct perf_cgroup *cgrp;
462 struct perf_cgroup_info *info;
463
464 /*
465 * ctx->lock held by caller
466 * ensure we do not access cgroup data
467 * unless we have the cgroup pinned (css_get)
468 */
469 if (!task || !ctx->nr_cgroups)
470 return;
471
472 cgrp = perf_cgroup_from_task(task);
473 info = this_cpu_ptr(cgrp->info);
474 info->timestamp = ctx->timestamp;
475 }
476
477 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
478 #define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
479
480 /*
481 * reschedule events based on the cgroup constraint of task.
482 *
483 * mode SWOUT : schedule out everything
484 * mode SWIN : schedule in based on cgroup for next
485 */
486 void perf_cgroup_switch(struct task_struct *task, int mode)
487 {
488 struct perf_cpu_context *cpuctx;
489 struct pmu *pmu;
490 unsigned long flags;
491
492 /*
493 	 * disable interrupts to avoid getting nr_cgroup
494 * changes via __perf_event_disable(). Also
495 * avoids preemption.
496 */
497 local_irq_save(flags);
498
499 /*
500 * we reschedule only in the presence of cgroup
501 * constrained events.
502 */
503 rcu_read_lock();
504
505 list_for_each_entry_rcu(pmu, &pmus, entry) {
506 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
507 if (cpuctx->unique_pmu != pmu)
508 continue; /* ensure we process each cpuctx once */
509
510 /*
511 * perf_cgroup_events says at least one
512 * context on this CPU has cgroup events.
513 *
514 * ctx->nr_cgroups reports the number of cgroup
515 * events for a context.
516 */
517 if (cpuctx->ctx.nr_cgroups > 0) {
518 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
519 perf_pmu_disable(cpuctx->ctx.pmu);
520
521 if (mode & PERF_CGROUP_SWOUT) {
522 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
523 /*
524 * must not be done before ctxswout due
525 * to event_filter_match() in event_sched_out()
526 */
527 cpuctx->cgrp = NULL;
528 }
529
530 if (mode & PERF_CGROUP_SWIN) {
531 WARN_ON_ONCE(cpuctx->cgrp);
532 /*
533 * set cgrp before ctxsw in to allow
534 * event_filter_match() to not have to pass
535 * task around
536 */
537 cpuctx->cgrp = perf_cgroup_from_task(task);
538 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
539 }
540 perf_pmu_enable(cpuctx->ctx.pmu);
541 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
542 }
543 }
544
545 rcu_read_unlock();
546
547 local_irq_restore(flags);
548 }
549
550 static inline void perf_cgroup_sched_out(struct task_struct *task,
551 struct task_struct *next)
552 {
553 struct perf_cgroup *cgrp1;
554 struct perf_cgroup *cgrp2 = NULL;
555
556 /*
557 * we come here when we know perf_cgroup_events > 0
558 */
559 cgrp1 = perf_cgroup_from_task(task);
560
561 /*
562 * next is NULL when called from perf_event_enable_on_exec()
563 * that will systematically cause a cgroup_switch()
564 */
565 if (next)
566 cgrp2 = perf_cgroup_from_task(next);
567
568 /*
569 * only schedule out current cgroup events if we know
570 * that we are switching to a different cgroup. Otherwise,
571 	 * do not touch the cgroup events.
572 */
573 if (cgrp1 != cgrp2)
574 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
575 }
576
577 static inline void perf_cgroup_sched_in(struct task_struct *prev,
578 struct task_struct *task)
579 {
580 struct perf_cgroup *cgrp1;
581 struct perf_cgroup *cgrp2 = NULL;
582
583 /*
584 * we come here when we know perf_cgroup_events > 0
585 */
586 cgrp1 = perf_cgroup_from_task(task);
587
588 /* prev can never be NULL */
589 cgrp2 = perf_cgroup_from_task(prev);
590
591 /*
592 * only need to schedule in cgroup events if we are changing
593 	 * cgroup during ctxsw. Cgroup events were not scheduled
594 	 * out during ctxsw if that was not the case.
595 */
596 if (cgrp1 != cgrp2)
597 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
598 }
599
600 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
601 struct perf_event_attr *attr,
602 struct perf_event *group_leader)
603 {
604 struct perf_cgroup *cgrp;
605 struct cgroup_subsys_state *css;
606 struct fd f = fdget(fd);
607 int ret = 0;
608
609 if (!f.file)
610 return -EBADF;
611
612 css = css_tryget_online_from_dir(f.file->f_dentry,
613 &perf_event_cgrp_subsys);
614 if (IS_ERR(css)) {
615 ret = PTR_ERR(css);
616 goto out;
617 }
618
619 cgrp = container_of(css, struct perf_cgroup, css);
620 event->cgrp = cgrp;
621
622 /*
623 * all events in a group must monitor
624 * the same cgroup because a task belongs
625 * to only one perf cgroup at a time
626 */
627 if (group_leader && group_leader->cgrp != cgrp) {
628 perf_detach_cgroup(event);
629 ret = -EINVAL;
630 }
631 out:
632 fdput(f);
633 return ret;
634 }
635
636 static inline void
637 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
638 {
639 struct perf_cgroup_info *t;
640 t = per_cpu_ptr(event->cgrp->info, event->cpu);
641 event->shadow_ctx_time = now - t->timestamp;
642 }
643
644 static inline void
645 perf_cgroup_defer_enabled(struct perf_event *event)
646 {
647 /*
648 * when the current task's perf cgroup does not match
649 * the event's, we need to remember to call the
650 	 * perf_cgroup_mark_enabled() function the first time a task with
651 * a matching perf cgroup is scheduled in.
652 */
653 if (is_cgroup_event(event) && !perf_cgroup_match(event))
654 event->cgrp_defer_enabled = 1;
655 }
656
657 static inline void
658 perf_cgroup_mark_enabled(struct perf_event *event,
659 struct perf_event_context *ctx)
660 {
661 struct perf_event *sub;
662 u64 tstamp = perf_event_time(event);
663
664 if (!event->cgrp_defer_enabled)
665 return;
666
667 event->cgrp_defer_enabled = 0;
668
669 event->tstamp_enabled = tstamp - event->total_time_enabled;
670 list_for_each_entry(sub, &event->sibling_list, group_entry) {
671 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
672 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
673 sub->cgrp_defer_enabled = 0;
674 }
675 }
676 }
677 #else /* !CONFIG_CGROUP_PERF */
678
679 static inline bool
680 perf_cgroup_match(struct perf_event *event)
681 {
682 return true;
683 }
684
685 static inline void perf_detach_cgroup(struct perf_event *event)
686 {}
687
688 static inline int is_cgroup_event(struct perf_event *event)
689 {
690 return 0;
691 }
692
693 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
694 {
695 return 0;
696 }
697
698 static inline void update_cgrp_time_from_event(struct perf_event *event)
699 {
700 }
701
702 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
703 {
704 }
705
706 static inline void perf_cgroup_sched_out(struct task_struct *task,
707 struct task_struct *next)
708 {
709 }
710
711 static inline void perf_cgroup_sched_in(struct task_struct *prev,
712 struct task_struct *task)
713 {
714 }
715
716 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
717 struct perf_event_attr *attr,
718 struct perf_event *group_leader)
719 {
720 return -EINVAL;
721 }
722
723 static inline void
724 perf_cgroup_set_timestamp(struct task_struct *task,
725 struct perf_event_context *ctx)
726 {
727 }
728
729 void
730 perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
731 {
732 }
733
734 static inline void
735 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
736 {
737 }
738
739 static inline u64 perf_cgroup_event_time(struct perf_event *event)
740 {
741 return 0;
742 }
743
744 static inline void
745 perf_cgroup_defer_enabled(struct perf_event *event)
746 {
747 }
748
749 static inline void
750 perf_cgroup_mark_enabled(struct perf_event *event,
751 struct perf_event_context *ctx)
752 {
753 }
754 #endif
755
756 /*
757 * set default to be dependent on timer tick just
758 * like original code
759 */
760 #define PERF_CPU_HRTIMER (1000 / HZ)
761 /*
762 * function must be called with interrupts disbled
763 */
764 static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
765 {
766 struct perf_cpu_context *cpuctx;
767 enum hrtimer_restart ret = HRTIMER_NORESTART;
768 int rotations = 0;
769
770 WARN_ON(!irqs_disabled());
771
772 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
773
774 rotations = perf_rotate_context(cpuctx);
775
776 /*
777 * arm timer if needed
778 */
779 if (rotations) {
780 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
781 ret = HRTIMER_RESTART;
782 }
783
784 return ret;
785 }
786
787 /* CPU is going down */
788 void perf_cpu_hrtimer_cancel(int cpu)
789 {
790 struct perf_cpu_context *cpuctx;
791 struct pmu *pmu;
792 unsigned long flags;
793
794 if (WARN_ON(cpu != smp_processor_id()))
795 return;
796
797 local_irq_save(flags);
798
799 rcu_read_lock();
800
801 list_for_each_entry_rcu(pmu, &pmus, entry) {
802 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
803
804 if (pmu->task_ctx_nr == perf_sw_context)
805 continue;
806
807 hrtimer_cancel(&cpuctx->hrtimer);
808 }
809
810 rcu_read_unlock();
811
812 local_irq_restore(flags);
813 }
814
815 static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
816 {
817 struct hrtimer *hr = &cpuctx->hrtimer;
818 struct pmu *pmu = cpuctx->ctx.pmu;
819 int timer;
820
821 /* no multiplexing needed for SW PMU */
822 if (pmu->task_ctx_nr == perf_sw_context)
823 return;
824
825 /*
826 * check default is sane, if not set then force to
827 * default interval (1/tick)
828 */
829 timer = pmu->hrtimer_interval_ms;
830 if (timer < 1)
831 timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
832
833 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
834
835 hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
836 hr->function = perf_cpu_hrtimer_handler;
837 }
838
839 static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
840 {
841 struct hrtimer *hr = &cpuctx->hrtimer;
842 struct pmu *pmu = cpuctx->ctx.pmu;
843
844 /* not for SW PMU */
845 if (pmu->task_ctx_nr == perf_sw_context)
846 return;
847
848 if (hrtimer_active(hr))
849 return;
850
851 if (!hrtimer_callback_running(hr))
852 __hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
853 0, HRTIMER_MODE_REL_PINNED, 0);
854 }
855
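/*
 * pmu_disable_count is a per-cpu nesting counter: the PMU is disabled on
 * the first perf_pmu_disable() and only re-enabled when the matching
 * perf_pmu_enable() brings the count back to zero.
 */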
856 void perf_pmu_disable(struct pmu *pmu)
857 {
858 int *count = this_cpu_ptr(pmu->pmu_disable_count);
859 if (!(*count)++)
860 pmu->pmu_disable(pmu);
861 }
862
863 void perf_pmu_enable(struct pmu *pmu)
864 {
865 int *count = this_cpu_ptr(pmu->pmu_disable_count);
866 if (!--(*count))
867 pmu->pmu_enable(pmu);
868 }
869
870 static DEFINE_PER_CPU(struct list_head, rotation_list);
871
872 /*
873 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
874 * because they're strictly cpu affine and rotate_start is called with IRQs
875 * disabled, while rotate_context is called from IRQ context.
876 */
877 static void perf_pmu_rotate_start(struct pmu *pmu)
878 {
879 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
880 struct list_head *head = &__get_cpu_var(rotation_list);
881
882 WARN_ON(!irqs_disabled());
883
884 if (list_empty(&cpuctx->rotation_list))
885 list_add(&cpuctx->rotation_list, head);
886 }
887
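/*
 * Context reference counting: get_ctx() takes a reference (and warns if
 * the context is already dead), put_ctx() drops it and, on the last
 * reference, releases the parent context and the owning task and frees
 * the context after an RCU grace period.
 */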
888 static void get_ctx(struct perf_event_context *ctx)
889 {
890 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
891 }
892
893 static void put_ctx(struct perf_event_context *ctx)
894 {
895 if (atomic_dec_and_test(&ctx->refcount)) {
896 if (ctx->parent_ctx)
897 put_ctx(ctx->parent_ctx);
898 if (ctx->task)
899 put_task_struct(ctx->task);
900 kfree_rcu(ctx, rcu_head);
901 }
902 }
903
904 static void unclone_ctx(struct perf_event_context *ctx)
905 {
906 if (ctx->parent_ctx) {
907 put_ctx(ctx->parent_ctx);
908 ctx->parent_ctx = NULL;
909 }
910 ctx->generation++;
911 }
912
913 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
914 {
915 /*
916 * only top level events have the pid namespace they were created in
917 */
918 if (event->parent)
919 event = event->parent;
920
921 return task_tgid_nr_ns(p, event->ns);
922 }
923
924 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
925 {
926 /*
927 * only top level events have the pid namespace they were created in
928 */
929 if (event->parent)
930 event = event->parent;
931
932 return task_pid_nr_ns(p, event->ns);
933 }
934
935 /*
936 * If we inherit events we want to return the parent event id
937 * to userspace.
938 */
939 static u64 primary_event_id(struct perf_event *event)
940 {
941 u64 id = event->id;
942
943 if (event->parent)
944 id = event->parent->id;
945
946 return id;
947 }
948
949 /*
950 * Get the perf_event_context for a task and lock it.
951 	 * This has to cope with the fact that until it is locked,
952 * the context could get moved to another task.
953 */
954 static struct perf_event_context *
955 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
956 {
957 struct perf_event_context *ctx;
958
959 retry:
960 /*
961 * One of the few rules of preemptible RCU is that one cannot do
962 * rcu_read_unlock() while holding a scheduler (or nested) lock when
963 * part of the read side critical section was preemptible -- see
964 * rcu_read_unlock_special().
965 *
966 * Since ctx->lock nests under rq->lock we must ensure the entire read
967 * side critical section is non-preemptible.
968 */
969 preempt_disable();
970 rcu_read_lock();
971 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
972 if (ctx) {
973 /*
974 * If this context is a clone of another, it might
975 * get swapped for another underneath us by
976 * perf_event_task_sched_out, though the
977 * rcu_read_lock() protects us from any context
978 * getting freed. Lock the context and check if it
979 * got swapped before we could get the lock, and retry
980 * if so. If we locked the right context, then it
981 * can't get swapped on us any more.
982 */
983 raw_spin_lock_irqsave(&ctx->lock, *flags);
984 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
985 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
986 rcu_read_unlock();
987 preempt_enable();
988 goto retry;
989 }
990
991 if (!atomic_inc_not_zero(&ctx->refcount)) {
992 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
993 ctx = NULL;
994 }
995 }
996 rcu_read_unlock();
997 preempt_enable();
998 return ctx;
999 }
1000
1001 /*
1002 * Get the context for a task and increment its pin_count so it
1003 * can't get swapped to another task. This also increments its
1004 * reference count so that the context can't get freed.
1005 */
1006 static struct perf_event_context *
1007 perf_pin_task_context(struct task_struct *task, int ctxn)
1008 {
1009 struct perf_event_context *ctx;
1010 unsigned long flags;
1011
1012 ctx = perf_lock_task_context(task, ctxn, &flags);
1013 if (ctx) {
1014 ++ctx->pin_count;
1015 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1016 }
1017 return ctx;
1018 }
1019
1020 static void perf_unpin_context(struct perf_event_context *ctx)
1021 {
1022 unsigned long flags;
1023
1024 raw_spin_lock_irqsave(&ctx->lock, flags);
1025 --ctx->pin_count;
1026 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1027 }
1028
1029 /*
1030 * Update the record of the current time in a context.
1031 */
1032 static void update_context_time(struct perf_event_context *ctx)
1033 {
1034 u64 now = perf_clock();
1035
1036 ctx->time += now - ctx->timestamp;
1037 ctx->timestamp = now;
1038 }
1039
1040 static u64 perf_event_time(struct perf_event *event)
1041 {
1042 struct perf_event_context *ctx = event->ctx;
1043
1044 if (is_cgroup_event(event))
1045 return perf_cgroup_event_time(event);
1046
1047 return ctx ? ctx->time : 0;
1048 }
1049
1050 /*
1051 	 * Update the total_time_enabled and total_time_running fields for an event.
1052 * The caller of this function needs to hold the ctx->lock.
1053 */
1054 static void update_event_times(struct perf_event *event)
1055 {
1056 struct perf_event_context *ctx = event->ctx;
1057 u64 run_end;
1058
1059 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1060 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1061 return;
1062 /*
1063 * in cgroup mode, time_enabled represents
1064 * the time the event was enabled AND active
1065 * tasks were in the monitored cgroup. This is
1066 * independent of the activity of the context as
1067 * there may be a mix of cgroup and non-cgroup events.
1068 *
1069 * That is why we treat cgroup events differently
1070 * here.
1071 */
1072 if (is_cgroup_event(event))
1073 run_end = perf_cgroup_event_time(event);
1074 else if (ctx->is_active)
1075 run_end = ctx->time;
1076 else
1077 run_end = event->tstamp_stopped;
1078
1079 event->total_time_enabled = run_end - event->tstamp_enabled;
1080
1081 if (event->state == PERF_EVENT_STATE_INACTIVE)
1082 run_end = event->tstamp_stopped;
1083 else
1084 run_end = perf_event_time(event);
1085
1086 event->total_time_running = run_end - event->tstamp_running;
1087
1088 }
1089
1090 /*
1091 * Update total_time_enabled and total_time_running for all events in a group.
1092 */
1093 static void update_group_times(struct perf_event *leader)
1094 {
1095 struct perf_event *event;
1096
1097 update_event_times(leader);
1098 list_for_each_entry(event, &leader->sibling_list, group_entry)
1099 update_event_times(event);
1100 }
1101
1102 static struct list_head *
1103 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1104 {
1105 if (event->attr.pinned)
1106 return &ctx->pinned_groups;
1107 else
1108 return &ctx->flexible_groups;
1109 }
1110
1111 /*
1112 	 * Add an event to the lists for its context.
1113 * Must be called with ctx->mutex and ctx->lock held.
1114 */
1115 static void
1116 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1117 {
1118 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1119 event->attach_state |= PERF_ATTACH_CONTEXT;
1120
1121 /*
1122 * If we're a stand alone event or group leader, we go to the context
1123 * list, group events are kept attached to the group so that
1124 * perf_group_detach can, at all times, locate all siblings.
1125 */
1126 if (event->group_leader == event) {
1127 struct list_head *list;
1128
1129 if (is_software_event(event))
1130 event->group_flags |= PERF_GROUP_SOFTWARE;
1131
1132 list = ctx_group_list(event, ctx);
1133 list_add_tail(&event->group_entry, list);
1134 }
1135
1136 if (is_cgroup_event(event))
1137 ctx->nr_cgroups++;
1138
1139 if (has_branch_stack(event))
1140 ctx->nr_branch_stack++;
1141
1142 list_add_rcu(&event->event_entry, &ctx->event_list);
1143 if (!ctx->nr_events)
1144 perf_pmu_rotate_start(ctx->pmu);
1145 ctx->nr_events++;
1146 if (event->attr.inherit_stat)
1147 ctx->nr_stat++;
1148
1149 ctx->generation++;
1150 }
1151
1152 /*
1153 * Initialize event state based on the perf_event_attr::disabled.
1154 */
1155 static inline void perf_event__state_init(struct perf_event *event)
1156 {
1157 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1158 PERF_EVENT_STATE_INACTIVE;
1159 }
1160
1161 /*
1162 * Called at perf_event creation and when events are attached/detached from a
1163 * group.
1164 */
1165 static void perf_event__read_size(struct perf_event *event)
1166 {
1167 int entry = sizeof(u64); /* value */
1168 int size = 0;
1169 int nr = 1;
1170
1171 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1172 size += sizeof(u64);
1173
1174 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1175 size += sizeof(u64);
1176
1177 if (event->attr.read_format & PERF_FORMAT_ID)
1178 entry += sizeof(u64);
1179
1180 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1181 nr += event->group_leader->nr_siblings;
1182 size += sizeof(u64);
1183 }
1184
1185 size += entry * nr;
1186 event->read_size = size;
1187 }
1188
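/*
 * Precompute the size of the fixed portion of a sample record from
 * attr.sample_type so the output path does not have to redo the
 * calculation for every sample.
 */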
1189 static void perf_event__header_size(struct perf_event *event)
1190 {
1191 struct perf_sample_data *data;
1192 u64 sample_type = event->attr.sample_type;
1193 u16 size = 0;
1194
1195 perf_event__read_size(event);
1196
1197 if (sample_type & PERF_SAMPLE_IP)
1198 size += sizeof(data->ip);
1199
1200 if (sample_type & PERF_SAMPLE_ADDR)
1201 size += sizeof(data->addr);
1202
1203 if (sample_type & PERF_SAMPLE_PERIOD)
1204 size += sizeof(data->period);
1205
1206 if (sample_type & PERF_SAMPLE_WEIGHT)
1207 size += sizeof(data->weight);
1208
1209 if (sample_type & PERF_SAMPLE_READ)
1210 size += event->read_size;
1211
1212 if (sample_type & PERF_SAMPLE_DATA_SRC)
1213 size += sizeof(data->data_src.val);
1214
1215 if (sample_type & PERF_SAMPLE_TRANSACTION)
1216 size += sizeof(data->txn);
1217
1218 event->header_size = size;
1219 }
1220
1221 static void perf_event__id_header_size(struct perf_event *event)
1222 {
1223 struct perf_sample_data *data;
1224 u64 sample_type = event->attr.sample_type;
1225 u16 size = 0;
1226
1227 if (sample_type & PERF_SAMPLE_TID)
1228 size += sizeof(data->tid_entry);
1229
1230 if (sample_type & PERF_SAMPLE_TIME)
1231 size += sizeof(data->time);
1232
1233 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1234 size += sizeof(data->id);
1235
1236 if (sample_type & PERF_SAMPLE_ID)
1237 size += sizeof(data->id);
1238
1239 if (sample_type & PERF_SAMPLE_STREAM_ID)
1240 size += sizeof(data->stream_id);
1241
1242 if (sample_type & PERF_SAMPLE_CPU)
1243 size += sizeof(data->cpu_entry);
1244
1245 event->id_header_size = size;
1246 }
1247
1248 static void perf_group_attach(struct perf_event *event)
1249 {
1250 struct perf_event *group_leader = event->group_leader, *pos;
1251
1252 /*
1253 * We can have double attach due to group movement in perf_event_open.
1254 */
1255 if (event->attach_state & PERF_ATTACH_GROUP)
1256 return;
1257
1258 event->attach_state |= PERF_ATTACH_GROUP;
1259
1260 if (group_leader == event)
1261 return;
1262
1263 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1264 !is_software_event(event))
1265 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1266
1267 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1268 group_leader->nr_siblings++;
1269
1270 perf_event__header_size(group_leader);
1271
1272 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1273 perf_event__header_size(pos);
1274 }
1275
1276 /*
1277 	 * Remove an event from the lists for its context.
1278 * Must be called with ctx->mutex and ctx->lock held.
1279 */
1280 static void
1281 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1282 {
1283 struct perf_cpu_context *cpuctx;
1284 /*
1285 * We can have double detach due to exit/hot-unplug + close.
1286 */
1287 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1288 return;
1289
1290 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1291
1292 if (is_cgroup_event(event)) {
1293 ctx->nr_cgroups--;
1294 cpuctx = __get_cpu_context(ctx);
1295 /*
1296 * if there are no more cgroup events
1297 	 * then clear cgrp to avoid stale pointer
1298 * in update_cgrp_time_from_cpuctx()
1299 */
1300 if (!ctx->nr_cgroups)
1301 cpuctx->cgrp = NULL;
1302 }
1303
1304 if (has_branch_stack(event))
1305 ctx->nr_branch_stack--;
1306
1307 ctx->nr_events--;
1308 if (event->attr.inherit_stat)
1309 ctx->nr_stat--;
1310
1311 list_del_rcu(&event->event_entry);
1312
1313 if (event->group_leader == event)
1314 list_del_init(&event->group_entry);
1315
1316 update_group_times(event);
1317
1318 /*
1319 * If event was in error state, then keep it
1320 * that way, otherwise bogus counts will be
1321 * returned on read(). The only way to get out
1322 * of error state is by explicit re-enabling
1323 * of the event
1324 */
1325 if (event->state > PERF_EVENT_STATE_OFF)
1326 event->state = PERF_EVENT_STATE_OFF;
1327
1328 ctx->generation++;
1329 }
1330
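/*
 * Detach an event from its group: a sibling is simply unlinked from its
 * leader, while detaching a leader promotes all of its siblings to
 * singleton events on whatever group list the leader was on.
 */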
1331 static void perf_group_detach(struct perf_event *event)
1332 {
1333 struct perf_event *sibling, *tmp;
1334 struct list_head *list = NULL;
1335
1336 /*
1337 * We can have double detach due to exit/hot-unplug + close.
1338 */
1339 if (!(event->attach_state & PERF_ATTACH_GROUP))
1340 return;
1341
1342 event->attach_state &= ~PERF_ATTACH_GROUP;
1343
1344 /*
1345 * If this is a sibling, remove it from its group.
1346 */
1347 if (event->group_leader != event) {
1348 list_del_init(&event->group_entry);
1349 event->group_leader->nr_siblings--;
1350 goto out;
1351 }
1352
1353 if (!list_empty(&event->group_entry))
1354 list = &event->group_entry;
1355
1356 /*
1357 * If this was a group event with sibling events then
1358 * upgrade the siblings to singleton events by adding them
1359 * to whatever list we are on.
1360 */
1361 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1362 if (list)
1363 list_move_tail(&sibling->group_entry, list);
1364 sibling->group_leader = sibling;
1365
1366 /* Inherit group flags from the previous leader */
1367 sibling->group_flags = event->group_flags;
1368 }
1369
1370 out:
1371 perf_event__header_size(event->group_leader);
1372
1373 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1374 perf_event__header_size(tmp);
1375 }
1376
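/*
 * An event is eligible on this CPU if it is not bound to another CPU and
 * its cgroup constraint (if any) matches the current cgroup.
 */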
1377 static inline int
1378 event_filter_match(struct perf_event *event)
1379 {
1380 return (event->cpu == -1 || event->cpu == smp_processor_id())
1381 && perf_cgroup_match(event);
1382 }
1383
1384 static void
1385 event_sched_out(struct perf_event *event,
1386 struct perf_cpu_context *cpuctx,
1387 struct perf_event_context *ctx)
1388 {
1389 u64 tstamp = perf_event_time(event);
1390 u64 delta;
1391 /*
1392 * An event which could not be activated because of
1393 * filter mismatch still needs to have its timings
1394 	 * maintained, otherwise bogus information is returned
1395 * via read() for time_enabled, time_running:
1396 */
1397 if (event->state == PERF_EVENT_STATE_INACTIVE
1398 && !event_filter_match(event)) {
1399 delta = tstamp - event->tstamp_stopped;
1400 event->tstamp_running += delta;
1401 event->tstamp_stopped = tstamp;
1402 }
1403
1404 if (event->state != PERF_EVENT_STATE_ACTIVE)
1405 return;
1406
1407 perf_pmu_disable(event->pmu);
1408
1409 event->state = PERF_EVENT_STATE_INACTIVE;
1410 if (event->pending_disable) {
1411 event->pending_disable = 0;
1412 event->state = PERF_EVENT_STATE_OFF;
1413 }
1414 event->tstamp_stopped = tstamp;
1415 event->pmu->del(event, 0);
1416 event->oncpu = -1;
1417
1418 if (!is_software_event(event))
1419 cpuctx->active_oncpu--;
1420 ctx->nr_active--;
1421 if (event->attr.freq && event->attr.sample_freq)
1422 ctx->nr_freq--;
1423 if (event->attr.exclusive || !cpuctx->active_oncpu)
1424 cpuctx->exclusive = 0;
1425
1426 perf_pmu_enable(event->pmu);
1427 }
1428
1429 static void
1430 group_sched_out(struct perf_event *group_event,
1431 struct perf_cpu_context *cpuctx,
1432 struct perf_event_context *ctx)
1433 {
1434 struct perf_event *event;
1435 int state = group_event->state;
1436
1437 event_sched_out(group_event, cpuctx, ctx);
1438
1439 /*
1440 * Schedule out siblings (if any):
1441 */
1442 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1443 event_sched_out(event, cpuctx, ctx);
1444
1445 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1446 cpuctx->exclusive = 0;
1447 }
1448
1449 struct remove_event {
1450 struct perf_event *event;
1451 bool detach_group;
1452 };
1453
1454 /*
1455 * Cross CPU call to remove a performance event
1456 *
1457 * We disable the event on the hardware level first. After that we
1458 * remove it from the context list.
1459 */
1460 static int __perf_remove_from_context(void *info)
1461 {
1462 struct remove_event *re = info;
1463 struct perf_event *event = re->event;
1464 struct perf_event_context *ctx = event->ctx;
1465 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1466
1467 raw_spin_lock(&ctx->lock);
1468 event_sched_out(event, cpuctx, ctx);
1469 if (re->detach_group)
1470 perf_group_detach(event);
1471 list_del_event(event, ctx);
1472 if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1473 ctx->is_active = 0;
1474 cpuctx->task_ctx = NULL;
1475 }
1476 raw_spin_unlock(&ctx->lock);
1477
1478 return 0;
1479 }
1480
1481
1482 /*
1483 * Remove the event from a task's (or a CPU's) list of events.
1484 *
1485 * CPU events are removed with a smp call. For task events we only
1486 * call when the task is on a CPU.
1487 *
1488 * If event->ctx is a cloned context, callers must make sure that
1489 * every task struct that event->ctx->task could possibly point to
1490 * remains valid. This is OK when called from perf_release since
1491 * that only calls us on the top-level context, which can't be a clone.
1492 * When called from perf_event_exit_task, it's OK because the
1493 * context has been detached from its task.
1494 */
1495 static void perf_remove_from_context(struct perf_event *event, bool detach_group)
1496 {
1497 struct perf_event_context *ctx = event->ctx;
1498 struct task_struct *task = ctx->task;
1499 struct remove_event re = {
1500 .event = event,
1501 .detach_group = detach_group,
1502 };
1503
1504 lockdep_assert_held(&ctx->mutex);
1505
1506 if (!task) {
1507 /*
1508 * Per cpu events are removed via an smp call and
1509 * the removal is always successful.
1510 */
1511 cpu_function_call(event->cpu, __perf_remove_from_context, &re);
1512 return;
1513 }
1514
1515 retry:
1516 if (!task_function_call(task, __perf_remove_from_context, &re))
1517 return;
1518
1519 raw_spin_lock_irq(&ctx->lock);
1520 /*
1521 * If we failed to find a running task, but find the context active now
1522 * that we've acquired the ctx->lock, retry.
1523 */
1524 if (ctx->is_active) {
1525 raw_spin_unlock_irq(&ctx->lock);
1526 goto retry;
1527 }
1528
1529 /*
1530 	 * Since the task isn't running, it's safe to remove the event; our
1531 	 * holding of ctx->lock ensures the task won't get scheduled in.
1532 */
1533 if (detach_group)
1534 perf_group_detach(event);
1535 list_del_event(event, ctx);
1536 raw_spin_unlock_irq(&ctx->lock);
1537 }
1538
1539 /*
1540 * Cross CPU call to disable a performance event
1541 */
1542 int __perf_event_disable(void *info)
1543 {
1544 struct perf_event *event = info;
1545 struct perf_event_context *ctx = event->ctx;
1546 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1547
1548 /*
1549 * If this is a per-task event, need to check whether this
1550 * event's task is the current task on this cpu.
1551 *
1552 * Can trigger due to concurrent perf_event_context_sched_out()
1553 * flipping contexts around.
1554 */
1555 if (ctx->task && cpuctx->task_ctx != ctx)
1556 return -EINVAL;
1557
1558 raw_spin_lock(&ctx->lock);
1559
1560 /*
1561 * If the event is on, turn it off.
1562 * If it is in error state, leave it in error state.
1563 */
1564 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
1565 update_context_time(ctx);
1566 update_cgrp_time_from_event(event);
1567 update_group_times(event);
1568 if (event == event->group_leader)
1569 group_sched_out(event, cpuctx, ctx);
1570 else
1571 event_sched_out(event, cpuctx, ctx);
1572 event->state = PERF_EVENT_STATE_OFF;
1573 }
1574
1575 raw_spin_unlock(&ctx->lock);
1576
1577 return 0;
1578 }
1579
1580 /*
1581 	 * Disable an event.
1582 *
1583 * If event->ctx is a cloned context, callers must make sure that
1584 * every task struct that event->ctx->task could possibly point to
1585 	 * remains valid. This condition is satisfied when called through
1586 * perf_event_for_each_child or perf_event_for_each because they
1587 * hold the top-level event's child_mutex, so any descendant that
1588 * goes to exit will block in sync_child_event.
1589 * When called from perf_pending_event it's OK because event->ctx
1590 * is the current context on this CPU and preemption is disabled,
1591 * hence we can't get into perf_event_task_sched_out for this context.
1592 */
1593 void perf_event_disable(struct perf_event *event)
1594 {
1595 struct perf_event_context *ctx = event->ctx;
1596 struct task_struct *task = ctx->task;
1597
1598 if (!task) {
1599 /*
1600 * Disable the event on the cpu that it's on
1601 */
1602 cpu_function_call(event->cpu, __perf_event_disable, event);
1603 return;
1604 }
1605
1606 retry:
1607 if (!task_function_call(task, __perf_event_disable, event))
1608 return;
1609
1610 raw_spin_lock_irq(&ctx->lock);
1611 /*
1612 * If the event is still active, we need to retry the cross-call.
1613 */
1614 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1615 raw_spin_unlock_irq(&ctx->lock);
1616 /*
1617 * Reload the task pointer, it might have been changed by
1618 * a concurrent perf_event_context_sched_out().
1619 */
1620 task = ctx->task;
1621 goto retry;
1622 }
1623
1624 /*
1625 * Since we have the lock this context can't be scheduled
1626 * in, so we can change the state safely.
1627 */
1628 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1629 update_group_times(event);
1630 event->state = PERF_EVENT_STATE_OFF;
1631 }
1632 raw_spin_unlock_irq(&ctx->lock);
1633 }
1634 EXPORT_SYMBOL_GPL(perf_event_disable);
1635
1636 static void perf_set_shadow_time(struct perf_event *event,
1637 struct perf_event_context *ctx,
1638 u64 tstamp)
1639 {
1640 /*
1641 * use the correct time source for the time snapshot
1642 *
1643 * We could get by without this by leveraging the
1644 * fact that to get to this function, the caller
1645 * has most likely already called update_context_time()
1646 	 * and update_cgrp_time_xx() and thus both timestamps
1647 	 * are identical (or very close). Given that tstamp is
1648 * already adjusted for cgroup, we could say that:
1649 * tstamp - ctx->timestamp
1650 * is equivalent to
1651 * tstamp - cgrp->timestamp.
1652 *
1653 * Then, in perf_output_read(), the calculation would
1654 * work with no changes because:
1655 * - event is guaranteed scheduled in
1656 * - no scheduled out in between
1657 * - thus the timestamp would be the same
1658 *
1659 * But this is a bit hairy.
1660 *
1661 * So instead, we have an explicit cgroup call to remain
1662 	 * within the time source all along. We believe it
1663 * is cleaner and simpler to understand.
1664 */
1665 if (is_cgroup_event(event))
1666 perf_cgroup_set_shadow_time(event, tstamp);
1667 else
1668 event->shadow_ctx_time = tstamp - ctx->timestamp;
1669 }
1670
1671 #define MAX_INTERRUPTS (~0ULL)
1672
1673 static void perf_log_throttle(struct perf_event *event, int enable);
1674
1675 static int
1676 event_sched_in(struct perf_event *event,
1677 struct perf_cpu_context *cpuctx,
1678 struct perf_event_context *ctx)
1679 {
1680 u64 tstamp = perf_event_time(event);
1681 int ret = 0;
1682
1683 lockdep_assert_held(&ctx->lock);
1684
1685 if (event->state <= PERF_EVENT_STATE_OFF)
1686 return 0;
1687
1688 event->state = PERF_EVENT_STATE_ACTIVE;
1689 event->oncpu = smp_processor_id();
1690
1691 /*
1692 * Unthrottle events, since we scheduled we might have missed several
1693 * ticks already, also for a heavily scheduling task there is little
1694 * guarantee it'll get a tick in a timely manner.
1695 */
1696 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1697 perf_log_throttle(event, 1);
1698 event->hw.interrupts = 0;
1699 }
1700
1701 /*
1702 * The new state must be visible before we turn it on in the hardware:
1703 */
1704 smp_wmb();
1705
1706 perf_pmu_disable(event->pmu);
1707
1708 if (event->pmu->add(event, PERF_EF_START)) {
1709 event->state = PERF_EVENT_STATE_INACTIVE;
1710 event->oncpu = -1;
1711 ret = -EAGAIN;
1712 goto out;
1713 }
1714
1715 event->tstamp_running += tstamp - event->tstamp_stopped;
1716
1717 perf_set_shadow_time(event, ctx, tstamp);
1718
1719 if (!is_software_event(event))
1720 cpuctx->active_oncpu++;
1721 ctx->nr_active++;
1722 if (event->attr.freq && event->attr.sample_freq)
1723 ctx->nr_freq++;
1724
1725 if (event->attr.exclusive)
1726 cpuctx->exclusive = 1;
1727
1728 out:
1729 perf_pmu_enable(event->pmu);
1730
1731 return ret;
1732 }
1733
1734 static int
1735 group_sched_in(struct perf_event *group_event,
1736 struct perf_cpu_context *cpuctx,
1737 struct perf_event_context *ctx)
1738 {
1739 struct perf_event *event, *partial_group = NULL;
1740 struct pmu *pmu = ctx->pmu;
1741 u64 now = ctx->time;
1742 bool simulate = false;
1743
1744 if (group_event->state == PERF_EVENT_STATE_OFF)
1745 return 0;
1746
1747 pmu->start_txn(pmu);
1748
1749 if (event_sched_in(group_event, cpuctx, ctx)) {
1750 pmu->cancel_txn(pmu);
1751 perf_cpu_hrtimer_restart(cpuctx);
1752 return -EAGAIN;
1753 }
1754
1755 /*
1756 * Schedule in siblings as one group (if any):
1757 */
1758 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1759 if (event_sched_in(event, cpuctx, ctx)) {
1760 partial_group = event;
1761 goto group_error;
1762 }
1763 }
1764
1765 if (!pmu->commit_txn(pmu))
1766 return 0;
1767
1768 group_error:
1769 /*
1770 * Groups can be scheduled in as one unit only, so undo any
1771 * partial group before returning:
1772 * The events up to the failed event are scheduled out normally,
1773 * tstamp_stopped will be updated.
1774 *
1775 * The failed events and the remaining siblings need to have
1776 	 * their timings updated as if they had gone through event_sched_in()
1777 * and event_sched_out(). This is required to get consistent timings
1778 * across the group. This also takes care of the case where the group
1779 * could never be scheduled by ensuring tstamp_stopped is set to mark
1780 * the time the event was actually stopped, such that time delta
1781 * calculation in update_event_times() is correct.
1782 */
1783 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1784 if (event == partial_group)
1785 simulate = true;
1786
1787 if (simulate) {
1788 event->tstamp_running += now - event->tstamp_stopped;
1789 event->tstamp_stopped = now;
1790 } else {
1791 event_sched_out(event, cpuctx, ctx);
1792 }
1793 }
1794 event_sched_out(group_event, cpuctx, ctx);
1795
1796 pmu->cancel_txn(pmu);
1797
1798 perf_cpu_hrtimer_restart(cpuctx);
1799
1800 return -EAGAIN;
1801 }
1802
1803 /*
1804 * Work out whether we can put this event group on the CPU now.
1805 */
1806 static int group_can_go_on(struct perf_event *event,
1807 struct perf_cpu_context *cpuctx,
1808 int can_add_hw)
1809 {
1810 /*
1811 * Groups consisting entirely of software events can always go on.
1812 */
1813 if (event->group_flags & PERF_GROUP_SOFTWARE)
1814 return 1;
1815 /*
1816 * If an exclusive group is already on, no other hardware
1817 * events can go on.
1818 */
1819 if (cpuctx->exclusive)
1820 return 0;
1821 /*
1822 * If this group is exclusive and there are already
1823 * events on the CPU, it can't go on.
1824 */
1825 if (event->attr.exclusive && cpuctx->active_oncpu)
1826 return 0;
1827 /*
1828 * Otherwise, try to add it if all previous groups were able
1829 * to go on.
1830 */
1831 return can_add_hw;
1832 }
1833
1834 static void add_event_to_ctx(struct perf_event *event,
1835 struct perf_event_context *ctx)
1836 {
1837 u64 tstamp = perf_event_time(event);
1838
1839 list_add_event(event, ctx);
1840 perf_group_attach(event);
1841 event->tstamp_enabled = tstamp;
1842 event->tstamp_running = tstamp;
1843 event->tstamp_stopped = tstamp;
1844 }
1845
1846 static void task_ctx_sched_out(struct perf_event_context *ctx);
1847 static void
1848 ctx_sched_in(struct perf_event_context *ctx,
1849 struct perf_cpu_context *cpuctx,
1850 enum event_type_t event_type,
1851 struct task_struct *task);
1852
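/*
 * Schedule in pinned events before flexible ones, for the CPU context and
 * then the task context, so pinned events get first claim on the hardware
 * counters.
 */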
1853 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1854 struct perf_event_context *ctx,
1855 struct task_struct *task)
1856 {
1857 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1858 if (ctx)
1859 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1860 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1861 if (ctx)
1862 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1863 }
1864
1865 /*
1866 * Cross CPU call to install and enable a performance event
1867 *
1868 * Must be called with ctx->mutex held
1869 */
1870 static int __perf_install_in_context(void *info)
1871 {
1872 struct perf_event *event = info;
1873 struct perf_event_context *ctx = event->ctx;
1874 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1875 struct perf_event_context *task_ctx = cpuctx->task_ctx;
1876 struct task_struct *task = current;
1877
1878 perf_ctx_lock(cpuctx, task_ctx);
1879 perf_pmu_disable(cpuctx->ctx.pmu);
1880
1881 /*
1882 * If there was an active task_ctx schedule it out.
1883 */
1884 if (task_ctx)
1885 task_ctx_sched_out(task_ctx);
1886
1887 /*
1888 * If the context we're installing events in is not the
1889 * active task_ctx, flip them.
1890 */
1891 if (ctx->task && task_ctx != ctx) {
1892 if (task_ctx)
1893 raw_spin_unlock(&task_ctx->lock);
1894 raw_spin_lock(&ctx->lock);
1895 task_ctx = ctx;
1896 }
1897
1898 if (task_ctx) {
1899 cpuctx->task_ctx = task_ctx;
1900 task = task_ctx->task;
1901 }
1902
1903 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
1904
1905 update_context_time(ctx);
1906 /*
1907 * update cgrp time only if current cgrp
1908 * matches event->cgrp. Must be done before
1909 * calling add_event_to_ctx()
1910 */
1911 update_cgrp_time_from_event(event);
1912
1913 add_event_to_ctx(event, ctx);
1914
1915 /*
1916 * Schedule everything back in
1917 */
1918 perf_event_sched_in(cpuctx, task_ctx, task);
1919
1920 perf_pmu_enable(cpuctx->ctx.pmu);
1921 perf_ctx_unlock(cpuctx, task_ctx);
1922
1923 return 0;
1924 }
1925
1926 /*
1927 * Attach a performance event to a context
1928 *
1929 * First we add the event to the list with the hardware enable bit
1930 * in event->hw_config cleared.
1931 *
1932 * If the event is attached to a task which is on a CPU we use a smp
1933 * call to enable it in the task context. The task might have been
1934 * scheduled away, but we check this in the smp call again.
1935 */
1936 static void
1937 perf_install_in_context(struct perf_event_context *ctx,
1938 struct perf_event *event,
1939 int cpu)
1940 {
1941 struct task_struct *task = ctx->task;
1942
1943 lockdep_assert_held(&ctx->mutex);
1944
1945 event->ctx = ctx;
1946 if (event->cpu != -1)
1947 event->cpu = cpu;
1948
1949 if (!task) {
1950 /*
1951 * Per cpu events are installed via an smp call and
1952 * the install is always successful.
1953 */
1954 cpu_function_call(cpu, __perf_install_in_context, event);
1955 return;
1956 }
1957
1958 retry:
1959 if (!task_function_call(task, __perf_install_in_context, event))
1960 return;
1961
1962 raw_spin_lock_irq(&ctx->lock);
1963 /*
1964 * If we failed to find a running task, but find the context active now
1965 * that we've acquired the ctx->lock, retry.
1966 */
1967 if (ctx->is_active) {
1968 raw_spin_unlock_irq(&ctx->lock);
1969 goto retry;
1970 }
1971
1972 /*
1973 	 * Since the task isn't running, it's safe to add the event; our holding
1974 	 * of ctx->lock ensures the task won't get scheduled in.
1975 */
1976 add_event_to_ctx(event, ctx);
1977 raw_spin_unlock_irq(&ctx->lock);
1978 }
1979
1980 /*
1981 	 * Put an event into inactive state and update time fields.
1982 * Enabling the leader of a group effectively enables all
1983 * the group members that aren't explicitly disabled, so we
1984 * have to update their ->tstamp_enabled also.
1985 * Note: this works for group members as well as group leaders
1986 * since the non-leader members' sibling_lists will be empty.
1987 */
1988 static void __perf_event_mark_enabled(struct perf_event *event)
1989 {
1990 struct perf_event *sub;
1991 u64 tstamp = perf_event_time(event);
1992
1993 event->state = PERF_EVENT_STATE_INACTIVE;
1994 event->tstamp_enabled = tstamp - event->total_time_enabled;
1995 list_for_each_entry(sub, &event->sibling_list, group_entry) {
1996 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1997 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
1998 }
1999 }
2000
2001 /*
2002 * Cross CPU call to enable a performance event
2003 */
2004 static int __perf_event_enable(void *info)
2005 {
2006 struct perf_event *event = info;
2007 struct perf_event_context *ctx = event->ctx;
2008 struct perf_event *leader = event->group_leader;
2009 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2010 int err;
2011
2012 /*
2013 * There's a time window between 'ctx->is_active' check
2014 * in perf_event_enable function and this place having:
2015 * - IRQs on
2016 * - ctx->lock unlocked
2017 *
2018 * where the task could be killed and 'ctx' deactivated
2019 * by perf_event_exit_task.
2020 */
2021 if (!ctx->is_active)
2022 return -EINVAL;
2023
2024 raw_spin_lock(&ctx->lock);
2025 update_context_time(ctx);
2026
2027 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2028 goto unlock;
2029
2030 /*
2031 * set current task's cgroup time reference point
2032 */
2033 perf_cgroup_set_timestamp(current, ctx);
2034
2035 __perf_event_mark_enabled(event);
2036
2037 if (!event_filter_match(event)) {
2038 if (is_cgroup_event(event))
2039 perf_cgroup_defer_enabled(event);
2040 goto unlock;
2041 }
2042
2043 /*
2044 * If the event is in a group and isn't the group leader,
2045 * then don't put it on unless the group is on.
2046 */
2047 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
2048 goto unlock;
2049
2050 if (!group_can_go_on(event, cpuctx, 1)) {
2051 err = -EEXIST;
2052 } else {
2053 if (event == leader)
2054 err = group_sched_in(event, cpuctx, ctx);
2055 else
2056 err = event_sched_in(event, cpuctx, ctx);
2057 }
2058
2059 if (err) {
2060 /*
2061 * If this event can't go on and it's part of a
2062 * group, then the whole group has to come off.
2063 */
2064 if (leader != event) {
2065 group_sched_out(leader, cpuctx, ctx);
2066 perf_cpu_hrtimer_restart(cpuctx);
2067 }
2068 if (leader->attr.pinned) {
2069 update_group_times(leader);
2070 leader->state = PERF_EVENT_STATE_ERROR;
2071 }
2072 }
2073
2074 unlock:
2075 raw_spin_unlock(&ctx->lock);
2076
2077 return 0;
2078 }
2079
2080 /*
2081 * Enable an event.
2082 *
2083 * If event->ctx is a cloned context, callers must make sure that
2084 * every task struct that event->ctx->task could possibly point to
2085 * remains valid. This condition is satisfied when called through
2086 * perf_event_for_each_child or perf_event_for_each as described
2087 * for perf_event_disable.
2088 */
2089 void perf_event_enable(struct perf_event *event)
2090 {
2091 struct perf_event_context *ctx = event->ctx;
2092 struct task_struct *task = ctx->task;
2093
2094 if (!task) {
2095 /*
2096 * Enable the event on the cpu that it's on
2097 */
2098 cpu_function_call(event->cpu, __perf_event_enable, event);
2099 return;
2100 }
2101
2102 raw_spin_lock_irq(&ctx->lock);
2103 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2104 goto out;
2105
2106 /*
2107 * If the event is in error state, clear that first.
2108 * That way, if we see the event in error state below, we
2109 * know that it has gone back into error state, as distinct
2110 * from the task having been scheduled away before the
2111 * cross-call arrived.
2112 */
2113 if (event->state == PERF_EVENT_STATE_ERROR)
2114 event->state = PERF_EVENT_STATE_OFF;
2115
2116 retry:
2117 if (!ctx->is_active) {
2118 __perf_event_mark_enabled(event);
2119 goto out;
2120 }
2121
2122 raw_spin_unlock_irq(&ctx->lock);
2123
2124 if (!task_function_call(task, __perf_event_enable, event))
2125 return;
2126
2127 raw_spin_lock_irq(&ctx->lock);
2128
2129 /*
2130 * If the context is active and the event is still off,
2131 * we need to retry the cross-call.
2132 */
2133 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
2134 /*
2135 * task could have been flipped by a concurrent
2136 * perf_event_context_sched_out()
2137 */
2138 task = ctx->task;
2139 goto retry;
2140 }
2141
2142 out:
2143 raw_spin_unlock_irq(&ctx->lock);
2144 }
2145 EXPORT_SYMBOL_GPL(perf_event_enable);
2146
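/*
 * Re-arm and enable a sampling event: roughly speaking, @refresh is added
 * to event->event_limit and the overflow path counts it back down, disabling
 * the event again once that budget is exhausted. This backs the
 * PERF_EVENT_IOC_REFRESH ioctl.
 */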
2147 int perf_event_refresh(struct perf_event *event, int refresh)
2148 {
2149 /*
2150 * not supported on inherited events
2151 */
2152 if (event->attr.inherit || !is_sampling_event(event))
2153 return -EINVAL;
2154
2155 atomic_add(refresh, &event->event_limit);
2156 perf_event_enable(event);
2157
2158 return 0;
2159 }
2160 EXPORT_SYMBOL_GPL(perf_event_refresh);
2161
2162 static void ctx_sched_out(struct perf_event_context *ctx,
2163 struct perf_cpu_context *cpuctx,
2164 enum event_type_t event_type)
2165 {
2166 struct perf_event *event;
2167 int is_active = ctx->is_active;
2168
2169 ctx->is_active &= ~event_type;
2170 if (likely(!ctx->nr_events))
2171 return;
2172
2173 update_context_time(ctx);
2174 update_cgrp_time_from_cpuctx(cpuctx);
2175 if (!ctx->nr_active)
2176 return;
2177
2178 perf_pmu_disable(ctx->pmu);
2179 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
2180 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2181 group_sched_out(event, cpuctx, ctx);
2182 }
2183
2184 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
2185 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
2186 group_sched_out(event, cpuctx, ctx);
2187 }
2188 perf_pmu_enable(ctx->pmu);
2189 }
2190
2191 /*
2192 * Test whether two contexts are equivalent, i.e. whether they have both been
2193 * cloned from the same version of the same context.
2194 *
2195 * Equivalence is measured using a generation number in the context that is
2196 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2197 * and list_del_event().
2198 */
2199 static int context_equiv(struct perf_event_context *ctx1,
2200 struct perf_event_context *ctx2)
2201 {
2202 /* Pinning disables the swap optimization */
2203 if (ctx1->pin_count || ctx2->pin_count)
2204 return 0;
2205
2206 /* If ctx1 is the parent of ctx2 */
2207 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2208 return 1;
2209
2210 /* If ctx2 is the parent of ctx1 */
2211 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2212 return 1;
2213
2214 /*
2215 * If ctx1 and ctx2 have the same parent; we flatten the parent
2216 * hierarchy, see perf_event_init_context().
2217 */
2218 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2219 ctx1->parent_gen == ctx2->parent_gen)
2220 return 1;
2221
2222 /* Unmatched */
2223 return 0;
2224 }
2225
2226 static void __perf_event_sync_stat(struct perf_event *event,
2227 struct perf_event *next_event)
2228 {
2229 u64 value;
2230
2231 if (!event->attr.inherit_stat)
2232 return;
2233
2234 /*
2235 * Update the event value. We cannot use perf_event_read()
2236 * because we're in the middle of a context switch and have IRQs
2237 * disabled, which upsets smp_call_function_single(). However,
2238 * we know the event must be on the current CPU, so we
2239 * don't need to use it.
2240 */
2241 switch (event->state) {
2242 case PERF_EVENT_STATE_ACTIVE:
2243 event->pmu->read(event);
2244 /* fall-through */
2245
2246 case PERF_EVENT_STATE_INACTIVE:
2247 update_event_times(event);
2248 break;
2249
2250 default:
2251 break;
2252 }
2253
2254 /*
2255 * In order to keep per-task stats reliable we need to flip the event
2256 * values when we flip the contexts.
2257 */
2258 value = local64_read(&next_event->count);
2259 value = local64_xchg(&event->count, value);
2260 local64_set(&next_event->count, value);
2261
2262 swap(event->total_time_enabled, next_event->total_time_enabled);
2263 swap(event->total_time_running, next_event->total_time_running);
2264
2265 /*
2266 * Since we swizzled the values, update the user visible data too.
2267 */
2268 perf_event_update_userpage(event);
2269 perf_event_update_userpage(next_event);
2270 }
2271
2272 static void perf_event_sync_stat(struct perf_event_context *ctx,
2273 struct perf_event_context *next_ctx)
2274 {
2275 struct perf_event *event, *next_event;
2276
2277 if (!ctx->nr_stat)
2278 return;
2279
2280 update_context_time(ctx);
2281
2282 event = list_first_entry(&ctx->event_list,
2283 struct perf_event, event_entry);
2284
2285 next_event = list_first_entry(&next_ctx->event_list,
2286 struct perf_event, event_entry);
2287
2288 while (&event->event_entry != &ctx->event_list &&
2289 &next_event->event_entry != &next_ctx->event_list) {
2290
2291 __perf_event_sync_stat(event, next_event);
2292
2293 event = list_next_entry(event, event_entry);
2294 next_event = list_next_entry(next_event, event_entry);
2295 }
2296 }
2297
2298 static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2299 struct task_struct *next)
2300 {
2301 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
2302 struct perf_event_context *next_ctx;
2303 struct perf_event_context *parent, *next_parent;
2304 struct perf_cpu_context *cpuctx;
2305 int do_switch = 1;
2306
2307 if (likely(!ctx))
2308 return;
2309
2310 cpuctx = __get_cpu_context(ctx);
2311 if (!cpuctx->task_ctx)
2312 return;
2313
2314 rcu_read_lock();
2315 next_ctx = next->perf_event_ctxp[ctxn];
2316 if (!next_ctx)
2317 goto unlock;
2318
2319 parent = rcu_dereference(ctx->parent_ctx);
2320 next_parent = rcu_dereference(next_ctx->parent_ctx);
2321
2322 /* If neither context has a parent context, they cannot be clones. */
2323 if (!parent || !next_parent)
2324 goto unlock;
2325
2326 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
2327 /*
2328 * Looks like the two contexts are clones, so we might be
2329 * able to optimize the context switch. We lock both
2330 * contexts and check that they are clones under the
2331 * lock (including re-checking that neither has been
2332 * uncloned in the meantime). It doesn't matter which
2333 * order we take the locks because no other cpu could
2334 * be trying to lock both of these tasks.
2335 */
2336 raw_spin_lock(&ctx->lock);
2337 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2338 if (context_equiv(ctx, next_ctx)) {
2339 /*
2340 * XXX do we need a memory barrier of sorts
2341 * wrt rcu_dereference() of perf_event_ctxp
2342 */
2343 task->perf_event_ctxp[ctxn] = next_ctx;
2344 next->perf_event_ctxp[ctxn] = ctx;
2345 ctx->task = next;
2346 next_ctx->task = task;
2347 do_switch = 0;
2348
2349 perf_event_sync_stat(ctx, next_ctx);
2350 }
2351 raw_spin_unlock(&next_ctx->lock);
2352 raw_spin_unlock(&ctx->lock);
2353 }
2354 unlock:
2355 rcu_read_unlock();
2356
2357 if (do_switch) {
2358 raw_spin_lock(&ctx->lock);
2359 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2360 cpuctx->task_ctx = NULL;
2361 raw_spin_unlock(&ctx->lock);
2362 }
2363 }
2364
2365 #define for_each_task_context_nr(ctxn) \
2366 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2367
2368 /*
2369 * Called from scheduler to remove the events of the current task,
2370 * with interrupts disabled.
2371 *
2372 * We stop each event and update the event value in event->count.
2373 *
2374 * This does not protect us against NMI, but disable()
2375 * sets the disabled bit in the control field of the event _before_
2376 * accessing the event control register. If an NMI hits, then it will
2377 * not restart the event.
2378 */
2379 void __perf_event_task_sched_out(struct task_struct *task,
2380 struct task_struct *next)
2381 {
2382 int ctxn;
2383
2384 for_each_task_context_nr(ctxn)
2385 perf_event_context_sched_out(task, ctxn, next);
2386
2387 /*
2388 * if cgroup events exist on this CPU, then we need
2389 * to check if we have to switch out PMU state.
2390 * cgroup events are system-wide mode only.
2391 */
2392 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2393 perf_cgroup_sched_out(task, next);
2394 }
2395
2396 static void task_ctx_sched_out(struct perf_event_context *ctx)
2397 {
2398 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2399
2400 if (!cpuctx->task_ctx)
2401 return;
2402
2403 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2404 return;
2405
2406 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2407 cpuctx->task_ctx = NULL;
2408 }
2409
2410 /*
2411 * Called with IRQs disabled
2412 */
2413 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2414 enum event_type_t event_type)
2415 {
2416 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2417 }
2418
2419 static void
2420 ctx_pinned_sched_in(struct perf_event_context *ctx,
2421 struct perf_cpu_context *cpuctx)
2422 {
2423 struct perf_event *event;
2424
2425 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2426 if (event->state <= PERF_EVENT_STATE_OFF)
2427 continue;
2428 if (!event_filter_match(event))
2429 continue;
2430
2431 /* may need to reset tstamp_enabled */
2432 if (is_cgroup_event(event))
2433 perf_cgroup_mark_enabled(event, ctx);
2434
2435 if (group_can_go_on(event, cpuctx, 1))
2436 group_sched_in(event, cpuctx, ctx);
2437
2438 /*
2439 * If this pinned group hasn't been scheduled,
2440 * put it in error state.
2441 */
2442 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2443 update_group_times(event);
2444 event->state = PERF_EVENT_STATE_ERROR;
2445 }
2446 }
2447 }
2448
2449 static void
2450 ctx_flexible_sched_in(struct perf_event_context *ctx,
2451 struct perf_cpu_context *cpuctx)
2452 {
2453 struct perf_event *event;
2454 int can_add_hw = 1;
2455
2456 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2457 /* Ignore events in OFF or ERROR state */
2458 if (event->state <= PERF_EVENT_STATE_OFF)
2459 continue;
2460 /*
2461 * Listen to the 'cpu' scheduling filter constraint
2462 * of events:
2463 */
2464 if (!event_filter_match(event))
2465 continue;
2466
2467 /* may need to reset tstamp_enabled */
2468 if (is_cgroup_event(event))
2469 perf_cgroup_mark_enabled(event, ctx);
2470
2471 if (group_can_go_on(event, cpuctx, can_add_hw)) {
2472 if (group_sched_in(event, cpuctx, ctx))
2473 can_add_hw = 0;
2474 }
2475 }
2476 }
2477
2478 static void
2479 ctx_sched_in(struct perf_event_context *ctx,
2480 struct perf_cpu_context *cpuctx,
2481 enum event_type_t event_type,
2482 struct task_struct *task)
2483 {
2484 u64 now;
2485 int is_active = ctx->is_active;
2486
2487 ctx->is_active |= event_type;
2488 if (likely(!ctx->nr_events))
2489 return;
2490
2491 now = perf_clock();
2492 ctx->timestamp = now;
2493 perf_cgroup_set_timestamp(task, ctx);
2494 /*
2495 * First go through the list and put on any pinned groups
2496 * in order to give them the best chance of going on.
2497 */
2498 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
2499 ctx_pinned_sched_in(ctx, cpuctx);
2500
2501 /* Then walk through the lower prio flexible groups */
2502 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
2503 ctx_flexible_sched_in(ctx, cpuctx);
2504 }
2505
2506 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
2507 enum event_type_t event_type,
2508 struct task_struct *task)
2509 {
2510 struct perf_event_context *ctx = &cpuctx->ctx;
2511
2512 ctx_sched_in(ctx, cpuctx, event_type, task);
2513 }
2514
2515 static void perf_event_context_sched_in(struct perf_event_context *ctx,
2516 struct task_struct *task)
2517 {
2518 struct perf_cpu_context *cpuctx;
2519
2520 cpuctx = __get_cpu_context(ctx);
2521 if (cpuctx->task_ctx == ctx)
2522 return;
2523
2524 perf_ctx_lock(cpuctx, ctx);
2525 perf_pmu_disable(ctx->pmu);
2526 /*
2527 * We want to keep the following priority order:
2528 * cpu pinned (that don't need to move), task pinned,
2529 * cpu flexible, task flexible.
2530 */
2531 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2532
2533 if (ctx->nr_events)
2534 cpuctx->task_ctx = ctx;
2535
2536 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2537
2538 perf_pmu_enable(ctx->pmu);
2539 perf_ctx_unlock(cpuctx, ctx);
2540
2541 /*
2542 * Since these rotations are per-cpu, we need to ensure the
2543 * cpu-context we got scheduled on is actually rotating.
2544 */
2545 perf_pmu_rotate_start(ctx->pmu);
2546 }
2547
2548 /*
2549 * When sampling the branch stack in system-wide mode, it may be necessary
2550 * to flush the stack on context switch. This happens when the branch
2551 * stack does not tag its entries with the pid of the current task.
2552 * Otherwise it becomes impossible to associate a branch entry with a
2553 * task. This ambiguity is more likely to appear when the branch stack
2554 * supports priv level filtering and the user sets it to monitor only
2555 * at the user level (which could be a useful measurement in system-wide
2556 * mode). In that case, the risk is high of having a branch stack with
2557 * branch from multiple tasks. Flushing may mean dropping the existing
2558 * entries or stashing them somewhere in the PMU specific code layer.
2559 *
2560 * This function provides the context switch callback to the lower code
2561 * layer. It is invoked ONLY when there is at least one system-wide context
2562 * with at least one active event using taken branch sampling.
2563 */
2564 static void perf_branch_stack_sched_in(struct task_struct *prev,
2565 struct task_struct *task)
2566 {
2567 struct perf_cpu_context *cpuctx;
2568 struct pmu *pmu;
2569 unsigned long flags;
2570
2571 /* no need to flush branch stack if not changing task */
2572 if (prev == task)
2573 return;
2574
2575 local_irq_save(flags);
2576
2577 rcu_read_lock();
2578
2579 list_for_each_entry_rcu(pmu, &pmus, entry) {
2580 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2581
2582 /*
2583 * check if the context has at least one
2584 * event using PERF_SAMPLE_BRANCH_STACK
2585 */
2586 if (cpuctx->ctx.nr_branch_stack > 0
2587 && pmu->flush_branch_stack) {
2588
2589 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2590
2591 perf_pmu_disable(pmu);
2592
2593 pmu->flush_branch_stack();
2594
2595 perf_pmu_enable(pmu);
2596
2597 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2598 }
2599 }
2600
2601 rcu_read_unlock();
2602
2603 local_irq_restore(flags);
2604 }
2605
2606 /*
2607 * Called from scheduler to add the events of the current task
2608 * with interrupts disabled.
2609 *
2610 * We restore the event value and then enable it.
2611 *
2612 * This does not protect us against NMI, but enable()
2613 * sets the enabled bit in the control field of the event _before_
2614 * accessing the event control register. If an NMI hits, then it will
2615 * keep the event running.
2616 */
2617 void __perf_event_task_sched_in(struct task_struct *prev,
2618 struct task_struct *task)
2619 {
2620 struct perf_event_context *ctx;
2621 int ctxn;
2622
2623 for_each_task_context_nr(ctxn) {
2624 ctx = task->perf_event_ctxp[ctxn];
2625 if (likely(!ctx))
2626 continue;
2627
2628 perf_event_context_sched_in(ctx, task);
2629 }
2630 /*
2631 * if cgroup events exist on this CPU, then we need
2632 * to check if we have to switch in PMU state.
2633 * cgroup events are system-wide mode only.
2634 */
2635 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2636 perf_cgroup_sched_in(prev, task);
2637
2638 /* check for system-wide branch_stack events */
2639 if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
2640 perf_branch_stack_sched_in(prev, task);
2641 }
2642
2643 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2644 {
2645 u64 frequency = event->attr.sample_freq;
2646 u64 sec = NSEC_PER_SEC;
2647 u64 divisor, dividend;
2648
2649 int count_fls, nsec_fls, frequency_fls, sec_fls;
2650
2651 count_fls = fls64(count);
2652 nsec_fls = fls64(nsec);
2653 frequency_fls = fls64(frequency);
2654 sec_fls = 30;
2655
2656 /*
2657 * We got @count in @nsec, with a target of sample_freq HZ
2658 * the target period becomes:
2659 *
2660 * @count * 10^9
2661 * period = -------------------
2662 * @nsec * sample_freq
2663 *
2664 */
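/*
 * Worked example (hypothetical numbers): with sample_freq = 1000 Hz,
 * @count = 50000 raw events observed over @nsec = 10^7 ns (10 ms),
 * period = 50000 * 10^9 / (10^7 * 1000) = 5000 events per sample.
 */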
2665
2666 /*
2667 * Reduce accuracy by one bit such that @a and @b converge
2668 * to a similar magnitude.
2669 */
2670 #define REDUCE_FLS(a, b) \
2671 do { \
2672 if (a##_fls > b##_fls) { \
2673 a >>= 1; \
2674 a##_fls--; \
2675 } else { \
2676 b >>= 1; \
2677 b##_fls--; \
2678 } \
2679 } while (0)
2680
2681 /*
2682 * Reduce accuracy until either term fits in a u64, then proceed with
2683 * the other, so that finally we can do a u64/u64 division.
2684 */
2685 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2686 REDUCE_FLS(nsec, frequency);
2687 REDUCE_FLS(sec, count);
2688 }
2689
2690 if (count_fls + sec_fls > 64) {
2691 divisor = nsec * frequency;
2692
2693 while (count_fls + sec_fls > 64) {
2694 REDUCE_FLS(count, sec);
2695 divisor >>= 1;
2696 }
2697
2698 dividend = count * sec;
2699 } else {
2700 dividend = count * sec;
2701
2702 while (nsec_fls + frequency_fls > 64) {
2703 REDUCE_FLS(nsec, frequency);
2704 dividend >>= 1;
2705 }
2706
2707 divisor = nsec * frequency;
2708 }
2709
2710 if (!divisor)
2711 return dividend;
2712
2713 return div64_u64(dividend, divisor);
2714 }
2715
2716 static DEFINE_PER_CPU(int, perf_throttled_count);
2717 static DEFINE_PER_CPU(u64, perf_throttled_seq);
2718
2719 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
2720 {
2721 struct hw_perf_event *hwc = &event->hw;
2722 s64 period, sample_period;
2723 s64 delta;
2724
2725 period = perf_calculate_period(event, nsec, count);
2726
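/*
 * Move the sample period 1/8th of the way towards the newly computed
 * target each time: a simple exponential moving average that damps
 * sudden swings in the observed event rate.
 */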
2727 delta = (s64)(period - hwc->sample_period);
2728 delta = (delta + 7) / 8; /* low pass filter */
2729
2730 sample_period = hwc->sample_period + delta;
2731
2732 if (!sample_period)
2733 sample_period = 1;
2734
2735 hwc->sample_period = sample_period;
2736
2737 if (local64_read(&hwc->period_left) > 8*sample_period) {
2738 if (disable)
2739 event->pmu->stop(event, PERF_EF_UPDATE);
2740
2741 local64_set(&hwc->period_left, 0);
2742
2743 if (disable)
2744 event->pmu->start(event, PERF_EF_RELOAD);
2745 }
2746 }
2747
2748 /*
2749 * combine freq adjustment with unthrottling to avoid two passes over the
2750 * events. At the same time, make sure that having freq events does not change
2751 * the rate of unthrottling, as that would introduce bias.
2752 */
2753 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2754 int needs_unthr)
2755 {
2756 struct perf_event *event;
2757 struct hw_perf_event *hwc;
2758 u64 now, period = TICK_NSEC;
2759 s64 delta;
2760
2761 /*
2762 * only need to iterate over all events if either:
2763 * - the context has events in frequency mode (needs freq adjust)
2764 * - there are events to unthrottle on this cpu
2765 */
2766 if (!(ctx->nr_freq || needs_unthr))
2767 return;
2768
2769 raw_spin_lock(&ctx->lock);
2770 perf_pmu_disable(ctx->pmu);
2771
2772 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2773 if (event->state != PERF_EVENT_STATE_ACTIVE)
2774 continue;
2775
2776 if (!event_filter_match(event))
2777 continue;
2778
2779 perf_pmu_disable(event->pmu);
2780
2781 hwc = &event->hw;
2782
2783 if (hwc->interrupts == MAX_INTERRUPTS) {
2784 hwc->interrupts = 0;
2785 perf_log_throttle(event, 1);
2786 event->pmu->start(event, 0);
2787 }
2788
2789 if (!event->attr.freq || !event->attr.sample_freq)
2790 goto next;
2791
2792 /*
2793 * stop the event and update event->count
2794 */
2795 event->pmu->stop(event, PERF_EF_UPDATE);
2796
2797 now = local64_read(&event->count);
2798 delta = now - hwc->freq_count_stamp;
2799 hwc->freq_count_stamp = now;
2800
2801 /*
2802 * Restart the event:
2803 * reload only if the value has changed.
2804 * We have already stopped the event, so tell that to
2805 * perf_adjust_period() to avoid stopping it
2806 * twice.
2807 */
2808 if (delta > 0)
2809 perf_adjust_period(event, period, delta, false);
2810
2811 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
2812 next:
2813 perf_pmu_enable(event->pmu);
2814 }
2815
2816 perf_pmu_enable(ctx->pmu);
2817 raw_spin_unlock(&ctx->lock);
2818 }
2819
2820 /*
2821 * Round-robin a context's events:
2822 */
2823 static void rotate_ctx(struct perf_event_context *ctx)
2824 {
2825 /*
2826 * Rotate the non-pinned groups, moving the first entry last. Rotation might be
2827 * disabled by the inheritance code.
2828 */
2829 if (!ctx->rotate_disable)
2830 list_rotate_left(&ctx->flexible_groups);
2831 }
2832
2833 /*
2834 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2835 * because they're strictly cpu affine and rotate_start is called with IRQs
2836 * disabled, while rotate_context is called from IRQ context.
2837 */
2838 static int perf_rotate_context(struct perf_cpu_context *cpuctx)
2839 {
2840 struct perf_event_context *ctx = NULL;
2841 int rotate = 0, remove = 1;
2842
2843 if (cpuctx->ctx.nr_events) {
2844 remove = 0;
2845 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2846 rotate = 1;
2847 }
2848
2849 ctx = cpuctx->task_ctx;
2850 if (ctx && ctx->nr_events) {
2851 remove = 0;
2852 if (ctx->nr_events != ctx->nr_active)
2853 rotate = 1;
2854 }
2855
2856 if (!rotate)
2857 goto done;
2858
2859 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2860 perf_pmu_disable(cpuctx->ctx.pmu);
2861
2862 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2863 if (ctx)
2864 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
2865
2866 rotate_ctx(&cpuctx->ctx);
2867 if (ctx)
2868 rotate_ctx(ctx);
2869
2870 perf_event_sched_in(cpuctx, ctx, current);
2871
2872 perf_pmu_enable(cpuctx->ctx.pmu);
2873 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2874 done:
2875 if (remove)
2876 list_del_init(&cpuctx->rotation_list);
2877
2878 return rotate;
2879 }
2880
2881 #ifdef CONFIG_NO_HZ_FULL
2882 bool perf_event_can_stop_tick(void)
2883 {
2884 if (atomic_read(&nr_freq_events) ||
2885 __this_cpu_read(perf_throttled_count))
2886 return false;
2887 else
2888 return true;
2889 }
2890 #endif
2891
2892 void perf_event_task_tick(void)
2893 {
2894 struct list_head *head = &__get_cpu_var(rotation_list);
2895 struct perf_cpu_context *cpuctx, *tmp;
2896 struct perf_event_context *ctx;
2897 int throttled;
2898
2899 WARN_ON(!irqs_disabled());
2900
2901 __this_cpu_inc(perf_throttled_seq);
2902 throttled = __this_cpu_xchg(perf_throttled_count, 0);
2903
2904 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
2905 ctx = &cpuctx->ctx;
2906 perf_adjust_freq_unthr_context(ctx, throttled);
2907
2908 ctx = cpuctx->task_ctx;
2909 if (ctx)
2910 perf_adjust_freq_unthr_context(ctx, throttled);
2911 }
2912 }
2913
2914 static int event_enable_on_exec(struct perf_event *event,
2915 struct perf_event_context *ctx)
2916 {
2917 if (!event->attr.enable_on_exec)
2918 return 0;
2919
2920 event->attr.enable_on_exec = 0;
2921 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2922 return 0;
2923
2924 __perf_event_mark_enabled(event);
2925
2926 return 1;
2927 }
2928
2929 /*
2930 * Enable all of a task's events that have been marked enable-on-exec.
2931 * This expects task == current.
2932 */
2933 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2934 {
2935 struct perf_event *event;
2936 unsigned long flags;
2937 int enabled = 0;
2938 int ret;
2939
2940 local_irq_save(flags);
2941 if (!ctx || !ctx->nr_events)
2942 goto out;
2943
2944 /*
2945 * We must context-switch out cgroup events to avoid conflict
2946 * when invoking perf_task_event_sched_in() later on
2947 * in this function. Otherwise we end up trying to
2948 * context-switch in cgroup events which are already
2949 * scheduled in.
2950 */
2951 perf_cgroup_sched_out(current, NULL);
2952
2953 raw_spin_lock(&ctx->lock);
2954 task_ctx_sched_out(ctx);
2955
2956 list_for_each_entry(event, &ctx->event_list, event_entry) {
2957 ret = event_enable_on_exec(event, ctx);
2958 if (ret)
2959 enabled = 1;
2960 }
2961
2962 /*
2963 * Unclone this context if we enabled any event.
2964 */
2965 if (enabled)
2966 unclone_ctx(ctx);
2967
2968 raw_spin_unlock(&ctx->lock);
2969
2970 /*
2971 * Also calls ctxswin for cgroup events, if any:
2972 */
2973 perf_event_context_sched_in(ctx, ctx->task);
2974 out:
2975 local_irq_restore(flags);
2976 }
2977
2978 void perf_event_exec(void)
2979 {
2980 struct perf_event_context *ctx;
2981 int ctxn;
2982
2983 rcu_read_lock();
2984 for_each_task_context_nr(ctxn) {
2985 ctx = current->perf_event_ctxp[ctxn];
2986 if (!ctx)
2987 continue;
2988
2989 perf_event_enable_on_exec(ctx);
2990 }
2991 rcu_read_unlock();
2992 }
2993
2994 /*
2995 * Cross CPU call to read the hardware event
2996 */
2997 static void __perf_event_read(void *info)
2998 {
2999 struct perf_event *event = info;
3000 struct perf_event_context *ctx = event->ctx;
3001 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
3002
3003 /*
3004 * If this is a task context, we need to check whether it is
3005 * the current task context of this cpu. If not, it has been
3006 * scheduled out before the smp call arrived. In that case
3007 * event->count would have been updated to a recent sample
3008 * when the event was scheduled out.
3009 */
3010 if (ctx->task && cpuctx->task_ctx != ctx)
3011 return;
3012
3013 raw_spin_lock(&ctx->lock);
3014 if (ctx->is_active) {
3015 update_context_time(ctx);
3016 update_cgrp_time_from_event(event);
3017 }
3018 update_event_times(event);
3019 if (event->state == PERF_EVENT_STATE_ACTIVE)
3020 event->pmu->read(event);
3021 raw_spin_unlock(&ctx->lock);
3022 }
3023
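/*
 * Total value of an event: its own count plus whatever has been
 * accumulated into child_count from its child events.
 */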
3024 static inline u64 perf_event_count(struct perf_event *event)
3025 {
3026 return local64_read(&event->count) + atomic64_read(&event->child_count);
3027 }
3028
3029 static u64 perf_event_read(struct perf_event *event)
3030 {
3031 /*
3032 * If event is enabled and currently active on a CPU, update the
3033 * value in the event structure:
3034 */
3035 if (event->state == PERF_EVENT_STATE_ACTIVE) {
3036 smp_call_function_single(event->oncpu,
3037 __perf_event_read, event, 1);
3038 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
3039 struct perf_event_context *ctx = event->ctx;
3040 unsigned long flags;
3041
3042 raw_spin_lock_irqsave(&ctx->lock, flags);
3043 /*
3044 * We may read while the context is not active
3045 * (e.g., the thread is blocked); in that case
3046 * we cannot update the context time.
3047 */
3048 if (ctx->is_active) {
3049 update_context_time(ctx);
3050 update_cgrp_time_from_event(event);
3051 }
3052 update_event_times(event);
3053 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3054 }
3055
3056 return perf_event_count(event);
3057 }
3058
3059 /*
3060 * Initialize the perf_event context in a task_struct:
3061 */
3062 static void __perf_event_init_context(struct perf_event_context *ctx)
3063 {
3064 raw_spin_lock_init(&ctx->lock);
3065 mutex_init(&ctx->mutex);
3066 INIT_LIST_HEAD(&ctx->pinned_groups);
3067 INIT_LIST_HEAD(&ctx->flexible_groups);
3068 INIT_LIST_HEAD(&ctx->event_list);
3069 atomic_set(&ctx->refcount, 1);
3070 }
3071
3072 static struct perf_event_context *
3073 alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3074 {
3075 struct perf_event_context *ctx;
3076
3077 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3078 if (!ctx)
3079 return NULL;
3080
3081 __perf_event_init_context(ctx);
3082 if (task) {
3083 ctx->task = task;
3084 get_task_struct(task);
3085 }
3086 ctx->pmu = pmu;
3087
3088 return ctx;
3089 }
3090
3091 static struct task_struct *
3092 find_lively_task_by_vpid(pid_t vpid)
3093 {
3094 struct task_struct *task;
3095 int err;
3096
3097 rcu_read_lock();
3098 if (!vpid)
3099 task = current;
3100 else
3101 task = find_task_by_vpid(vpid);
3102 if (task)
3103 get_task_struct(task);
3104 rcu_read_unlock();
3105
3106 if (!task)
3107 return ERR_PTR(-ESRCH);
3108
3109 /* Reuse ptrace permission checks for now. */
3110 err = -EACCES;
3111 if (!ptrace_may_access(task, PTRACE_MODE_READ))
3112 goto errout;
3113
3114 return task;
3115 errout:
3116 put_task_struct(task);
3117 return ERR_PTR(err);
3118
3119 }
3120
3121 /*
3122 * Returns a matching context with refcount and pincount.
3123 */
3124 static struct perf_event_context *
3125 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
3126 {
3127 struct perf_event_context *ctx;
3128 struct perf_cpu_context *cpuctx;
3129 unsigned long flags;
3130 int ctxn, err;
3131
3132 if (!task) {
3133 /* Must be root to operate on a CPU event: */
3134 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3135 return ERR_PTR(-EACCES);
3136
3137 /*
3138 * We could be clever and allow attaching an event to an
3139 * offline CPU and activate it when the CPU comes up, but
3140 * that's for later.
3141 */
3142 if (!cpu_online(cpu))
3143 return ERR_PTR(-ENODEV);
3144
3145 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
3146 ctx = &cpuctx->ctx;
3147 get_ctx(ctx);
3148 ++ctx->pin_count;
3149
3150 return ctx;
3151 }
3152
3153 err = -EINVAL;
3154 ctxn = pmu->task_ctx_nr;
3155 if (ctxn < 0)
3156 goto errout;
3157
3158 retry:
3159 ctx = perf_lock_task_context(task, ctxn, &flags);
3160 if (ctx) {
3161 unclone_ctx(ctx);
3162 ++ctx->pin_count;
3163 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3164 } else {
3165 ctx = alloc_perf_context(pmu, task);
3166 err = -ENOMEM;
3167 if (!ctx)
3168 goto errout;
3169
3170 err = 0;
3171 mutex_lock(&task->perf_event_mutex);
3172 /*
3173 * If it has already passed perf_event_exit_task(),
3174 * we must see PF_EXITING; it takes this mutex too.
3175 */
3176 if (task->flags & PF_EXITING)
3177 err = -ESRCH;
3178 else if (task->perf_event_ctxp[ctxn])
3179 err = -EAGAIN;
3180 else {
3181 get_ctx(ctx);
3182 ++ctx->pin_count;
3183 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
3184 }
3185 mutex_unlock(&task->perf_event_mutex);
3186
3187 if (unlikely(err)) {
3188 put_ctx(ctx);
3189
3190 if (err == -EAGAIN)
3191 goto retry;
3192 goto errout;
3193 }
3194 }
3195
3196 return ctx;
3197
3198 errout:
3199 return ERR_PTR(err);
3200 }
3201
3202 static void perf_event_free_filter(struct perf_event *event);
3203
3204 static void free_event_rcu(struct rcu_head *head)
3205 {
3206 struct perf_event *event;
3207
3208 event = container_of(head, struct perf_event, rcu_head);
3209 if (event->ns)
3210 put_pid_ns(event->ns);
3211 perf_event_free_filter(event);
3212 kfree(event);
3213 }
3214
3215 static void ring_buffer_put(struct ring_buffer *rb);
3216 static void ring_buffer_attach(struct perf_event *event,
3217 struct ring_buffer *rb);
3218
3219 static void unaccount_event_cpu(struct perf_event *event, int cpu)
3220 {
3221 if (event->parent)
3222 return;
3223
3224 if (has_branch_stack(event)) {
3225 if (!(event->attach_state & PERF_ATTACH_TASK))
3226 atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
3227 }
3228 if (is_cgroup_event(event))
3229 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3230 }
3231
3232 static void unaccount_event(struct perf_event *event)
3233 {
3234 if (event->parent)
3235 return;
3236
3237 if (event->attach_state & PERF_ATTACH_TASK)
3238 static_key_slow_dec_deferred(&perf_sched_events);
3239 if (event->attr.mmap || event->attr.mmap_data)
3240 atomic_dec(&nr_mmap_events);
3241 if (event->attr.comm)
3242 atomic_dec(&nr_comm_events);
3243 if (event->attr.task)
3244 atomic_dec(&nr_task_events);
3245 if (event->attr.freq)
3246 atomic_dec(&nr_freq_events);
3247 if (is_cgroup_event(event))
3248 static_key_slow_dec_deferred(&perf_sched_events);
3249 if (has_branch_stack(event))
3250 static_key_slow_dec_deferred(&perf_sched_events);
3251
3252 unaccount_event_cpu(event, event->cpu);
3253 }
3254
3255 static void __free_event(struct perf_event *event)
3256 {
3257 if (!event->parent) {
3258 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3259 put_callchain_buffers();
3260 }
3261
3262 if (event->destroy)
3263 event->destroy(event);
3264
3265 if (event->ctx)
3266 put_ctx(event->ctx);
3267
3268 if (event->pmu)
3269 module_put(event->pmu->module);
3270
3271 call_rcu(&event->rcu_head, free_event_rcu);
3272 }
3273
3274 static void _free_event(struct perf_event *event)
3275 {
3276 irq_work_sync(&event->pending);
3277
3278 unaccount_event(event);
3279
3280 if (event->rb) {
3281 /*
3282 * Can happen when we close an event with re-directed output.
3283 *
3284 * Since we have a 0 refcount, perf_mmap_close() will skip
3285 * over us; possibly making our ring_buffer_put() the last.
3286 */
3287 mutex_lock(&event->mmap_mutex);
3288 ring_buffer_attach(event, NULL);
3289 mutex_unlock(&event->mmap_mutex);
3290 }
3291
3292 if (is_cgroup_event(event))
3293 perf_detach_cgroup(event);
3294
3295 __free_event(event);
3296 }
3297
3298 /*
3299 * Used to free events which have a known refcount of 1, such as in error paths
3300 * where the event isn't exposed yet, and for inherited events.
3301 */
3302 static void free_event(struct perf_event *event)
3303 {
3304 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
3305 "unexpected event refcount: %ld; ptr=%p\n",
3306 atomic_long_read(&event->refcount), event)) {
3307 /* leak to avoid use-after-free */
3308 return;
3309 }
3310
3311 _free_event(event);
3312 }
3313
3314 /*
3315 * Called when the last reference to the file is gone.
3316 */
3317 static void put_event(struct perf_event *event)
3318 {
3319 struct perf_event_context *ctx = event->ctx;
3320 struct task_struct *owner;
3321
3322 if (!atomic_long_dec_and_test(&event->refcount))
3323 return;
3324
3325 rcu_read_lock();
3326 owner = ACCESS_ONCE(event->owner);
3327 /*
3328 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
3329 * !owner it means the list deletion is complete and we can indeed
3330 * free this event, otherwise we need to serialize on
3331 * owner->perf_event_mutex.
3332 */
3333 smp_read_barrier_depends();
3334 if (owner) {
3335 /*
3336 * Since delayed_put_task_struct() also drops the last
3337 * task reference we can safely take a new reference
3338 * while holding the rcu_read_lock().
3339 */
3340 get_task_struct(owner);
3341 }
3342 rcu_read_unlock();
3343
3344 if (owner) {
3345 mutex_lock(&owner->perf_event_mutex);
3346 /*
3347 * We have to re-check the event->owner field, if it is cleared
3348 * we raced with perf_event_exit_task(), acquiring the mutex
3349 * ensured they're done, and we can proceed with freeing the
3350 * event.
3351 */
3352 if (event->owner)
3353 list_del_init(&event->owner_entry);
3354 mutex_unlock(&owner->perf_event_mutex);
3355 put_task_struct(owner);
3356 }
3357
3358 WARN_ON_ONCE(ctx->parent_ctx);
3359 /*
3360 * There are two ways this annotation is useful:
3361 *
3362 * 1) there is a lock recursion from perf_event_exit_task;
3363 * see the comment there.
3364 *
3365 * 2) there is a lock-inversion with mmap_sem through
3366 * perf_event_read_group(), which takes faults while
3367 * holding ctx->mutex, however this is called after
3368 * the last filedesc died, so there is no possibility
3369 * to trigger the AB-BA case.
3370 */
3371 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
3372 perf_remove_from_context(event, true);
3373 mutex_unlock(&ctx->mutex);
3374
3375 _free_event(event);
3376 }
3377
3378 int perf_event_release_kernel(struct perf_event *event)
3379 {
3380 put_event(event);
3381 return 0;
3382 }
3383 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
3384
3385 static int perf_release(struct inode *inode, struct file *file)
3386 {
3387 put_event(file->private_data);
3388 return 0;
3389 }
3390
3391 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
3392 {
3393 struct perf_event *child;
3394 u64 total = 0;
3395
3396 *enabled = 0;
3397 *running = 0;
3398
3399 mutex_lock(&event->child_mutex);
3400 total += perf_event_read(event);
3401 *enabled += event->total_time_enabled +
3402 atomic64_read(&event->child_total_time_enabled);
3403 *running += event->total_time_running +
3404 atomic64_read(&event->child_total_time_running);
3405
3406 list_for_each_entry(child, &event->child_list, child_list) {
3407 total += perf_event_read(child);
3408 *enabled += child->total_time_enabled;
3409 *running += child->total_time_running;
3410 }
3411 mutex_unlock(&event->child_mutex);
3412
3413 return total;
3414 }
3415 EXPORT_SYMBOL_GPL(perf_event_read_value);
3416
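/*
 * For PERF_FORMAT_GROUP the buffer handed back to userspace is laid out,
 * roughly, as:
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value; { u64 id; } && PERF_FORMAT_ID } [nr]
 *	}
 *
 * with the group leader's value first, followed by each sibling.
 */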
3417 static int perf_event_read_group(struct perf_event *event,
3418 u64 read_format, char __user *buf)
3419 {
3420 struct perf_event *leader = event->group_leader, *sub;
3421 int n = 0, size = 0, ret = -EFAULT;
3422 struct perf_event_context *ctx = leader->ctx;
3423 u64 values[5];
3424 u64 count, enabled, running;
3425
3426 mutex_lock(&ctx->mutex);
3427 count = perf_event_read_value(leader, &enabled, &running);
3428
3429 values[n++] = 1 + leader->nr_siblings;
3430 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3431 values[n++] = enabled;
3432 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3433 values[n++] = running;
3434 values[n++] = count;
3435 if (read_format & PERF_FORMAT_ID)
3436 values[n++] = primary_event_id(leader);
3437
3438 size = n * sizeof(u64);
3439
3440 if (copy_to_user(buf, values, size))
3441 goto unlock;
3442
3443 ret = size;
3444
3445 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3446 n = 0;
3447
3448 values[n++] = perf_event_read_value(sub, &enabled, &running);
3449 if (read_format & PERF_FORMAT_ID)
3450 values[n++] = primary_event_id(sub);
3451
3452 size = n * sizeof(u64);
3453
3454 if (copy_to_user(buf + ret, values, size)) {
3455 ret = -EFAULT;
3456 goto unlock;
3457 }
3458
3459 ret += size;
3460 }
3461 unlock:
3462 mutex_unlock(&ctx->mutex);
3463
3464 return ret;
3465 }
3466
3467 static int perf_event_read_one(struct perf_event *event,
3468 u64 read_format, char __user *buf)
3469 {
3470 u64 enabled, running;
3471 u64 values[4];
3472 int n = 0;
3473
3474 values[n++] = perf_event_read_value(event, &enabled, &running);
3475 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3476 values[n++] = enabled;
3477 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3478 values[n++] = running;
3479 if (read_format & PERF_FORMAT_ID)
3480 values[n++] = primary_event_id(event);
3481
3482 if (copy_to_user(buf, values, n * sizeof(u64)))
3483 return -EFAULT;
3484
3485 return n * sizeof(u64);
3486 }
3487
3488 /*
3489 * Read the performance event - simple non blocking version for now
3490 */
3491 static ssize_t
3492 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
3493 {
3494 u64 read_format = event->attr.read_format;
3495 int ret;
3496
3497 /*
3498 * Return end-of-file for a read on an event that is in
3499 * error state (i.e. because it was pinned but it couldn't be
3500 * scheduled on to the CPU at some point).
3501 */
3502 if (event->state == PERF_EVENT_STATE_ERROR)
3503 return 0;
3504
3505 if (count < event->read_size)
3506 return -ENOSPC;
3507
3508 WARN_ON_ONCE(event->ctx->parent_ctx);
3509 if (read_format & PERF_FORMAT_GROUP)
3510 ret = perf_event_read_group(event, read_format, buf);
3511 else
3512 ret = perf_event_read_one(event, read_format, buf);
3513
3514 return ret;
3515 }
3516
3517 static ssize_t
3518 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3519 {
3520 struct perf_event *event = file->private_data;
3521
3522 return perf_read_hw(event, buf, count);
3523 }
3524
3525 static unsigned int perf_poll(struct file *file, poll_table *wait)
3526 {
3527 struct perf_event *event = file->private_data;
3528 struct ring_buffer *rb;
3529 unsigned int events = POLL_HUP;
3530
3531 /*
3532 * Pin the event->rb by taking event->mmap_mutex; otherwise
3533 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
3534 */
3535 mutex_lock(&event->mmap_mutex);
3536 rb = event->rb;
3537 if (rb)
3538 events = atomic_xchg(&rb->poll, 0);
3539 mutex_unlock(&event->mmap_mutex);
3540
3541 poll_wait(file, &event->waitq, wait);
3542
3543 return events;
3544 }
3545
3546 static void perf_event_reset(struct perf_event *event)
3547 {
3548 (void)perf_event_read(event);
3549 local64_set(&event->count, 0);
3550 perf_event_update_userpage(event);
3551 }
3552
3553 /*
3554 * Holding the top-level event's child_mutex means that any
3555 * descendant process that has inherited this event will block
3556 * in sync_child_event if it goes to exit, thus satisfying the
3557 * task existence requirements of perf_event_enable/disable.
3558 */
3559 static void perf_event_for_each_child(struct perf_event *event,
3560 void (*func)(struct perf_event *))
3561 {
3562 struct perf_event *child;
3563
3564 WARN_ON_ONCE(event->ctx->parent_ctx);
3565 mutex_lock(&event->child_mutex);
3566 func(event);
3567 list_for_each_entry(child, &event->child_list, child_list)
3568 func(child);
3569 mutex_unlock(&event->child_mutex);
3570 }
3571
3572 static void perf_event_for_each(struct perf_event *event,
3573 void (*func)(struct perf_event *))
3574 {
3575 struct perf_event_context *ctx = event->ctx;
3576 struct perf_event *sibling;
3577
3578 WARN_ON_ONCE(ctx->parent_ctx);
3579 mutex_lock(&ctx->mutex);
3580 event = event->group_leader;
3581
3582 perf_event_for_each_child(event, func);
3583 list_for_each_entry(sibling, &event->sibling_list, group_entry)
3584 perf_event_for_each_child(sibling, func);
3585 mutex_unlock(&ctx->mutex);
3586 }
3587
3588 static int perf_event_period(struct perf_event *event, u64 __user *arg)
3589 {
3590 struct perf_event_context *ctx = event->ctx;
3591 int ret = 0, active;
3592 u64 value;
3593
3594 if (!is_sampling_event(event))
3595 return -EINVAL;
3596
3597 if (copy_from_user(&value, arg, sizeof(value)))
3598 return -EFAULT;
3599
3600 if (!value)
3601 return -EINVAL;
3602
3603 raw_spin_lock_irq(&ctx->lock);
3604 if (event->attr.freq) {
3605 if (value > sysctl_perf_event_sample_rate) {
3606 ret = -EINVAL;
3607 goto unlock;
3608 }
3609
3610 event->attr.sample_freq = value;
3611 } else {
3612 event->attr.sample_period = value;
3613 event->hw.sample_period = value;
3614 }
3615
3616 active = (event->state == PERF_EVENT_STATE_ACTIVE);
3617 if (active) {
3618 perf_pmu_disable(ctx->pmu);
3619 event->pmu->stop(event, PERF_EF_UPDATE);
3620 }
3621
3622 local64_set(&event->hw.period_left, 0);
3623
3624 if (active) {
3625 event->pmu->start(event, PERF_EF_RELOAD);
3626 perf_pmu_enable(ctx->pmu);
3627 }
3628
3629 unlock:
3630 raw_spin_unlock_irq(&ctx->lock);
3631
3632 return ret;
3633 }
3634
3635 static const struct file_operations perf_fops;
3636
3637 static inline int perf_fget_light(int fd, struct fd *p)
3638 {
3639 struct fd f = fdget(fd);
3640 if (!f.file)
3641 return -EBADF;
3642
3643 if (f.file->f_op != &perf_fops) {
3644 fdput(f);
3645 return -EBADF;
3646 }
3647 *p = f;
3648 return 0;
3649 }
3650
3651 static int perf_event_set_output(struct perf_event *event,
3652 struct perf_event *output_event);
3653 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3654
3655 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3656 {
3657 struct perf_event *event = file->private_data;
3658 void (*func)(struct perf_event *);
3659 u32 flags = arg;
3660
3661 switch (cmd) {
3662 case PERF_EVENT_IOC_ENABLE:
3663 func = perf_event_enable;
3664 break;
3665 case PERF_EVENT_IOC_DISABLE:
3666 func = perf_event_disable;
3667 break;
3668 case PERF_EVENT_IOC_RESET:
3669 func = perf_event_reset;
3670 break;
3671
3672 case PERF_EVENT_IOC_REFRESH:
3673 return perf_event_refresh(event, arg);
3674
3675 case PERF_EVENT_IOC_PERIOD:
3676 return perf_event_period(event, (u64 __user *)arg);
3677
3678 case PERF_EVENT_IOC_ID:
3679 {
3680 u64 id = primary_event_id(event);
3681
3682 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
3683 return -EFAULT;
3684 return 0;
3685 }
3686
3687 case PERF_EVENT_IOC_SET_OUTPUT:
3688 {
3689 int ret;
3690 if (arg != -1) {
3691 struct perf_event *output_event;
3692 struct fd output;
3693 ret = perf_fget_light(arg, &output);
3694 if (ret)
3695 return ret;
3696 output_event = output.file->private_data;
3697 ret = perf_event_set_output(event, output_event);
3698 fdput(output);
3699 } else {
3700 ret = perf_event_set_output(event, NULL);
3701 }
3702 return ret;
3703 }
3704
3705 case PERF_EVENT_IOC_SET_FILTER:
3706 return perf_event_set_filter(event, (void __user *)arg);
3707
3708 default:
3709 return -ENOTTY;
3710 }
3711
3712 if (flags & PERF_IOC_FLAG_GROUP)
3713 perf_event_for_each(event, func);
3714 else
3715 perf_event_for_each_child(event, func);
3716
3717 return 0;
3718 }
3719
3720 int perf_event_task_enable(void)
3721 {
3722 struct perf_event *event;
3723
3724 mutex_lock(&current->perf_event_mutex);
3725 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3726 perf_event_for_each_child(event, perf_event_enable);
3727 mutex_unlock(&current->perf_event_mutex);
3728
3729 return 0;
3730 }
3731
3732 int perf_event_task_disable(void)
3733 {
3734 struct perf_event *event;
3735
3736 mutex_lock(&current->perf_event_mutex);
3737 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3738 perf_event_for_each_child(event, perf_event_disable);
3739 mutex_unlock(&current->perf_event_mutex);
3740
3741 return 0;
3742 }
3743
3744 static int perf_event_index(struct perf_event *event)
3745 {
3746 if (event->hw.state & PERF_HES_STOPPED)
3747 return 0;
3748
3749 if (event->state != PERF_EVENT_STATE_ACTIVE)
3750 return 0;
3751
3752 return event->pmu->event_idx(event);
3753 }
3754
3755 static void calc_timer_values(struct perf_event *event,
3756 u64 *now,
3757 u64 *enabled,
3758 u64 *running)
3759 {
3760 u64 ctx_time;
3761
3762 *now = perf_clock();
3763 ctx_time = event->shadow_ctx_time + *now;
3764 *enabled = ctx_time - event->tstamp_enabled;
3765 *running = ctx_time - event->tstamp_running;
3766 }
3767
3768 static void perf_event_init_userpage(struct perf_event *event)
3769 {
3770 struct perf_event_mmap_page *userpg;
3771 struct ring_buffer *rb;
3772
3773 rcu_read_lock();
3774 rb = rcu_dereference(event->rb);
3775 if (!rb)
3776 goto unlock;
3777
3778 userpg = rb->user_page;
3779
3780 /* Allow new userspace to detect that bit 0 is deprecated */
3781 userpg->cap_bit0_is_deprecated = 1;
3782 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
3783
3784 unlock:
3785 rcu_read_unlock();
3786 }
3787
3788 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3789 {
3790 }
3791
3792 /*
3793 * Callers need to ensure there can be no nesting of this function, otherwise
3794 * the seqlock logic goes bad. We cannot serialize this because the arch
3795 * code calls this from NMI context.
3796 */
3797 void perf_event_update_userpage(struct perf_event *event)
3798 {
3799 struct perf_event_mmap_page *userpg;
3800 struct ring_buffer *rb;
3801 u64 enabled, running, now;
3802
3803 rcu_read_lock();
3804 rb = rcu_dereference(event->rb);
3805 if (!rb)
3806 goto unlock;
3807
3808 /*
3809 * compute total_time_enabled, total_time_running
3810 * based on snapshot values taken when the event
3811 * was last scheduled in.
3812 *
3813 * we cannot simply call update_context_time()
3814 * because of locking issues, as we can be called in
3815 * NMI context.
3816 */
3817 calc_timer_values(event, &now, &enabled, &running);
3818
3819 userpg = rb->user_page;
3820 /*
3821 * Disable preemption so as to not let the corresponding user-space
3822 * spin too long if we get preempted.
3823 */
3824 preempt_disable();
3825 ++userpg->lock;
3826 barrier();
3827 userpg->index = perf_event_index(event);
3828 userpg->offset = perf_event_count(event);
3829 if (userpg->index)
3830 userpg->offset -= local64_read(&event->hw.prev_count);
3831
3832 userpg->time_enabled = enabled +
3833 atomic64_read(&event->child_total_time_enabled);
3834
3835 userpg->time_running = running +
3836 atomic64_read(&event->child_total_time_running);
3837
3838 arch_perf_update_userpage(userpg, now);
3839
3840 barrier();
3841 ++userpg->lock;
3842 preempt_enable();
3843 unlock:
3844 rcu_read_unlock();
3845 }
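/*
 * The ++userpg->lock pair above forms a seqcount: userspace is expected to
 * read the page roughly as
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		... read index/offset/time fields ...
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * and retry whenever the sequence changed under it (pc being the mapped
 * struct perf_event_mmap_page).
 */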
3846
3847 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3848 {
3849 struct perf_event *event = vma->vm_file->private_data;
3850 struct ring_buffer *rb;
3851 int ret = VM_FAULT_SIGBUS;
3852
3853 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3854 if (vmf->pgoff == 0)
3855 ret = 0;
3856 return ret;
3857 }
3858
3859 rcu_read_lock();
3860 rb = rcu_dereference(event->rb);
3861 if (!rb)
3862 goto unlock;
3863
3864 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3865 goto unlock;
3866
3867 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
3868 if (!vmf->page)
3869 goto unlock;
3870
3871 get_page(vmf->page);
3872 vmf->page->mapping = vma->vm_file->f_mapping;
3873 vmf->page->index = vmf->pgoff;
3874
3875 ret = 0;
3876 unlock:
3877 rcu_read_unlock();
3878
3879 return ret;
3880 }
3881
3882 static void ring_buffer_attach(struct perf_event *event,
3883 struct ring_buffer *rb)
3884 {
3885 struct ring_buffer *old_rb = NULL;
3886 unsigned long flags;
3887
3888 if (event->rb) {
3889 /*
3890 * Should be impossible, we set this when removing
3891 * event->rb_entry and wait/clear when adding event->rb_entry.
3892 */
3893 WARN_ON_ONCE(event->rcu_pending);
3894
3895 old_rb = event->rb;
3896 event->rcu_batches = get_state_synchronize_rcu();
3897 event->rcu_pending = 1;
3898
3899 spin_lock_irqsave(&old_rb->event_lock, flags);
3900 list_del_rcu(&event->rb_entry);
3901 spin_unlock_irqrestore(&old_rb->event_lock, flags);
3902 }
3903
3904 if (event->rcu_pending && rb) {
3905 cond_synchronize_rcu(event->rcu_batches);
3906 event->rcu_pending = 0;
3907 }
3908
3909 if (rb) {
3910 spin_lock_irqsave(&rb->event_lock, flags);
3911 list_add_rcu(&event->rb_entry, &rb->event_list);
3912 spin_unlock_irqrestore(&rb->event_lock, flags);
3913 }
3914
3915 rcu_assign_pointer(event->rb, rb);
3916
3917 if (old_rb) {
3918 ring_buffer_put(old_rb);
3919 /*
3920 * Since we detached the old rb before attaching the new
3921 * one, we could have missed a wakeup.
3922 * Provide it now.
3923 */
3924 wake_up_all(&event->waitq);
3925 }
3926 }
3927
3928 static void ring_buffer_wakeup(struct perf_event *event)
3929 {
3930 struct ring_buffer *rb;
3931
3932 rcu_read_lock();
3933 rb = rcu_dereference(event->rb);
3934 if (rb) {
3935 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3936 wake_up_all(&event->waitq);
3937 }
3938 rcu_read_unlock();
3939 }
3940
3941 static void rb_free_rcu(struct rcu_head *rcu_head)
3942 {
3943 struct ring_buffer *rb;
3944
3945 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3946 rb_free(rb);
3947 }
3948
3949 static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3950 {
3951 struct ring_buffer *rb;
3952
3953 rcu_read_lock();
3954 rb = rcu_dereference(event->rb);
3955 if (rb) {
3956 if (!atomic_inc_not_zero(&rb->refcount))
3957 rb = NULL;
3958 }
3959 rcu_read_unlock();
3960
3961 return rb;
3962 }
3963
3964 static void ring_buffer_put(struct ring_buffer *rb)
3965 {
3966 if (!atomic_dec_and_test(&rb->refcount))
3967 return;
3968
3969 WARN_ON_ONCE(!list_empty(&rb->event_list));
3970
3971 call_rcu(&rb->rcu_head, rb_free_rcu);
3972 }
3973
3974 static void perf_mmap_open(struct vm_area_struct *vma)
3975 {
3976 struct perf_event *event = vma->vm_file->private_data;
3977
3978 atomic_inc(&event->mmap_count);
3979 atomic_inc(&event->rb->mmap_count);
3980 }
3981
3982 /*
3983 * A buffer can be mmap()ed multiple times; either directly through the same
3984 * event, or through other events by use of perf_event_set_output().
3985 *
3986 * In order to undo the VM accounting done by perf_mmap() we need to destroy
3987 * the buffer here, where we still have a VM context. This means we need
3988 * to detach all events redirecting to us.
3989 */
3990 static void perf_mmap_close(struct vm_area_struct *vma)
3991 {
3992 struct perf_event *event = vma->vm_file->private_data;
3993
3994 struct ring_buffer *rb = ring_buffer_get(event);
3995 struct user_struct *mmap_user = rb->mmap_user;
3996 int mmap_locked = rb->mmap_locked;
3997 unsigned long size = perf_data_size(rb);
3998
3999 atomic_dec(&rb->mmap_count);
4000
4001 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
4002 goto out_put;
4003
4004 ring_buffer_attach(event, NULL);
4005 mutex_unlock(&event->mmap_mutex);
4006
4007 /* If there's still other mmap()s of this buffer, we're done. */
4008 if (atomic_read(&rb->mmap_count))
4009 goto out_put;
4010
4011 /*
4012 * No other mmap()s, detach from all other events that might redirect
4013 * into the now unreachable buffer. Somewhat complicated by the
4014 * fact that rb::event_lock otherwise nests inside mmap_mutex.
4015 */
4016 again:
4017 rcu_read_lock();
4018 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
4019 if (!atomic_long_inc_not_zero(&event->refcount)) {
4020 /*
4021 * This event is en-route to free_event() which will
4022 * detach it and remove it from the list.
4023 */
4024 continue;
4025 }
4026 rcu_read_unlock();
4027
4028 mutex_lock(&event->mmap_mutex);
4029 /*
4030 * Check we didn't race with perf_event_set_output() which can
4031 * swizzle the rb from under us while we were waiting to
4032 * acquire mmap_mutex.
4033 *
4034 * If we find a different rb, ignore this event; the next
4035 * iteration will no longer find it on the list. We still have
4036 * to restart the iteration to make sure we're not now
4037 * iterating the wrong list.
4038 */
4039 if (event->rb == rb)
4040 ring_buffer_attach(event, NULL);
4041
4042 mutex_unlock(&event->mmap_mutex);
4043 put_event(event);
4044
4045 /*
4046 * Restart the iteration; either we're on the wrong list or
4047 * we destroyed its integrity by doing a deletion.
4048 */
4049 goto again;
4050 }
4051 rcu_read_unlock();
4052
4053 /*
4054 * It could be that there are still a few 0-ref events on the list; they'll
4055 * get cleaned up by free_event() -- they'll also still have their
4056 * ref on the rb and will free it whenever they are done with it.
4057 *
4058 * Aside from that, this buffer is 'fully' detached and unmapped,
4059 * undo the VM accounting.
4060 */
4061
4062 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
4063 vma->vm_mm->pinned_vm -= mmap_locked;
4064 free_uid(mmap_user);
4065
4066 out_put:
4067 ring_buffer_put(rb); /* could be last */
4068 }
4069
4070 static const struct vm_operations_struct perf_mmap_vmops = {
4071 .open = perf_mmap_open,
4072 .close = perf_mmap_close,
4073 .fault = perf_mmap_fault,
4074 .page_mkwrite = perf_mmap_fault,
4075 };
4076
4077 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
4078 {
4079 struct perf_event *event = file->private_data;
4080 unsigned long user_locked, user_lock_limit;
4081 struct user_struct *user = current_user();
4082 unsigned long locked, lock_limit;
4083 struct ring_buffer *rb;
4084 unsigned long vma_size;
4085 unsigned long nr_pages;
4086 long user_extra, extra;
4087 int ret = 0, flags = 0;
4088
4089 /*
4090 * Don't allow mmap() of inherited per-task counters. This would
4091 * create a performance issue due to all children writing to the
4092 * same rb.
4093 */
4094 if (event->cpu == -1 && event->attr.inherit)
4095 return -EINVAL;
4096
4097 if (!(vma->vm_flags & VM_SHARED))
4098 return -EINVAL;
4099
4100 vma_size = vma->vm_end - vma->vm_start;
4101 nr_pages = (vma_size / PAGE_SIZE) - 1;
4102
4103 /*
4104 * If we have rb pages, ensure their count is a power of two so we
4105 * can use bitmasks instead of modulo.
4106 */
4107 if (nr_pages != 0 && !is_power_of_2(nr_pages))
4108 return -EINVAL;
4109
4110 if (vma_size != PAGE_SIZE * (1 + nr_pages))
4111 return -EINVAL;
4112
4113 if (vma->vm_pgoff != 0)
4114 return -EINVAL;
4115
4116 WARN_ON_ONCE(event->ctx->parent_ctx);
4117 again:
4118 mutex_lock(&event->mmap_mutex);
4119 if (event->rb) {
4120 if (event->rb->nr_pages != nr_pages) {
4121 ret = -EINVAL;
4122 goto unlock;
4123 }
4124
4125 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
4126 /*
4127 * Raced against perf_mmap_close() through
4128 * perf_event_set_output(). Try again, hope for better
4129 * luck.
4130 */
4131 mutex_unlock(&event->mmap_mutex);
4132 goto again;
4133 }
4134
4135 goto unlock;
4136 }
4137
4138 user_extra = nr_pages + 1;
4139 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
4140
4141 /*
4142 * Increase the limit linearly with more CPUs:
4143 */
4144 user_lock_limit *= num_online_cpus();
4145
4146 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
4147
4148 extra = 0;
4149 if (user_locked > user_lock_limit)
4150 extra = user_locked - user_lock_limit;
4151
4152 lock_limit = rlimit(RLIMIT_MEMLOCK);
4153 lock_limit >>= PAGE_SHIFT;
4154 locked = vma->vm_mm->pinned_vm + extra;
4155
4156 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
4157 !capable(CAP_IPC_LOCK)) {
4158 ret = -EPERM;
4159 goto unlock;
4160 }
4161
4162 WARN_ON(event->rb);
4163
4164 if (vma->vm_flags & VM_WRITE)
4165 flags |= RING_BUFFER_WRITABLE;
4166
4167 rb = rb_alloc(nr_pages,
4168 event->attr.watermark ? event->attr.wakeup_watermark : 0,
4169 event->cpu, flags);
4170
4171 if (!rb) {
4172 ret = -ENOMEM;
4173 goto unlock;
4174 }
4175
4176 atomic_set(&rb->mmap_count, 1);
4177 rb->mmap_locked = extra;
4178 rb->mmap_user = get_current_user();
4179
4180 atomic_long_add(user_extra, &user->locked_vm);
4181 vma->vm_mm->pinned_vm += extra;
4182
4183 ring_buffer_attach(event, rb);
4184
4185 perf_event_init_userpage(event);
4186 perf_event_update_userpage(event);
4187
4188 unlock:
4189 if (!ret)
4190 atomic_inc(&event->mmap_count);
4191 mutex_unlock(&event->mmap_mutex);
4192
4193 /*
4194 * Since pinned accounting is per vm we cannot allow fork() to copy our
4195 * vma.
4196 */
4197 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
4198 vma->vm_ops = &perf_mmap_vmops;
4199
4200 return ret;
4201 }
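
/*
 * For illustration only (a user-space sketch, not part of this file): the
 * size checks above mean a caller must map one metadata page plus a
 * power-of-two number of data pages, MAP_SHARED, at offset 0.  Roughly:
 *
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	size_t len = (1 + 8) * sysconf(_SC_PAGESIZE);	// 8 == 2^3 data pages
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);		// offset must be 0
 *
 * A non-power-of-two number of data pages, a private mapping, or a
 * non-zero offset makes perf_mmap() return -EINVAL.
 */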
4202
4203 static int perf_fasync(int fd, struct file *filp, int on)
4204 {
4205 struct inode *inode = file_inode(filp);
4206 struct perf_event *event = filp->private_data;
4207 int retval;
4208
4209 mutex_lock(&inode->i_mutex);
4210 retval = fasync_helper(fd, filp, on, &event->fasync);
4211 mutex_unlock(&inode->i_mutex);
4212
4213 if (retval < 0)
4214 return retval;
4215
4216 return 0;
4217 }
4218
4219 static const struct file_operations perf_fops = {
4220 .llseek = no_llseek,
4221 .release = perf_release,
4222 .read = perf_read,
4223 .poll = perf_poll,
4224 .unlocked_ioctl = perf_ioctl,
4225 .compat_ioctl = perf_ioctl,
4226 .mmap = perf_mmap,
4227 .fasync = perf_fasync,
4228 };
4229
4230 /*
4231 * Perf event wakeup
4232 *
4233 * If there's data, ensure we set the poll() state and publish everything
4234 * to user-space before waking everybody up.
4235 */
4236
4237 void perf_event_wakeup(struct perf_event *event)
4238 {
4239 ring_buffer_wakeup(event);
4240
4241 if (event->pending_kill) {
4242 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
4243 event->pending_kill = 0;
4244 }
4245 }
4246
4247 static void perf_pending_event(struct irq_work *entry)
4248 {
4249 struct perf_event *event = container_of(entry,
4250 struct perf_event, pending);
4251
4252 if (event->pending_disable) {
4253 event->pending_disable = 0;
4254 __perf_event_disable(event);
4255 }
4256
4257 if (event->pending_wakeup) {
4258 event->pending_wakeup = 0;
4259 perf_event_wakeup(event);
4260 }
4261 }
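
/*
 * Note: the overflow handling below may run in NMI context, where it
 * cannot wake up waiters or disable the event directly.  Instead it sets
 * event->pending_wakeup / event->pending_disable and queues
 * event->pending via irq_work_queue(); the irq_work fires once interrupts
 * are enabled again and lands here.
 */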
4262
4263 /*
4264 * We assume there is only KVM supporting the callbacks.
4265 * Later on, we might change it to a list if there is
4266 * another virtualization implementation supporting the callbacks.
4267 */
4268 struct perf_guest_info_callbacks *perf_guest_cbs;
4269
4270 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4271 {
4272 perf_guest_cbs = cbs;
4273 return 0;
4274 }
4275 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
4276
4277 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4278 {
4279 perf_guest_cbs = NULL;
4280 return 0;
4281 }
4282 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
4283
4284 static void
4285 perf_output_sample_regs(struct perf_output_handle *handle,
4286 struct pt_regs *regs, u64 mask)
4287 {
4288 int bit;
4289
4290 for_each_set_bit(bit, (const unsigned long *) &mask,
4291 sizeof(mask) * BITS_PER_BYTE) {
4292 u64 val;
4293
4294 val = perf_reg_value(regs, bit);
4295 perf_output_put(handle, val);
4296 }
4297 }
4298
4299 static void perf_sample_regs_user(struct perf_regs_user *regs_user,
4300 struct pt_regs *regs)
4301 {
4302 if (!user_mode(regs)) {
4303 if (current->mm)
4304 regs = task_pt_regs(current);
4305 else
4306 regs = NULL;
4307 }
4308
4309 if (regs) {
4310 regs_user->regs = regs;
4311 regs_user->abi = perf_reg_abi(current);
4312 }
4313 }
4314
4315 /*
4316 * Get remaining task size from user stack pointer.
4317 *
4318 * It'd be better to take the stack vma map and limit this more
4319 * precisely, but there's no way to get it safely under interrupt,
4320 * so use TASK_SIZE as the limit.
4321 */
4322 static u64 perf_ustack_task_size(struct pt_regs *regs)
4323 {
4324 unsigned long addr = perf_user_stack_pointer(regs);
4325
4326 if (!addr || addr >= TASK_SIZE)
4327 return 0;
4328
4329 return TASK_SIZE - addr;
4330 }
4331
4332 static u16
4333 perf_sample_ustack_size(u16 stack_size, u16 header_size,
4334 struct pt_regs *regs)
4335 {
4336 u64 task_size;
4337
4338 /* No regs, no stack pointer, no dump. */
4339 if (!regs)
4340 return 0;
4341
4342 /*
4343 * Check whether the requested stack size fits into:
4344 * - TASK_SIZE
4345 * If it doesn't, limit the size to TASK_SIZE.
4346 *
4347 * - the remaining sample size
4348 * If it doesn't, shrink the stack size to fit into
4349 * the remaining sample size.
4350 */
4351
4352 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
4353 stack_size = min(stack_size, (u16) task_size);
4354
4355 /* Current header size plus static size and dynamic size. */
4356 header_size += 2 * sizeof(u64);
4357
4358 /* Do we fit in with the current stack dump size? */
4359 if ((u16) (header_size + stack_size) < header_size) {
4360 /*
4361 * If we overflow the maximum size for the sample,
4362 * we shrink the stack dump size to fit.
4363 */
4364 stack_size = USHRT_MAX - header_size - sizeof(u64);
4365 stack_size = round_up(stack_size, sizeof(u64));
4366 }
4367
4368 return stack_size;
4369 }
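
/*
 * Worked example (illustrative numbers, assuming the user stack itself is
 * large enough): with header_size == 1000 and a requested dump of 65000
 * bytes, 1016 + 65000 overflows the u16 record size, so the dump is
 * clamped to 65535 - 1016 - 8 = 64511 and rounded up to 64512 bytes,
 * leaving the final sample just under USHRT_MAX.
 */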
4370
4371 static void
4372 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
4373 struct pt_regs *regs)
4374 {
4375 /* Case of a kernel thread, nothing to dump */
4376 if (!regs) {
4377 u64 size = 0;
4378 perf_output_put(handle, size);
4379 } else {
4380 unsigned long sp;
4381 unsigned int rem;
4382 u64 dyn_size;
4383
4384 /*
4385 * We dump:
4386 * static size
4387 * - the size requested by the user, or the best one we can
4388 * fit into the maximum sample size
4389 * data
4390 * - user stack dump data
4391 * dynamic size
4392 * - the actual dumped size
4393 */
4394
4395 /* Static size. */
4396 perf_output_put(handle, dump_size);
4397
4398 /* Data. */
4399 sp = perf_user_stack_pointer(regs);
4400 rem = __output_copy_user(handle, (void *) sp, dump_size);
4401 dyn_size = dump_size - rem;
4402
4403 perf_output_skip(handle, rem);
4404
4405 /* Dynamic size. */
4406 perf_output_put(handle, dyn_size);
4407 }
4408 }
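
/*
 * The resulting record layout for the user-stack dump is:
 *
 *	u64  size;	 - static dump size (0 for kernel threads, and then
 *			   nothing else is emitted)
 *	char data[size]; - copy of the user stack; any tail that could not
 *			   be copied is skipped over
 *	u64  dyn_size;	 - number of bytes actually copied
 */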
4409
4410 static void __perf_event_header__init_id(struct perf_event_header *header,
4411 struct perf_sample_data *data,
4412 struct perf_event *event)
4413 {
4414 u64 sample_type = event->attr.sample_type;
4415
4416 data->type = sample_type;
4417 header->size += event->id_header_size;
4418
4419 if (sample_type & PERF_SAMPLE_TID) {
4420 /* namespace issues */
4421 data->tid_entry.pid = perf_event_pid(event, current);
4422 data->tid_entry.tid = perf_event_tid(event, current);
4423 }
4424
4425 if (sample_type & PERF_SAMPLE_TIME)
4426 data->time = perf_clock();
4427
4428 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
4429 data->id = primary_event_id(event);
4430
4431 if (sample_type & PERF_SAMPLE_STREAM_ID)
4432 data->stream_id = event->id;
4433
4434 if (sample_type & PERF_SAMPLE_CPU) {
4435 data->cpu_entry.cpu = raw_smp_processor_id();
4436 data->cpu_entry.reserved = 0;
4437 }
4438 }
4439
4440 void perf_event_header__init_id(struct perf_event_header *header,
4441 struct perf_sample_data *data,
4442 struct perf_event *event)
4443 {
4444 if (event->attr.sample_id_all)
4445 __perf_event_header__init_id(header, data, event);
4446 }
4447
4448 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
4449 struct perf_sample_data *data)
4450 {
4451 u64 sample_type = data->type;
4452
4453 if (sample_type & PERF_SAMPLE_TID)
4454 perf_output_put(handle, data->tid_entry);
4455
4456 if (sample_type & PERF_SAMPLE_TIME)
4457 perf_output_put(handle, data->time);
4458
4459 if (sample_type & PERF_SAMPLE_ID)
4460 perf_output_put(handle, data->id);
4461
4462 if (sample_type & PERF_SAMPLE_STREAM_ID)
4463 perf_output_put(handle, data->stream_id);
4464
4465 if (sample_type & PERF_SAMPLE_CPU)
4466 perf_output_put(handle, data->cpu_entry);
4467
4468 if (sample_type & PERF_SAMPLE_IDENTIFIER)
4469 perf_output_put(handle, data->id);
4470 }
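
/*
 * Seen from user space, the trailer written above corresponds to the
 * sample_id layout of the perf ABI:
 *
 *	{ u32 pid, tid;  }	- if PERF_SAMPLE_TID
 *	{ u64 time;      }	- if PERF_SAMPLE_TIME
 *	{ u64 id;        }	- if PERF_SAMPLE_ID
 *	{ u64 stream_id; }	- if PERF_SAMPLE_STREAM_ID
 *	{ u32 cpu, res;  }	- if PERF_SAMPLE_CPU
 *	{ u64 id;        }	- if PERF_SAMPLE_IDENTIFIER
 */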
4471
4472 void perf_event__output_id_sample(struct perf_event *event,
4473 struct perf_output_handle *handle,
4474 struct perf_sample_data *sample)
4475 {
4476 if (event->attr.sample_id_all)
4477 __perf_event__output_id_sample(handle, sample);
4478 }
4479
4480 static void perf_output_read_one(struct perf_output_handle *handle,
4481 struct perf_event *event,
4482 u64 enabled, u64 running)
4483 {
4484 u64 read_format = event->attr.read_format;
4485 u64 values[4];
4486 int n = 0;
4487
4488 values[n++] = perf_event_count(event);
4489 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4490 values[n++] = enabled +
4491 atomic64_read(&event->child_total_time_enabled);
4492 }
4493 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4494 values[n++] = running +
4495 atomic64_read(&event->child_total_time_running);
4496 }
4497 if (read_format & PERF_FORMAT_ID)
4498 values[n++] = primary_event_id(event);
4499
4500 __output_copy(handle, values, n * sizeof(u64));
4501 }
4502
4503 /*
4504 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
4505 */
4506 static void perf_output_read_group(struct perf_output_handle *handle,
4507 struct perf_event *event,
4508 u64 enabled, u64 running)
4509 {
4510 struct perf_event *leader = event->group_leader, *sub;
4511 u64 read_format = event->attr.read_format;
4512 u64 values[5];
4513 int n = 0;
4514
4515 values[n++] = 1 + leader->nr_siblings;
4516
4517 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4518 values[n++] = enabled;
4519
4520 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4521 values[n++] = running;
4522
4523 if (leader != event)
4524 leader->pmu->read(leader);
4525
4526 values[n++] = perf_event_count(leader);
4527 if (read_format & PERF_FORMAT_ID)
4528 values[n++] = primary_event_id(leader);
4529
4530 __output_copy(handle, values, n * sizeof(u64));
4531
4532 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4533 n = 0;
4534
4535 if ((sub != event) &&
4536 (sub->state == PERF_EVENT_STATE_ACTIVE))
4537 sub->pmu->read(sub);
4538
4539 values[n++] = perf_event_count(sub);
4540 if (read_format & PERF_FORMAT_ID)
4541 values[n++] = primary_event_id(sub);
4542
4543 __output_copy(handle, values, n * sizeof(u64));
4544 }
4545 }
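
/*
 * For reference, the two helpers above produce the following read_format
 * layouts as seen by user space:
 *
 * PERF_FORMAT_GROUP unset (perf_output_read_one):
 *	u64 value;
 *	{ u64 time_enabled; }	- if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	{ u64 time_running; }	- if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	{ u64 id;           }	- if PERF_FORMAT_ID
 *
 * PERF_FORMAT_GROUP set (perf_output_read_group):
 *	u64 nr;
 *	{ u64 time_enabled; }	- if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	{ u64 time_running; }	- if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	{ u64 value; u64 id; } cntr[nr];
 */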
4546
4547 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
4548 PERF_FORMAT_TOTAL_TIME_RUNNING)
4549
4550 static void perf_output_read(struct perf_output_handle *handle,
4551 struct perf_event *event)
4552 {
4553 u64 enabled = 0, running = 0, now;
4554 u64 read_format = event->attr.read_format;
4555
4556 /*
4557 * compute total_time_enabled, total_time_running
4558 * based on snapshot values taken when the event
4559 * was last scheduled in.
4560 *
4561 * we cannot simply call update_context_time()
4562 * because of locking issues, as we may be called in
4563 * NMI context
4564 */
4565 if (read_format & PERF_FORMAT_TOTAL_TIMES)
4566 calc_timer_values(event, &now, &enabled, &running);
4567
4568 if (event->attr.read_format & PERF_FORMAT_GROUP)
4569 perf_output_read_group(handle, event, enabled, running);
4570 else
4571 perf_output_read_one(handle, event, enabled, running);
4572 }
4573
4574 void perf_output_sample(struct perf_output_handle *handle,
4575 struct perf_event_header *header,
4576 struct perf_sample_data *data,
4577 struct perf_event *event)
4578 {
4579 u64 sample_type = data->type;
4580
4581 perf_output_put(handle, *header);
4582
4583 if (sample_type & PERF_SAMPLE_IDENTIFIER)
4584 perf_output_put(handle, data->id);
4585
4586 if (sample_type & PERF_SAMPLE_IP)
4587 perf_output_put(handle, data->ip);
4588
4589 if (sample_type & PERF_SAMPLE_TID)
4590 perf_output_put(handle, data->tid_entry);
4591
4592 if (sample_type & PERF_SAMPLE_TIME)
4593 perf_output_put(handle, data->time);
4594
4595 if (sample_type & PERF_SAMPLE_ADDR)
4596 perf_output_put(handle, data->addr);
4597
4598 if (sample_type & PERF_SAMPLE_ID)
4599 perf_output_put(handle, data->id);
4600
4601 if (sample_type & PERF_SAMPLE_STREAM_ID)
4602 perf_output_put(handle, data->stream_id);
4603
4604 if (sample_type & PERF_SAMPLE_CPU)
4605 perf_output_put(handle, data->cpu_entry);
4606
4607 if (sample_type & PERF_SAMPLE_PERIOD)
4608 perf_output_put(handle, data->period);
4609
4610 if (sample_type & PERF_SAMPLE_READ)
4611 perf_output_read(handle, event);
4612
4613 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4614 if (data->callchain) {
4615 int size = 1;
4616
4617 if (data->callchain)
4618 size += data->callchain->nr;
4619
4620 size *= sizeof(u64);
4621
4622 __output_copy(handle, data->callchain, size);
4623 } else {
4624 u64 nr = 0;
4625 perf_output_put(handle, nr);
4626 }
4627 }
4628
4629 if (sample_type & PERF_SAMPLE_RAW) {
4630 if (data->raw) {
4631 perf_output_put(handle, data->raw->size);
4632 __output_copy(handle, data->raw->data,
4633 data->raw->size);
4634 } else {
4635 struct {
4636 u32 size;
4637 u32 data;
4638 } raw = {
4639 .size = sizeof(u32),
4640 .data = 0,
4641 };
4642 perf_output_put(handle, raw);
4643 }
4644 }
4645
4646 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4647 if (data->br_stack) {
4648 size_t size;
4649
4650 size = data->br_stack->nr
4651 * sizeof(struct perf_branch_entry);
4652
4653 perf_output_put(handle, data->br_stack->nr);
4654 perf_output_copy(handle, data->br_stack->entries, size);
4655 } else {
4656 /*
4657 * we always store at least the value of nr
4658 */
4659 u64 nr = 0;
4660 perf_output_put(handle, nr);
4661 }
4662 }
4663
4664 if (sample_type & PERF_SAMPLE_REGS_USER) {
4665 u64 abi = data->regs_user.abi;
4666
4667 /*
4668 * If there are no regs to dump, signal it through the
4669 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
4670 */
4671 perf_output_put(handle, abi);
4672
4673 if (abi) {
4674 u64 mask = event->attr.sample_regs_user;
4675 perf_output_sample_regs(handle,
4676 data->regs_user.regs,
4677 mask);
4678 }
4679 }
4680
4681 if (sample_type & PERF_SAMPLE_STACK_USER) {
4682 perf_output_sample_ustack(handle,
4683 data->stack_user_size,
4684 data->regs_user.regs);
4685 }
4686
4687 if (sample_type & PERF_SAMPLE_WEIGHT)
4688 perf_output_put(handle, data->weight);
4689
4690 if (sample_type & PERF_SAMPLE_DATA_SRC)
4691 perf_output_put(handle, data->data_src.val);
4692
4693 if (sample_type & PERF_SAMPLE_TRANSACTION)
4694 perf_output_put(handle, data->txn);
4695
4696 if (!event->attr.watermark) {
4697 int wakeup_events = event->attr.wakeup_events;
4698
4699 if (wakeup_events) {
4700 struct ring_buffer *rb = handle->rb;
4701 int events = local_inc_return(&rb->events);
4702
4703 if (events >= wakeup_events) {
4704 local_sub(wakeup_events, &rb->events);
4705 local_inc(&rb->wakeup);
4706 }
4707 }
4708 }
4709 }
4710
4711 void perf_prepare_sample(struct perf_event_header *header,
4712 struct perf_sample_data *data,
4713 struct perf_event *event,
4714 struct pt_regs *regs)
4715 {
4716 u64 sample_type = event->attr.sample_type;
4717
4718 header->type = PERF_RECORD_SAMPLE;
4719 header->size = sizeof(*header) + event->header_size;
4720
4721 header->misc = 0;
4722 header->misc |= perf_misc_flags(regs);
4723
4724 __perf_event_header__init_id(header, data, event);
4725
4726 if (sample_type & PERF_SAMPLE_IP)
4727 data->ip = perf_instruction_pointer(regs);
4728
4729 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4730 int size = 1;
4731
4732 data->callchain = perf_callchain(event, regs);
4733
4734 if (data->callchain)
4735 size += data->callchain->nr;
4736
4737 header->size += size * sizeof(u64);
4738 }
4739
4740 if (sample_type & PERF_SAMPLE_RAW) {
4741 int size = sizeof(u32);
4742
4743 if (data->raw)
4744 size += data->raw->size;
4745 else
4746 size += sizeof(u32);
4747
4748 WARN_ON_ONCE(size & (sizeof(u64)-1));
4749 header->size += size;
4750 }
4751
4752 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4753 int size = sizeof(u64); /* nr */
4754 if (data->br_stack) {
4755 size += data->br_stack->nr
4756 * sizeof(struct perf_branch_entry);
4757 }
4758 header->size += size;
4759 }
4760
4761 if (sample_type & PERF_SAMPLE_REGS_USER) {
4762 /* regs dump ABI info */
4763 int size = sizeof(u64);
4764
4765 perf_sample_regs_user(&data->regs_user, regs);
4766
4767 if (data->regs_user.regs) {
4768 u64 mask = event->attr.sample_regs_user;
4769 size += hweight64(mask) * sizeof(u64);
4770 }
4771
4772 header->size += size;
4773 }
4774
4775 if (sample_type & PERF_SAMPLE_STACK_USER) {
4776 /*
4777 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
4778 * processed last, or an additional check must be added when a
4779 * new sample type is introduced, because we could eat up the
4780 * rest of the sample size.
4781 */
4782 struct perf_regs_user *uregs = &data->regs_user;
4783 u16 stack_size = event->attr.sample_stack_user;
4784 u16 size = sizeof(u64);
4785
4786 if (!uregs->abi)
4787 perf_sample_regs_user(uregs, regs);
4788
4789 stack_size = perf_sample_ustack_size(stack_size, header->size,
4790 uregs->regs);
4791
4792 /*
4793 * If there is something to dump, add space for the dump
4794 * itself and for the field that tells the dynamic size,
4795 * which is how many have been actually dumped.
4796 */
4797 if (stack_size)
4798 size += sizeof(u64) + stack_size;
4799
4800 data->stack_user_size = stack_size;
4801 header->size += size;
4802 }
4803 }
4804
4805 static void perf_event_output(struct perf_event *event,
4806 struct perf_sample_data *data,
4807 struct pt_regs *regs)
4808 {
4809 struct perf_output_handle handle;
4810 struct perf_event_header header;
4811
4812 /* protect the callchain buffers */
4813 rcu_read_lock();
4814
4815 perf_prepare_sample(&header, data, event, regs);
4816
4817 if (perf_output_begin(&handle, event, header.size))
4818 goto exit;
4819
4820 perf_output_sample(&handle, &header, data, event);
4821
4822 perf_output_end(&handle);
4823
4824 exit:
4825 rcu_read_unlock();
4826 }
4827
4828 /*
4829 * read event_id
4830 */
4831
4832 struct perf_read_event {
4833 struct perf_event_header header;
4834
4835 u32 pid;
4836 u32 tid;
4837 };
4838
4839 static void
4840 perf_event_read_event(struct perf_event *event,
4841 struct task_struct *task)
4842 {
4843 struct perf_output_handle handle;
4844 struct perf_sample_data sample;
4845 struct perf_read_event read_event = {
4846 .header = {
4847 .type = PERF_RECORD_READ,
4848 .misc = 0,
4849 .size = sizeof(read_event) + event->read_size,
4850 },
4851 .pid = perf_event_pid(event, task),
4852 .tid = perf_event_tid(event, task),
4853 };
4854 int ret;
4855
4856 perf_event_header__init_id(&read_event.header, &sample, event);
4857 ret = perf_output_begin(&handle, event, read_event.header.size);
4858 if (ret)
4859 return;
4860
4861 perf_output_put(&handle, read_event);
4862 perf_output_read(&handle, event);
4863 perf_event__output_id_sample(event, &handle, &sample);
4864
4865 perf_output_end(&handle);
4866 }
4867
4868 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
4869
4870 static void
4871 perf_event_aux_ctx(struct perf_event_context *ctx,
4872 perf_event_aux_output_cb output,
4873 void *data)
4874 {
4875 struct perf_event *event;
4876
4877 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4878 if (event->state < PERF_EVENT_STATE_INACTIVE)
4879 continue;
4880 if (!event_filter_match(event))
4881 continue;
4882 output(event, data);
4883 }
4884 }
4885
4886 static void
4887 perf_event_aux(perf_event_aux_output_cb output, void *data,
4888 struct perf_event_context *task_ctx)
4889 {
4890 struct perf_cpu_context *cpuctx;
4891 struct perf_event_context *ctx;
4892 struct pmu *pmu;
4893 int ctxn;
4894
4895 rcu_read_lock();
4896 list_for_each_entry_rcu(pmu, &pmus, entry) {
4897 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4898 if (cpuctx->unique_pmu != pmu)
4899 goto next;
4900 perf_event_aux_ctx(&cpuctx->ctx, output, data);
4901 if (task_ctx)
4902 goto next;
4903 ctxn = pmu->task_ctx_nr;
4904 if (ctxn < 0)
4905 goto next;
4906 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4907 if (ctx)
4908 perf_event_aux_ctx(ctx, output, data);
4909 next:
4910 put_cpu_ptr(pmu->pmu_cpu_context);
4911 }
4912
4913 if (task_ctx) {
4914 preempt_disable();
4915 perf_event_aux_ctx(task_ctx, output, data);
4916 preempt_enable();
4917 }
4918 rcu_read_unlock();
4919 }
4920
4921 /*
4922 * task tracking -- fork/exit
4923 *
4924 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
4925 */
4926
4927 struct perf_task_event {
4928 struct task_struct *task;
4929 struct perf_event_context *task_ctx;
4930
4931 struct {
4932 struct perf_event_header header;
4933
4934 u32 pid;
4935 u32 ppid;
4936 u32 tid;
4937 u32 ptid;
4938 u64 time;
4939 } event_id;
4940 };
4941
4942 static int perf_event_task_match(struct perf_event *event)
4943 {
4944 return event->attr.comm || event->attr.mmap ||
4945 event->attr.mmap2 || event->attr.mmap_data ||
4946 event->attr.task;
4947 }
4948
4949 static void perf_event_task_output(struct perf_event *event,
4950 void *data)
4951 {
4952 struct perf_task_event *task_event = data;
4953 struct perf_output_handle handle;
4954 struct perf_sample_data sample;
4955 struct task_struct *task = task_event->task;
4956 int ret, size = task_event->event_id.header.size;
4957
4958 if (!perf_event_task_match(event))
4959 return;
4960
4961 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
4962
4963 ret = perf_output_begin(&handle, event,
4964 task_event->event_id.header.size);
4965 if (ret)
4966 goto out;
4967
4968 task_event->event_id.pid = perf_event_pid(event, task);
4969 task_event->event_id.ppid = perf_event_pid(event, current);
4970
4971 task_event->event_id.tid = perf_event_tid(event, task);
4972 task_event->event_id.ptid = perf_event_tid(event, current);
4973
4974 perf_output_put(&handle, task_event->event_id);
4975
4976 perf_event__output_id_sample(event, &handle, &sample);
4977
4978 perf_output_end(&handle);
4979 out:
4980 task_event->event_id.header.size = size;
4981 }
4982
4983 static void perf_event_task(struct task_struct *task,
4984 struct perf_event_context *task_ctx,
4985 int new)
4986 {
4987 struct perf_task_event task_event;
4988
4989 if (!atomic_read(&nr_comm_events) &&
4990 !atomic_read(&nr_mmap_events) &&
4991 !atomic_read(&nr_task_events))
4992 return;
4993
4994 task_event = (struct perf_task_event){
4995 .task = task,
4996 .task_ctx = task_ctx,
4997 .event_id = {
4998 .header = {
4999 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
5000 .misc = 0,
5001 .size = sizeof(task_event.event_id),
5002 },
5003 /* .pid */
5004 /* .ppid */
5005 /* .tid */
5006 /* .ptid */
5007 .time = perf_clock(),
5008 },
5009 };
5010
5011 perf_event_aux(perf_event_task_output,
5012 &task_event,
5013 task_ctx);
5014 }
5015
5016 void perf_event_fork(struct task_struct *task)
5017 {
5018 perf_event_task(task, NULL, 1);
5019 }
5020
5021 /*
5022 * comm tracking
5023 */
5024
5025 struct perf_comm_event {
5026 struct task_struct *task;
5027 char *comm;
5028 int comm_size;
5029
5030 struct {
5031 struct perf_event_header header;
5032
5033 u32 pid;
5034 u32 tid;
5035 } event_id;
5036 };
5037
5038 static int perf_event_comm_match(struct perf_event *event)
5039 {
5040 return event->attr.comm;
5041 }
5042
5043 static void perf_event_comm_output(struct perf_event *event,
5044 void *data)
5045 {
5046 struct perf_comm_event *comm_event = data;
5047 struct perf_output_handle handle;
5048 struct perf_sample_data sample;
5049 int size = comm_event->event_id.header.size;
5050 int ret;
5051
5052 if (!perf_event_comm_match(event))
5053 return;
5054
5055 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
5056 ret = perf_output_begin(&handle, event,
5057 comm_event->event_id.header.size);
5058
5059 if (ret)
5060 goto out;
5061
5062 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
5063 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
5064
5065 perf_output_put(&handle, comm_event->event_id);
5066 __output_copy(&handle, comm_event->comm,
5067 comm_event->comm_size);
5068
5069 perf_event__output_id_sample(event, &handle, &sample);
5070
5071 perf_output_end(&handle);
5072 out:
5073 comm_event->event_id.header.size = size;
5074 }
5075
5076 static void perf_event_comm_event(struct perf_comm_event *comm_event)
5077 {
5078 char comm[TASK_COMM_LEN];
5079 unsigned int size;
5080
5081 memset(comm, 0, sizeof(comm));
5082 strlcpy(comm, comm_event->task->comm, sizeof(comm));
5083 size = ALIGN(strlen(comm)+1, sizeof(u64));
5084
5085 comm_event->comm = comm;
5086 comm_event->comm_size = size;
5087
5088 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
5089
5090 perf_event_aux(perf_event_comm_output,
5091 comm_event,
5092 NULL);
5093 }
5094
5095 void perf_event_comm(struct task_struct *task, bool exec)
5096 {
5097 struct perf_comm_event comm_event;
5098
5099 if (!atomic_read(&nr_comm_events))
5100 return;
5101
5102 comm_event = (struct perf_comm_event){
5103 .task = task,
5104 /* .comm */
5105 /* .comm_size */
5106 .event_id = {
5107 .header = {
5108 .type = PERF_RECORD_COMM,
5109 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
5110 /* .size */
5111 },
5112 /* .pid */
5113 /* .tid */
5114 },
5115 };
5116
5117 perf_event_comm_event(&comm_event);
5118 }
5119
5120 /*
5121 * mmap tracking
5122 */
5123
5124 struct perf_mmap_event {
5125 struct vm_area_struct *vma;
5126
5127 const char *file_name;
5128 int file_size;
5129 int maj, min;
5130 u64 ino;
5131 u64 ino_generation;
5132 u32 prot, flags;
5133
5134 struct {
5135 struct perf_event_header header;
5136
5137 u32 pid;
5138 u32 tid;
5139 u64 start;
5140 u64 len;
5141 u64 pgoff;
5142 } event_id;
5143 };
5144
5145 static int perf_event_mmap_match(struct perf_event *event,
5146 void *data)
5147 {
5148 struct perf_mmap_event *mmap_event = data;
5149 struct vm_area_struct *vma = mmap_event->vma;
5150 int executable = vma->vm_flags & VM_EXEC;
5151
5152 return (!executable && event->attr.mmap_data) ||
5153 (executable && (event->attr.mmap || event->attr.mmap2));
5154 }
5155
5156 static void perf_event_mmap_output(struct perf_event *event,
5157 void *data)
5158 {
5159 struct perf_mmap_event *mmap_event = data;
5160 struct perf_output_handle handle;
5161 struct perf_sample_data sample;
5162 int size = mmap_event->event_id.header.size;
5163 int ret;
5164
5165 if (!perf_event_mmap_match(event, data))
5166 return;
5167
5168 if (event->attr.mmap2) {
5169 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
5170 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
5171 mmap_event->event_id.header.size += sizeof(mmap_event->min);
5172 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
5173 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
5174 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
5175 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
5176 }
5177
5178 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
5179 ret = perf_output_begin(&handle, event,
5180 mmap_event->event_id.header.size);
5181 if (ret)
5182 goto out;
5183
5184 mmap_event->event_id.pid = perf_event_pid(event, current);
5185 mmap_event->event_id.tid = perf_event_tid(event, current);
5186
5187 perf_output_put(&handle, mmap_event->event_id);
5188
5189 if (event->attr.mmap2) {
5190 perf_output_put(&handle, mmap_event->maj);
5191 perf_output_put(&handle, mmap_event->min);
5192 perf_output_put(&handle, mmap_event->ino);
5193 perf_output_put(&handle, mmap_event->ino_generation);
5194 perf_output_put(&handle, mmap_event->prot);
5195 perf_output_put(&handle, mmap_event->flags);
5196 }
5197
5198 __output_copy(&handle, mmap_event->file_name,
5199 mmap_event->file_size);
5200
5201 perf_event__output_id_sample(event, &handle, &sample);
5202
5203 perf_output_end(&handle);
5204 out:
5205 mmap_event->event_id.header.size = size;
5206 }
5207
5208 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
5209 {
5210 struct vm_area_struct *vma = mmap_event->vma;
5211 struct file *file = vma->vm_file;
5212 int maj = 0, min = 0;
5213 u64 ino = 0, gen = 0;
5214 u32 prot = 0, flags = 0;
5215 unsigned int size;
5216 char tmp[16];
5217 char *buf = NULL;
5218 char *name;
5219
5220 if (file) {
5221 struct inode *inode;
5222 dev_t dev;
5223
5224 buf = kmalloc(PATH_MAX, GFP_KERNEL);
5225 if (!buf) {
5226 name = "//enomem";
5227 goto cpy_name;
5228 }
5229 /*
5230 * d_path() works from the end of the buffer backwards, so we
5231 * need to add enough zero bytes after the string to handle
5232 * the 64bit alignment we do later.
5233 */
5234 name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
5235 if (IS_ERR(name)) {
5236 name = "//toolong";
5237 goto cpy_name;
5238 }
5239 inode = file_inode(vma->vm_file);
5240 dev = inode->i_sb->s_dev;
5241 ino = inode->i_ino;
5242 gen = inode->i_generation;
5243 maj = MAJOR(dev);
5244 min = MINOR(dev);
5245
5246 if (vma->vm_flags & VM_READ)
5247 prot |= PROT_READ;
5248 if (vma->vm_flags & VM_WRITE)
5249 prot |= PROT_WRITE;
5250 if (vma->vm_flags & VM_EXEC)
5251 prot |= PROT_EXEC;
5252
5253 if (vma->vm_flags & VM_MAYSHARE)
5254 flags = MAP_SHARED;
5255 else
5256 flags = MAP_PRIVATE;
5257
5258 if (vma->vm_flags & VM_DENYWRITE)
5259 flags |= MAP_DENYWRITE;
5260 if (vma->vm_flags & VM_MAYEXEC)
5261 flags |= MAP_EXECUTABLE;
5262 if (vma->vm_flags & VM_LOCKED)
5263 flags |= MAP_LOCKED;
5264 if (vma->vm_flags & VM_HUGETLB)
5265 flags |= MAP_HUGETLB;
5266
5267 goto got_name;
5268 } else {
5269 name = (char *)arch_vma_name(vma);
5270 if (name)
5271 goto cpy_name;
5272
5273 if (vma->vm_start <= vma->vm_mm->start_brk &&
5274 vma->vm_end >= vma->vm_mm->brk) {
5275 name = "[heap]";
5276 goto cpy_name;
5277 }
5278 if (vma->vm_start <= vma->vm_mm->start_stack &&
5279 vma->vm_end >= vma->vm_mm->start_stack) {
5280 name = "[stack]";
5281 goto cpy_name;
5282 }
5283
5284 name = "//anon";
5285 goto cpy_name;
5286 }
5287
5288 cpy_name:
5289 strlcpy(tmp, name, sizeof(tmp));
5290 name = tmp;
5291 got_name:
5292 /*
5293 * Since our buffer works in 8 byte units we need to align our string
5294 * size to a multiple of 8. However, we must guarantee the tail end is
5295 * zero'd out to avoid leaking random bits to userspace.
5296 */
5297 size = strlen(name)+1;
5298 while (!IS_ALIGNED(size, sizeof(u64)))
5299 name[size++] = '\0';
5300
5301 mmap_event->file_name = name;
5302 mmap_event->file_size = size;
5303 mmap_event->maj = maj;
5304 mmap_event->min = min;
5305 mmap_event->ino = ino;
5306 mmap_event->ino_generation = gen;
5307 mmap_event->prot = prot;
5308 mmap_event->flags = flags;
5309
5310 if (!(vma->vm_flags & VM_EXEC))
5311 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
5312
5313 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
5314
5315 perf_event_aux(perf_event_mmap_output,
5316 mmap_event,
5317 NULL);
5318
5319 kfree(buf);
5320 }
5321
5322 void perf_event_mmap(struct vm_area_struct *vma)
5323 {
5324 struct perf_mmap_event mmap_event;
5325
5326 if (!atomic_read(&nr_mmap_events))
5327 return;
5328
5329 mmap_event = (struct perf_mmap_event){
5330 .vma = vma,
5331 /* .file_name */
5332 /* .file_size */
5333 .event_id = {
5334 .header = {
5335 .type = PERF_RECORD_MMAP,
5336 .misc = PERF_RECORD_MISC_USER,
5337 /* .size */
5338 },
5339 /* .pid */
5340 /* .tid */
5341 .start = vma->vm_start,
5342 .len = vma->vm_end - vma->vm_start,
5343 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
5344 },
5345 /* .maj (attr_mmap2 only) */
5346 /* .min (attr_mmap2 only) */
5347 /* .ino (attr_mmap2 only) */
5348 /* .ino_generation (attr_mmap2 only) */
5349 /* .prot (attr_mmap2 only) */
5350 /* .flags (attr_mmap2 only) */
5351 };
5352
5353 perf_event_mmap_event(&mmap_event);
5354 }
5355
5356 /*
5357 * IRQ throttle logging
5358 */
5359
5360 static void perf_log_throttle(struct perf_event *event, int enable)
5361 {
5362 struct perf_output_handle handle;
5363 struct perf_sample_data sample;
5364 int ret;
5365
5366 struct {
5367 struct perf_event_header header;
5368 u64 time;
5369 u64 id;
5370 u64 stream_id;
5371 } throttle_event = {
5372 .header = {
5373 .type = PERF_RECORD_THROTTLE,
5374 .misc = 0,
5375 .size = sizeof(throttle_event),
5376 },
5377 .time = perf_clock(),
5378 .id = primary_event_id(event),
5379 .stream_id = event->id,
5380 };
5381
5382 if (enable)
5383 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
5384
5385 perf_event_header__init_id(&throttle_event.header, &sample, event);
5386
5387 ret = perf_output_begin(&handle, event,
5388 throttle_event.header.size);
5389 if (ret)
5390 return;
5391
5392 perf_output_put(&handle, throttle_event);
5393 perf_event__output_id_sample(event, &handle, &sample);
5394 perf_output_end(&handle);
5395 }
5396
5397 /*
5398 * Generic event overflow handling, sampling.
5399 */
5400
5401 static int __perf_event_overflow(struct perf_event *event,
5402 int throttle, struct perf_sample_data *data,
5403 struct pt_regs *regs)
5404 {
5405 int events = atomic_read(&event->event_limit);
5406 struct hw_perf_event *hwc = &event->hw;
5407 u64 seq;
5408 int ret = 0;
5409
5410 /*
5411 * Non-sampling counters might still use the PMI to fold short
5412 * hardware counters; ignore those.
5413 */
5414 if (unlikely(!is_sampling_event(event)))
5415 return 0;
5416
5417 seq = __this_cpu_read(perf_throttled_seq);
5418 if (seq != hwc->interrupts_seq) {
5419 hwc->interrupts_seq = seq;
5420 hwc->interrupts = 1;
5421 } else {
5422 hwc->interrupts++;
5423 if (unlikely(throttle
5424 && hwc->interrupts >= max_samples_per_tick)) {
5425 __this_cpu_inc(perf_throttled_count);
5426 hwc->interrupts = MAX_INTERRUPTS;
5427 perf_log_throttle(event, 0);
5428 tick_nohz_full_kick();
5429 ret = 1;
5430 }
5431 }
5432
5433 if (event->attr.freq) {
5434 u64 now = perf_clock();
5435 s64 delta = now - hwc->freq_time_stamp;
5436
5437 hwc->freq_time_stamp = now;
5438
5439 if (delta > 0 && delta < 2*TICK_NSEC)
5440 perf_adjust_period(event, delta, hwc->last_period, true);
5441 }
5442
5443 /*
5444 * XXX event_limit might not quite work as expected on inherited
5445 * events
5446 */
5447
5448 event->pending_kill = POLL_IN;
5449 if (events && atomic_dec_and_test(&event->event_limit)) {
5450 ret = 1;
5451 event->pending_kill = POLL_HUP;
5452 event->pending_disable = 1;
5453 irq_work_queue(&event->pending);
5454 }
5455
5456 if (event->overflow_handler)
5457 event->overflow_handler(event, data, regs);
5458 else
5459 perf_event_output(event, data, regs);
5460
5461 if (event->fasync && event->pending_kill) {
5462 event->pending_wakeup = 1;
5463 irq_work_queue(&event->pending);
5464 }
5465
5466 return ret;
5467 }
5468
5469 int perf_event_overflow(struct perf_event *event,
5470 struct perf_sample_data *data,
5471 struct pt_regs *regs)
5472 {
5473 return __perf_event_overflow(event, 1, data, regs);
5474 }
5475
5476 /*
5477 * Generic software event infrastructure
5478 */
5479
5480 struct swevent_htable {
5481 struct swevent_hlist *swevent_hlist;
5482 struct mutex hlist_mutex;
5483 int hlist_refcount;
5484
5485 /* Recursion avoidance in each context */
5486 int recursion[PERF_NR_CONTEXTS];
5487
5488 /* Keeps track of cpu being initialized/exited */
5489 bool online;
5490 };
5491
5492 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
5493
5494 /*
5495 * We directly increment event->count and keep a second value in
5496 * event->hw.period_left to count intervals. This period counter
5497 * is kept in the range [-sample_period, 0] so that we can use the
5498 * sign as the trigger.
5499 */
5500
5501 u64 perf_swevent_set_period(struct perf_event *event)
5502 {
5503 struct hw_perf_event *hwc = &event->hw;
5504 u64 period = hwc->last_period;
5505 u64 nr, offset;
5506 s64 old, val;
5507
5508 hwc->last_period = hwc->sample_period;
5509
5510 again:
5511 old = val = local64_read(&hwc->period_left);
5512 if (val < 0)
5513 return 0;
5514
5515 nr = div64_u64(period + val, period);
5516 offset = nr * period;
5517 val -= offset;
5518 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
5519 goto again;
5520
5521 return nr;
5522 }
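
/*
 * Worked example (illustrative numbers): with sample_period == 100 and
 * period_left having been advanced to +30 by perf_swevent_event(), we get
 * nr = (100 + 30) / 100 = 1 overflow, and period_left is rewound to
 * 30 - 100 = -70, i.e. 70 more units until the next overflow.
 */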
5523
5524 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
5525 struct perf_sample_data *data,
5526 struct pt_regs *regs)
5527 {
5528 struct hw_perf_event *hwc = &event->hw;
5529 int throttle = 0;
5530
5531 if (!overflow)
5532 overflow = perf_swevent_set_period(event);
5533
5534 if (hwc->interrupts == MAX_INTERRUPTS)
5535 return;
5536
5537 for (; overflow; overflow--) {
5538 if (__perf_event_overflow(event, throttle,
5539 data, regs)) {
5540 /*
5541 * We inhibit the overflow from happening when
5542 * hwc->interrupts == MAX_INTERRUPTS.
5543 */
5544 break;
5545 }
5546 throttle = 1;
5547 }
5548 }
5549
5550 static void perf_swevent_event(struct perf_event *event, u64 nr,
5551 struct perf_sample_data *data,
5552 struct pt_regs *regs)
5553 {
5554 struct hw_perf_event *hwc = &event->hw;
5555
5556 local64_add(nr, &event->count);
5557
5558 if (!regs)
5559 return;
5560
5561 if (!is_sampling_event(event))
5562 return;
5563
5564 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
5565 data->period = nr;
5566 return perf_swevent_overflow(event, 1, data, regs);
5567 } else
5568 data->period = event->hw.last_period;
5569
5570 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
5571 return perf_swevent_overflow(event, 1, data, regs);
5572
5573 if (local64_add_negative(nr, &hwc->period_left))
5574 return;
5575
5576 perf_swevent_overflow(event, 0, data, regs);
5577 }
5578
5579 static int perf_exclude_event(struct perf_event *event,
5580 struct pt_regs *regs)
5581 {
5582 if (event->hw.state & PERF_HES_STOPPED)
5583 return 1;
5584
5585 if (regs) {
5586 if (event->attr.exclude_user && user_mode(regs))
5587 return 1;
5588
5589 if (event->attr.exclude_kernel && !user_mode(regs))
5590 return 1;
5591 }
5592
5593 return 0;
5594 }
5595
5596 static int perf_swevent_match(struct perf_event *event,
5597 enum perf_type_id type,
5598 u32 event_id,
5599 struct perf_sample_data *data,
5600 struct pt_regs *regs)
5601 {
5602 if (event->attr.type != type)
5603 return 0;
5604
5605 if (event->attr.config != event_id)
5606 return 0;
5607
5608 if (perf_exclude_event(event, regs))
5609 return 0;
5610
5611 return 1;
5612 }
5613
5614 static inline u64 swevent_hash(u64 type, u32 event_id)
5615 {
5616 u64 val = event_id | (type << 32);
5617
5618 return hash_64(val, SWEVENT_HLIST_BITS);
5619 }
5620
5621 static inline struct hlist_head *
5622 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
5623 {
5624 u64 hash = swevent_hash(type, event_id);
5625
5626 return &hlist->heads[hash];
5627 }
5628
5629 /* For the read side: events when they trigger */
5630 static inline struct hlist_head *
5631 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
5632 {
5633 struct swevent_hlist *hlist;
5634
5635 hlist = rcu_dereference(swhash->swevent_hlist);
5636 if (!hlist)
5637 return NULL;
5638
5639 return __find_swevent_head(hlist, type, event_id);
5640 }
5641
5642 /* For the event head insertion and removal in the hlist */
5643 static inline struct hlist_head *
5644 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
5645 {
5646 struct swevent_hlist *hlist;
5647 u32 event_id = event->attr.config;
5648 u64 type = event->attr.type;
5649
5650 /*
5651 * Event scheduling is always serialized against hlist allocation
5652 * and release, which makes the protected version suitable here.
5653 * The context lock guarantees that.
5654 */
5655 hlist = rcu_dereference_protected(swhash->swevent_hlist,
5656 lockdep_is_held(&event->ctx->lock));
5657 if (!hlist)
5658 return NULL;
5659
5660 return __find_swevent_head(hlist, type, event_id);
5661 }
5662
5663 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
5664 u64 nr,
5665 struct perf_sample_data *data,
5666 struct pt_regs *regs)
5667 {
5668 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5669 struct perf_event *event;
5670 struct hlist_head *head;
5671
5672 rcu_read_lock();
5673 head = find_swevent_head_rcu(swhash, type, event_id);
5674 if (!head)
5675 goto end;
5676
5677 hlist_for_each_entry_rcu(event, head, hlist_entry) {
5678 if (perf_swevent_match(event, type, event_id, data, regs))
5679 perf_swevent_event(event, nr, data, regs);
5680 }
5681 end:
5682 rcu_read_unlock();
5683 }
5684
5685 int perf_swevent_get_recursion_context(void)
5686 {
5687 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5688
5689 return get_recursion_context(swhash->recursion);
5690 }
5691 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
5692
5693 inline void perf_swevent_put_recursion_context(int rctx)
5694 {
5695 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5696
5697 put_recursion_context(swhash->recursion, rctx);
5698 }
5699
5700 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
5701 {
5702 struct perf_sample_data data;
5703 int rctx;
5704
5705 preempt_disable_notrace();
5706 rctx = perf_swevent_get_recursion_context();
5707 if (rctx < 0)
5708 return;
5709
5710 perf_sample_data_init(&data, addr, 0);
5711
5712 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
5713
5714 perf_swevent_put_recursion_context(rctx);
5715 preempt_enable_notrace();
5716 }
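
/*
 * Callers normally go through the perf_sw_event() wrapper in
 * <linux/perf_event.h>, which only drops into __perf_sw_event() when the
 * perf_swevent_enabled static key for that event id is on.  A typical
 * call site looks roughly like the page-fault handlers do:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */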
5717
5718 static void perf_swevent_read(struct perf_event *event)
5719 {
5720 }
5721
5722 static int perf_swevent_add(struct perf_event *event, int flags)
5723 {
5724 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5725 struct hw_perf_event *hwc = &event->hw;
5726 struct hlist_head *head;
5727
5728 if (is_sampling_event(event)) {
5729 hwc->last_period = hwc->sample_period;
5730 perf_swevent_set_period(event);
5731 }
5732
5733 hwc->state = !(flags & PERF_EF_START);
5734
5735 head = find_swevent_head(swhash, event);
5736 if (!head) {
5737 /*
5738 * We can race with cpu hotplug code. Do not
5739 * WARN if the cpu just got unplugged.
5740 */
5741 WARN_ON_ONCE(swhash->online);
5742 return -EINVAL;
5743 }
5744
5745 hlist_add_head_rcu(&event->hlist_entry, head);
5746
5747 return 0;
5748 }
5749
5750 static void perf_swevent_del(struct perf_event *event, int flags)
5751 {
5752 hlist_del_rcu(&event->hlist_entry);
5753 }
5754
5755 static void perf_swevent_start(struct perf_event *event, int flags)
5756 {
5757 event->hw.state = 0;
5758 }
5759
5760 static void perf_swevent_stop(struct perf_event *event, int flags)
5761 {
5762 event->hw.state = PERF_HES_STOPPED;
5763 }
5764
5765 /* Deref the hlist from the update side */
5766 static inline struct swevent_hlist *
5767 swevent_hlist_deref(struct swevent_htable *swhash)
5768 {
5769 return rcu_dereference_protected(swhash->swevent_hlist,
5770 lockdep_is_held(&swhash->hlist_mutex));
5771 }
5772
5773 static void swevent_hlist_release(struct swevent_htable *swhash)
5774 {
5775 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
5776
5777 if (!hlist)
5778 return;
5779
5780 rcu_assign_pointer(swhash->swevent_hlist, NULL);
5781 kfree_rcu(hlist, rcu_head);
5782 }
5783
5784 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
5785 {
5786 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5787
5788 mutex_lock(&swhash->hlist_mutex);
5789
5790 if (!--swhash->hlist_refcount)
5791 swevent_hlist_release(swhash);
5792
5793 mutex_unlock(&swhash->hlist_mutex);
5794 }
5795
5796 static void swevent_hlist_put(struct perf_event *event)
5797 {
5798 int cpu;
5799
5800 for_each_possible_cpu(cpu)
5801 swevent_hlist_put_cpu(event, cpu);
5802 }
5803
5804 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
5805 {
5806 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5807 int err = 0;
5808
5809 mutex_lock(&swhash->hlist_mutex);
5810
5811 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
5812 struct swevent_hlist *hlist;
5813
5814 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5815 if (!hlist) {
5816 err = -ENOMEM;
5817 goto exit;
5818 }
5819 rcu_assign_pointer(swhash->swevent_hlist, hlist);
5820 }
5821 swhash->hlist_refcount++;
5822 exit:
5823 mutex_unlock(&swhash->hlist_mutex);
5824
5825 return err;
5826 }
5827
5828 static int swevent_hlist_get(struct perf_event *event)
5829 {
5830 int err;
5831 int cpu, failed_cpu;
5832
5833 get_online_cpus();
5834 for_each_possible_cpu(cpu) {
5835 err = swevent_hlist_get_cpu(event, cpu);
5836 if (err) {
5837 failed_cpu = cpu;
5838 goto fail;
5839 }
5840 }
5841 put_online_cpus();
5842
5843 return 0;
5844 fail:
5845 for_each_possible_cpu(cpu) {
5846 if (cpu == failed_cpu)
5847 break;
5848 swevent_hlist_put_cpu(event, cpu);
5849 }
5850
5851 put_online_cpus();
5852 return err;
5853 }
5854
5855 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
5856
5857 static void sw_perf_event_destroy(struct perf_event *event)
5858 {
5859 u64 event_id = event->attr.config;
5860
5861 WARN_ON(event->parent);
5862
5863 static_key_slow_dec(&perf_swevent_enabled[event_id]);
5864 swevent_hlist_put(event);
5865 }
5866
5867 static int perf_swevent_init(struct perf_event *event)
5868 {
5869 u64 event_id = event->attr.config;
5870
5871 if (event->attr.type != PERF_TYPE_SOFTWARE)
5872 return -ENOENT;
5873
5874 /*
5875 * no branch sampling for software events
5876 */
5877 if (has_branch_stack(event))
5878 return -EOPNOTSUPP;
5879
5880 switch (event_id) {
5881 case PERF_COUNT_SW_CPU_CLOCK:
5882 case PERF_COUNT_SW_TASK_CLOCK:
5883 return -ENOENT;
5884
5885 default:
5886 break;
5887 }
5888
5889 if (event_id >= PERF_COUNT_SW_MAX)
5890 return -ENOENT;
5891
5892 if (!event->parent) {
5893 int err;
5894
5895 err = swevent_hlist_get(event);
5896 if (err)
5897 return err;
5898
5899 static_key_slow_inc(&perf_swevent_enabled[event_id]);
5900 event->destroy = sw_perf_event_destroy;
5901 }
5902
5903 return 0;
5904 }
5905
5906 static int perf_swevent_event_idx(struct perf_event *event)
5907 {
5908 return 0;
5909 }
5910
5911 static struct pmu perf_swevent = {
5912 .task_ctx_nr = perf_sw_context,
5913
5914 .event_init = perf_swevent_init,
5915 .add = perf_swevent_add,
5916 .del = perf_swevent_del,
5917 .start = perf_swevent_start,
5918 .stop = perf_swevent_stop,
5919 .read = perf_swevent_read,
5920
5921 .event_idx = perf_swevent_event_idx,
5922 };
5923
5924 #ifdef CONFIG_EVENT_TRACING
5925
5926 static int perf_tp_filter_match(struct perf_event *event,
5927 struct perf_sample_data *data)
5928 {
5929 void *record = data->raw->data;
5930
5931 if (likely(!event->filter) || filter_match_preds(event->filter, record))
5932 return 1;
5933 return 0;
5934 }
5935
5936 static int perf_tp_event_match(struct perf_event *event,
5937 struct perf_sample_data *data,
5938 struct pt_regs *regs)
5939 {
5940 if (event->hw.state & PERF_HES_STOPPED)
5941 return 0;
5942 /*
5943 * All tracepoints are from kernel-space.
5944 */
5945 if (event->attr.exclude_kernel)
5946 return 0;
5947
5948 if (!perf_tp_filter_match(event, data))
5949 return 0;
5950
5951 return 1;
5952 }
5953
5954 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5955 struct pt_regs *regs, struct hlist_head *head, int rctx,
5956 struct task_struct *task)
5957 {
5958 struct perf_sample_data data;
5959 struct perf_event *event;
5960
5961 struct perf_raw_record raw = {
5962 .size = entry_size,
5963 .data = record,
5964 };
5965
5966 perf_sample_data_init(&data, addr, 0);
5967 data.raw = &raw;
5968
5969 hlist_for_each_entry_rcu(event, head, hlist_entry) {
5970 if (perf_tp_event_match(event, &data, regs))
5971 perf_swevent_event(event, count, &data, regs);
5972 }
5973
5974 /*
5975 * If we were given a target task, also iterate its context and
5976 * deliver this event there too.
5977 */
5978 if (task && task != current) {
5979 struct perf_event_context *ctx;
5980 struct trace_entry *entry = record;
5981
5982 rcu_read_lock();
5983 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
5984 if (!ctx)
5985 goto unlock;
5986
5987 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5988 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5989 continue;
5990 if (event->attr.config != entry->type)
5991 continue;
5992 if (perf_tp_event_match(event, &data, regs))
5993 perf_swevent_event(event, count, &data, regs);
5994 }
5995 unlock:
5996 rcu_read_unlock();
5997 }
5998
5999 perf_swevent_put_recursion_context(rctx);
6000 }
6001 EXPORT_SYMBOL_GPL(perf_tp_event);
6002
6003 static void tp_perf_event_destroy(struct perf_event *event)
6004 {
6005 perf_trace_destroy(event);
6006 }
6007
6008 static int perf_tp_event_init(struct perf_event *event)
6009 {
6010 int err;
6011
6012 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6013 return -ENOENT;
6014
6015 /*
6016 * no branch sampling for tracepoint events
6017 */
6018 if (has_branch_stack(event))
6019 return -EOPNOTSUPP;
6020
6021 err = perf_trace_init(event);
6022 if (err)
6023 return err;
6024
6025 event->destroy = tp_perf_event_destroy;
6026
6027 return 0;
6028 }
6029
6030 static struct pmu perf_tracepoint = {
6031 .task_ctx_nr = perf_sw_context,
6032
6033 .event_init = perf_tp_event_init,
6034 .add = perf_trace_add,
6035 .del = perf_trace_del,
6036 .start = perf_swevent_start,
6037 .stop = perf_swevent_stop,
6038 .read = perf_swevent_read,
6039
6040 .event_idx = perf_swevent_event_idx,
6041 };
6042
6043 static inline void perf_tp_register(void)
6044 {
6045 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
6046 }
6047
6048 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
6049 {
6050 char *filter_str;
6051 int ret;
6052
6053 if (event->attr.type != PERF_TYPE_TRACEPOINT)
6054 return -EINVAL;
6055
6056 filter_str = strndup_user(arg, PAGE_SIZE);
6057 if (IS_ERR(filter_str))
6058 return PTR_ERR(filter_str);
6059
6060 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
6061
6062 kfree(filter_str);
6063 return ret;
6064 }
6065
6066 static void perf_event_free_filter(struct perf_event *event)
6067 {
6068 ftrace_profile_free_filter(event);
6069 }
6070
6071 #else
6072
6073 static inline void perf_tp_register(void)
6074 {
6075 }
6076
6077 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
6078 {
6079 return -ENOENT;
6080 }
6081
6082 static void perf_event_free_filter(struct perf_event *event)
6083 {
6084 }
6085
6086 #endif /* CONFIG_EVENT_TRACING */
6087
6088 #ifdef CONFIG_HAVE_HW_BREAKPOINT
6089 void perf_bp_event(struct perf_event *bp, void *data)
6090 {
6091 struct perf_sample_data sample;
6092 struct pt_regs *regs = data;
6093
6094 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
6095
6096 if (!bp->hw.state && !perf_exclude_event(bp, regs))
6097 perf_swevent_event(bp, 1, &sample, regs);
6098 }
6099 #endif
6100
6101 /*
6102 * hrtimer based swevent callback
6103 */
6104
6105 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
6106 {
6107 enum hrtimer_restart ret = HRTIMER_RESTART;
6108 struct perf_sample_data data;
6109 struct pt_regs *regs;
6110 struct perf_event *event;
6111 u64 period;
6112
6113 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
6114
6115 if (event->state != PERF_EVENT_STATE_ACTIVE)
6116 return HRTIMER_NORESTART;
6117
6118 event->pmu->read(event);
6119
6120 perf_sample_data_init(&data, 0, event->hw.last_period);
6121 regs = get_irq_regs();
6122
6123 if (regs && !perf_exclude_event(event, regs)) {
6124 if (!(event->attr.exclude_idle && is_idle_task(current)))
6125 if (__perf_event_overflow(event, 1, &data, regs))
6126 ret = HRTIMER_NORESTART;
6127 }
6128
6129 period = max_t(u64, 10000, event->hw.sample_period);
6130 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
6131
6132 return ret;
6133 }
6134
6135 static void perf_swevent_start_hrtimer(struct perf_event *event)
6136 {
6137 struct hw_perf_event *hwc = &event->hw;
6138 s64 period;
6139
6140 if (!is_sampling_event(event))
6141 return;
6142
6143 period = local64_read(&hwc->period_left);
6144 if (period) {
6145 if (period < 0)
6146 period = 10000;
6147
6148 local64_set(&hwc->period_left, 0);
6149 } else {
6150 period = max_t(u64, 10000, hwc->sample_period);
6151 }
6152 __hrtimer_start_range_ns(&hwc->hrtimer,
6153 ns_to_ktime(period), 0,
6154 HRTIMER_MODE_REL_PINNED, 0);
6155 }
6156
6157 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
6158 {
6159 struct hw_perf_event *hwc = &event->hw;
6160
6161 if (is_sampling_event(event)) {
6162 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
6163 local64_set(&hwc->period_left, ktime_to_ns(remaining));
6164
6165 hrtimer_cancel(&hwc->hrtimer);
6166 }
6167 }
6168
6169 static void perf_swevent_init_hrtimer(struct perf_event *event)
6170 {
6171 struct hw_perf_event *hwc = &event->hw;
6172
6173 if (!is_sampling_event(event))
6174 return;
6175
6176 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6177 hwc->hrtimer.function = perf_swevent_hrtimer;
6178
6179 /*
6180 * Since hrtimers have a fixed rate, we can do a static freq->period
6181 * mapping and avoid the whole period adjust feedback stuff.
6182 */
6183 if (event->attr.freq) {
6184 long freq = event->attr.sample_freq;
6185
6186 event->attr.sample_period = NSEC_PER_SEC / freq;
6187 hwc->sample_period = event->attr.sample_period;
6188 local64_set(&hwc->period_left, hwc->sample_period);
6189 hwc->last_period = hwc->sample_period;
6190 event->attr.freq = 0;
6191 }
6192 }
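
/*
 * Example of the static freq->period mapping above: a request for
 * attr.sample_freq == 4000 Hz becomes a fixed sample_period of
 * NSEC_PER_SEC / 4000 = 250000 ns, so the hrtimer fires every 250 us and
 * attr.freq is cleared to bypass the period-adjustment feedback loop.
 */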
6193
6194 /*
6195 * Software event: cpu wall time clock
6196 */
6197
6198 static void cpu_clock_event_update(struct perf_event *event)
6199 {
6200 s64 prev;
6201 u64 now;
6202
6203 now = local_clock();
6204 prev = local64_xchg(&event->hw.prev_count, now);
6205 local64_add(now - prev, &event->count);
6206 }
6207
6208 static void cpu_clock_event_start(struct perf_event *event, int flags)
6209 {
6210 local64_set(&event->hw.prev_count, local_clock());
6211 perf_swevent_start_hrtimer(event);
6212 }
6213
6214 static void cpu_clock_event_stop(struct perf_event *event, int flags)
6215 {
6216 perf_swevent_cancel_hrtimer(event);
6217 cpu_clock_event_update(event);
6218 }
6219
6220 static int cpu_clock_event_add(struct perf_event *event, int flags)
6221 {
6222 if (flags & PERF_EF_START)
6223 cpu_clock_event_start(event, flags);
6224
6225 return 0;
6226 }
6227
6228 static void cpu_clock_event_del(struct perf_event *event, int flags)
6229 {
6230 cpu_clock_event_stop(event, flags);
6231 }
6232
6233 static void cpu_clock_event_read(struct perf_event *event)
6234 {
6235 cpu_clock_event_update(event);
6236 }
6237
6238 static int cpu_clock_event_init(struct perf_event *event)
6239 {
6240 if (event->attr.type != PERF_TYPE_SOFTWARE)
6241 return -ENOENT;
6242
6243 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
6244 return -ENOENT;
6245
6246 /*
6247 * no branch sampling for software events
6248 */
6249 if (has_branch_stack(event))
6250 return -EOPNOTSUPP;
6251
6252 perf_swevent_init_hrtimer(event);
6253
6254 return 0;
6255 }
6256
6257 static struct pmu perf_cpu_clock = {
6258 .task_ctx_nr = perf_sw_context,
6259
6260 .event_init = cpu_clock_event_init,
6261 .add = cpu_clock_event_add,
6262 .del = cpu_clock_event_del,
6263 .start = cpu_clock_event_start,
6264 .stop = cpu_clock_event_stop,
6265 .read = cpu_clock_event_read,
6266
6267 .event_idx = perf_swevent_event_idx,
6268 };
6269
6270 /*
6271 * Software event: task time clock
6272 */
6273
6274 static void task_clock_event_update(struct perf_event *event, u64 now)
6275 {
6276 u64 prev;
6277 s64 delta;
6278
6279 prev = local64_xchg(&event->hw.prev_count, now);
6280 delta = now - prev;
6281 local64_add(delta, &event->count);
6282 }
6283
6284 static void task_clock_event_start(struct perf_event *event, int flags)
6285 {
6286 local64_set(&event->hw.prev_count, event->ctx->time);
6287 perf_swevent_start_hrtimer(event);
6288 }
6289
6290 static void task_clock_event_stop(struct perf_event *event, int flags)
6291 {
6292 perf_swevent_cancel_hrtimer(event);
6293 task_clock_event_update(event, event->ctx->time);
6294 }
6295
6296 static int task_clock_event_add(struct perf_event *event, int flags)
6297 {
6298 if (flags & PERF_EF_START)
6299 task_clock_event_start(event, flags);
6300
6301 return 0;
6302 }
6303
6304 static void task_clock_event_del(struct perf_event *event, int flags)
6305 {
6306 task_clock_event_stop(event, PERF_EF_UPDATE);
6307 }
6308
6309 static void task_clock_event_read(struct perf_event *event)
6310 {
6311 u64 now = perf_clock();
6312 u64 delta = now - event->ctx->timestamp;
6313 u64 time = event->ctx->time + delta;
6314
6315 task_clock_event_update(event, time);
6316 }
6317
6318 static int task_clock_event_init(struct perf_event *event)
6319 {
6320 if (event->attr.type != PERF_TYPE_SOFTWARE)
6321 return -ENOENT;
6322
6323 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
6324 return -ENOENT;
6325
6326 /*
6327 * no branch sampling for software events
6328 */
6329 if (has_branch_stack(event))
6330 return -EOPNOTSUPP;
6331
6332 perf_swevent_init_hrtimer(event);
6333
6334 return 0;
6335 }
6336
6337 static struct pmu perf_task_clock = {
6338 .task_ctx_nr = perf_sw_context,
6339
6340 .event_init = task_clock_event_init,
6341 .add = task_clock_event_add,
6342 .del = task_clock_event_del,
6343 .start = task_clock_event_start,
6344 .stop = task_clock_event_stop,
6345 .read = task_clock_event_read,
6346
6347 .event_idx = perf_swevent_event_idx,
6348 };
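/*
 * A minimal sketch (for illustration only) of the attribute settings that
 * cpu_clock_event_init() and task_clock_event_init() above accept; all
 * other fields are left zero.  Such an attr would typically reach these
 * PMUs via sys_perf_event_open() or perf_event_create_kernel_counter()
 * further below.
 */
static struct perf_event_attr sw_cpu_clock_attr __maybe_unused = {
	.type	= PERF_TYPE_SOFTWARE,
	.config	= PERF_COUNT_SW_CPU_CLOCK,	/* handled by perf_cpu_clock */
	.size	= sizeof(struct perf_event_attr),
};

static struct perf_event_attr sw_task_clock_attr __maybe_unused = {
	.type	= PERF_TYPE_SOFTWARE,
	.config	= PERF_COUNT_SW_TASK_CLOCK,	/* handled by perf_task_clock */
	.size	= sizeof(struct perf_event_attr),
};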
6349
6350 static void perf_pmu_nop_void(struct pmu *pmu)
6351 {
6352 }
6353
6354 static int perf_pmu_nop_int(struct pmu *pmu)
6355 {
6356 return 0;
6357 }
6358
6359 static void perf_pmu_start_txn(struct pmu *pmu)
6360 {
6361 perf_pmu_disable(pmu);
6362 }
6363
6364 static int perf_pmu_commit_txn(struct pmu *pmu)
6365 {
6366 perf_pmu_enable(pmu);
6367 return 0;
6368 }
6369
6370 static void perf_pmu_cancel_txn(struct pmu *pmu)
6371 {
6372 perf_pmu_enable(pmu);
6373 }
6374
6375 static int perf_event_idx_default(struct perf_event *event)
6376 {
6377 return event->hw.idx + 1;
6378 }
6379
6380 /*
6381 * Ensures all contexts with the same task_ctx_nr have the same
6382 * pmu_cpu_context too.
6383 */
6384 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
6385 {
6386 struct pmu *pmu;
6387
6388 if (ctxn < 0)
6389 return NULL;
6390
6391 list_for_each_entry(pmu, &pmus, entry) {
6392 if (pmu->task_ctx_nr == ctxn)
6393 return pmu->pmu_cpu_context;
6394 }
6395
6396 return NULL;
6397 }
6398
6399 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
6400 {
6401 int cpu;
6402
6403 for_each_possible_cpu(cpu) {
6404 struct perf_cpu_context *cpuctx;
6405
6406 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6407
6408 if (cpuctx->unique_pmu == old_pmu)
6409 cpuctx->unique_pmu = pmu;
6410 }
6411 }
6412
6413 static void free_pmu_context(struct pmu *pmu)
6414 {
6415 struct pmu *i;
6416
6417 mutex_lock(&pmus_lock);
6418 /*
6419 * Like a real lame refcount.
6420 */
6421 list_for_each_entry(i, &pmus, entry) {
6422 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
6423 update_pmu_context(i, pmu);
6424 goto out;
6425 }
6426 }
6427
6428 free_percpu(pmu->pmu_cpu_context);
6429 out:
6430 mutex_unlock(&pmus_lock);
6431 }
6432 static struct idr pmu_idr;
6433
6434 static ssize_t
6435 type_show(struct device *dev, struct device_attribute *attr, char *page)
6436 {
6437 struct pmu *pmu = dev_get_drvdata(dev);
6438
6439 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
6440 }
6441 static DEVICE_ATTR_RO(type);
6442
6443 static ssize_t
6444 perf_event_mux_interval_ms_show(struct device *dev,
6445 struct device_attribute *attr,
6446 char *page)
6447 {
6448 struct pmu *pmu = dev_get_drvdata(dev);
6449
6450 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
6451 }
6452
6453 static ssize_t
6454 perf_event_mux_interval_ms_store(struct device *dev,
6455 struct device_attribute *attr,
6456 const char *buf, size_t count)
6457 {
6458 struct pmu *pmu = dev_get_drvdata(dev);
6459 int timer, cpu, ret;
6460
6461 ret = kstrtoint(buf, 0, &timer);
6462 if (ret)
6463 return ret;
6464
6465 if (timer < 1)
6466 return -EINVAL;
6467
6468 /* same value, nothing to do */
6469 if (timer == pmu->hrtimer_interval_ms)
6470 return count;
6471
6472 pmu->hrtimer_interval_ms = timer;
6473
6474 /* update all cpuctx for this PMU */
6475 for_each_possible_cpu(cpu) {
6476 struct perf_cpu_context *cpuctx;
6477 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6478 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
6479
6480 if (hrtimer_active(&cpuctx->hrtimer))
6481 hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
6482 }
6483
6484 return count;
6485 }
6486 static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
6487
6488 static struct attribute *pmu_dev_attrs[] = {
6489 &dev_attr_type.attr,
6490 &dev_attr_perf_event_mux_interval_ms.attr,
6491 NULL,
6492 };
6493 ATTRIBUTE_GROUPS(pmu_dev);
6494
6495 static int pmu_bus_running;
6496 static struct bus_type pmu_bus = {
6497 .name = "event_source",
6498 .dev_groups = pmu_dev_groups,
6499 };
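/*
 * With the bus and attributes above, each named PMU shows up in sysfs
 * (typically as /sys/bus/event_source/devices/<pmu>/), where "type" is the
 * value user space puts into perf_event_attr.type for dynamically
 * allocated PMU types, and "perf_event_mux_interval_ms" tunes the
 * multiplexing hrtimer interval per PMU (values below 1 ms are rejected
 * by the store handler above).
 */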
6500
6501 static void pmu_dev_release(struct device *dev)
6502 {
6503 kfree(dev);
6504 }
6505
6506 static int pmu_dev_alloc(struct pmu *pmu)
6507 {
6508 int ret = -ENOMEM;
6509
6510 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
6511 if (!pmu->dev)
6512 goto out;
6513
6514 pmu->dev->groups = pmu->attr_groups;
6515 device_initialize(pmu->dev);
6516 ret = dev_set_name(pmu->dev, "%s", pmu->name);
6517 if (ret)
6518 goto free_dev;
6519
6520 dev_set_drvdata(pmu->dev, pmu);
6521 pmu->dev->bus = &pmu_bus;
6522 pmu->dev->release = pmu_dev_release;
6523 ret = device_add(pmu->dev);
6524 if (ret)
6525 goto free_dev;
6526
6527 out:
6528 return ret;
6529
6530 free_dev:
6531 put_device(pmu->dev);
6532 goto out;
6533 }
6534
6535 static struct lock_class_key cpuctx_mutex;
6536 static struct lock_class_key cpuctx_lock;
6537
6538 int perf_pmu_register(struct pmu *pmu, const char *name, int type)
6539 {
6540 int cpu, ret;
6541
6542 mutex_lock(&pmus_lock);
6543 ret = -ENOMEM;
6544 pmu->pmu_disable_count = alloc_percpu(int);
6545 if (!pmu->pmu_disable_count)
6546 goto unlock;
6547
6548 pmu->type = -1;
6549 if (!name)
6550 goto skip_type;
6551 pmu->name = name;
6552
6553 if (type < 0) {
6554 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
6555 if (type < 0) {
6556 ret = type;
6557 goto free_pdc;
6558 }
6559 }
6560 pmu->type = type;
6561
6562 if (pmu_bus_running) {
6563 ret = pmu_dev_alloc(pmu);
6564 if (ret)
6565 goto free_idr;
6566 }
6567
6568 skip_type:
6569 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
6570 if (pmu->pmu_cpu_context)
6571 goto got_cpu_context;
6572
6573 ret = -ENOMEM;
6574 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
6575 if (!pmu->pmu_cpu_context)
6576 goto free_dev;
6577
6578 for_each_possible_cpu(cpu) {
6579 struct perf_cpu_context *cpuctx;
6580
6581 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6582 __perf_event_init_context(&cpuctx->ctx);
6583 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
6584 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
6585 cpuctx->ctx.type = cpu_context;
6586 cpuctx->ctx.pmu = pmu;
6587
6588 __perf_cpu_hrtimer_init(cpuctx, cpu);
6589
6590 INIT_LIST_HEAD(&cpuctx->rotation_list);
6591 cpuctx->unique_pmu = pmu;
6592 }
6593
6594 got_cpu_context:
6595 if (!pmu->start_txn) {
6596 if (pmu->pmu_enable) {
6597 /*
6598 * If we have pmu_enable/pmu_disable calls, install
6599 * transaction stubs that use that to try and batch
6600 * hardware accesses.
6601 */
6602 pmu->start_txn = perf_pmu_start_txn;
6603 pmu->commit_txn = perf_pmu_commit_txn;
6604 pmu->cancel_txn = perf_pmu_cancel_txn;
6605 } else {
6606 pmu->start_txn = perf_pmu_nop_void;
6607 pmu->commit_txn = perf_pmu_nop_int;
6608 pmu->cancel_txn = perf_pmu_nop_void;
6609 }
6610 }
6611
6612 if (!pmu->pmu_enable) {
6613 pmu->pmu_enable = perf_pmu_nop_void;
6614 pmu->pmu_disable = perf_pmu_nop_void;
6615 }
6616
6617 if (!pmu->event_idx)
6618 pmu->event_idx = perf_event_idx_default;
6619
6620 list_add_rcu(&pmu->entry, &pmus);
6621 ret = 0;
6622 unlock:
6623 mutex_unlock(&pmus_lock);
6624
6625 return ret;
6626
6627 free_dev:
6628 device_del(pmu->dev);
6629 put_device(pmu->dev);
6630
6631 free_idr:
6632 if (pmu->type >= PERF_TYPE_MAX)
6633 idr_remove(&pmu_idr, pmu->type);
6634
6635 free_pdc:
6636 free_percpu(pmu->pmu_disable_count);
6637 goto unlock;
6638 }
6639 EXPORT_SYMBOL_GPL(perf_pmu_register);
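/*
 * A minimal sketch of registering a software-style PMU with the helper
 * above; all mypmu_* names are invented for illustration and the callbacks
 * are stubs.  Leaving start_txn/commit_txn/cancel_txn, pmu_enable/
 * pmu_disable and event_idx unset lets perf_pmu_register() fill in the
 * nop/default implementations shown earlier.
 */
static int mypmu_event_init(struct perf_event *event)
{
	/* only claim events whose attr.type matches our dynamic type */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;
	return 0;
}

static int  mypmu_add(struct perf_event *event, int flags)	{ return 0; }
static void mypmu_del(struct perf_event *event, int flags)	{ }
static void mypmu_start(struct perf_event *event, int flags)	{ }
static void mypmu_stop(struct perf_event *event, int flags)	{ }
static void mypmu_read(struct perf_event *event)		{ }

static struct pmu mypmu = {
	.task_ctx_nr	= perf_invalid_context,	/* CPU-bound events only */
	.event_init	= mypmu_event_init,
	.add		= mypmu_add,
	.del		= mypmu_del,
	.start		= mypmu_start,
	.stop		= mypmu_stop,
	.read		= mypmu_read,
};

/* from a driver's init path; -1 asks for a dynamically allocated type id */
/* err = perf_pmu_register(&mypmu, "mypmu", -1); */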
6640
6641 void perf_pmu_unregister(struct pmu *pmu)
6642 {
6643 mutex_lock(&pmus_lock);
6644 list_del_rcu(&pmu->entry);
6645 mutex_unlock(&pmus_lock);
6646
6647 /*
6648 * We dereference the pmu list under both SRCU and regular RCU, so
6649 * synchronize against both of those.
6650 */
6651 synchronize_srcu(&pmus_srcu);
6652 synchronize_rcu();
6653
6654 free_percpu(pmu->pmu_disable_count);
6655 if (pmu->type >= PERF_TYPE_MAX)
6656 idr_remove(&pmu_idr, pmu->type);
6657 device_del(pmu->dev);
6658 put_device(pmu->dev);
6659 free_pmu_context(pmu);
6660 }
6661 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
6662
6663 struct pmu *perf_init_event(struct perf_event *event)
6664 {
6665 struct pmu *pmu = NULL;
6666 int idx;
6667 int ret;
6668
6669 idx = srcu_read_lock(&pmus_srcu);
6670
6671 rcu_read_lock();
6672 pmu = idr_find(&pmu_idr, event->attr.type);
6673 rcu_read_unlock();
6674 if (pmu) {
6675 if (!try_module_get(pmu->module)) {
6676 pmu = ERR_PTR(-ENODEV);
6677 goto unlock;
6678 }
6679 event->pmu = pmu;
6680 ret = pmu->event_init(event);
6681 if (ret)
6682 pmu = ERR_PTR(ret);
6683 goto unlock;
6684 }
6685
6686 list_for_each_entry_rcu(pmu, &pmus, entry) {
6687 if (!try_module_get(pmu->module)) {
6688 pmu = ERR_PTR(-ENODEV);
6689 goto unlock;
6690 }
6691 event->pmu = pmu;
6692 ret = pmu->event_init(event);
6693 if (!ret)
6694 goto unlock;
6695
6696 if (ret != -ENOENT) {
6697 pmu = ERR_PTR(ret);
6698 goto unlock;
6699 }
6700 }
6701 pmu = ERR_PTR(-ENOENT);
6702 unlock:
6703 srcu_read_unlock(&pmus_srcu, idx);
6704
6705 return pmu;
6706 }
6707
6708 static void account_event_cpu(struct perf_event *event, int cpu)
6709 {
6710 if (event->parent)
6711 return;
6712
6713 if (has_branch_stack(event)) {
6714 if (!(event->attach_state & PERF_ATTACH_TASK))
6715 atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
6716 }
6717 if (is_cgroup_event(event))
6718 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
6719 }
6720
6721 static void account_event(struct perf_event *event)
6722 {
6723 if (event->parent)
6724 return;
6725
6726 if (event->attach_state & PERF_ATTACH_TASK)
6727 static_key_slow_inc(&perf_sched_events.key);
6728 if (event->attr.mmap || event->attr.mmap_data)
6729 atomic_inc(&nr_mmap_events);
6730 if (event->attr.comm)
6731 atomic_inc(&nr_comm_events);
6732 if (event->attr.task)
6733 atomic_inc(&nr_task_events);
6734 if (event->attr.freq) {
6735 if (atomic_inc_return(&nr_freq_events) == 1)
6736 tick_nohz_full_kick_all();
6737 }
6738 if (has_branch_stack(event))
6739 static_key_slow_inc(&perf_sched_events.key);
6740 if (is_cgroup_event(event))
6741 static_key_slow_inc(&perf_sched_events.key);
6742
6743 account_event_cpu(event, event->cpu);
6744 }
6745
6746 /*
6747 * Allocate and initialize an event structure
6748 */
6749 static struct perf_event *
6750 perf_event_alloc(struct perf_event_attr *attr, int cpu,
6751 struct task_struct *task,
6752 struct perf_event *group_leader,
6753 struct perf_event *parent_event,
6754 perf_overflow_handler_t overflow_handler,
6755 void *context)
6756 {
6757 struct pmu *pmu;
6758 struct perf_event *event;
6759 struct hw_perf_event *hwc;
6760 long err = -EINVAL;
6761
6762 if ((unsigned)cpu >= nr_cpu_ids) {
6763 if (!task || cpu != -1)
6764 return ERR_PTR(-EINVAL);
6765 }
6766
6767 event = kzalloc(sizeof(*event), GFP_KERNEL);
6768 if (!event)
6769 return ERR_PTR(-ENOMEM);
6770
6771 /*
6772 * Single events are their own group leaders, with an
6773 * empty sibling list:
6774 */
6775 if (!group_leader)
6776 group_leader = event;
6777
6778 mutex_init(&event->child_mutex);
6779 INIT_LIST_HEAD(&event->child_list);
6780
6781 INIT_LIST_HEAD(&event->group_entry);
6782 INIT_LIST_HEAD(&event->event_entry);
6783 INIT_LIST_HEAD(&event->sibling_list);
6784 INIT_LIST_HEAD(&event->rb_entry);
6785 INIT_LIST_HEAD(&event->active_entry);
6786 INIT_HLIST_NODE(&event->hlist_entry);
6787
6788
6789 init_waitqueue_head(&event->waitq);
6790 init_irq_work(&event->pending, perf_pending_event);
6791
6792 mutex_init(&event->mmap_mutex);
6793
6794 atomic_long_set(&event->refcount, 1);
6795 event->cpu = cpu;
6796 event->attr = *attr;
6797 event->group_leader = group_leader;
6798 event->pmu = NULL;
6799 event->oncpu = -1;
6800
6801 event->parent = parent_event;
6802
6803 event->ns = get_pid_ns(task_active_pid_ns(current));
6804 event->id = atomic64_inc_return(&perf_event_id);
6805
6806 event->state = PERF_EVENT_STATE_INACTIVE;
6807
6808 if (task) {
6809 event->attach_state = PERF_ATTACH_TASK;
6810
6811 if (attr->type == PERF_TYPE_TRACEPOINT)
6812 event->hw.tp_target = task;
6813 #ifdef CONFIG_HAVE_HW_BREAKPOINT
6814 /*
6815 * hw_breakpoint is a bit difficult here..
6816 */
6817 else if (attr->type == PERF_TYPE_BREAKPOINT)
6818 event->hw.bp_target = task;
6819 #endif
6820 }
6821
6822 if (!overflow_handler && parent_event) {
6823 overflow_handler = parent_event->overflow_handler;
6824 context = parent_event->overflow_handler_context;
6825 }
6826
6827 event->overflow_handler = overflow_handler;
6828 event->overflow_handler_context = context;
6829
6830 perf_event__state_init(event);
6831
6832 pmu = NULL;
6833
6834 hwc = &event->hw;
6835 hwc->sample_period = attr->sample_period;
6836 if (attr->freq && attr->sample_freq)
6837 hwc->sample_period = 1;
6838 hwc->last_period = hwc->sample_period;
6839
6840 local64_set(&hwc->period_left, hwc->sample_period);
6841
6842 /*
6843 * we currently do not support PERF_FORMAT_GROUP on inherited events
6844 */
6845 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
6846 goto err_ns;
6847
6848 pmu = perf_init_event(event);
6849 if (!pmu)
6850 goto err_ns;
6851 else if (IS_ERR(pmu)) {
6852 err = PTR_ERR(pmu);
6853 goto err_ns;
6854 }
6855
6856 if (!event->parent) {
6857 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6858 err = get_callchain_buffers();
6859 if (err)
6860 goto err_pmu;
6861 }
6862 }
6863
6864 return event;
6865
6866 err_pmu:
6867 if (event->destroy)
6868 event->destroy(event);
6869 module_put(pmu->module);
6870 err_ns:
6871 if (event->ns)
6872 put_pid_ns(event->ns);
6873 kfree(event);
6874
6875 return ERR_PTR(err);
6876 }
6877
6878 static int perf_copy_attr(struct perf_event_attr __user *uattr,
6879 struct perf_event_attr *attr)
6880 {
6881 u32 size;
6882 int ret;
6883
6884 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
6885 return -EFAULT;
6886
6887 /*
6888 * zero the full structure, so that a short copy leaves the remaining fields zeroed.
6889 */
6890 memset(attr, 0, sizeof(*attr));
6891
6892 ret = get_user(size, &uattr->size);
6893 if (ret)
6894 return ret;
6895
6896 if (size > PAGE_SIZE) /* silly large */
6897 goto err_size;
6898
6899 if (!size) /* abi compat */
6900 size = PERF_ATTR_SIZE_VER0;
6901
6902 if (size < PERF_ATTR_SIZE_VER0)
6903 goto err_size;
6904
6905 /*
6906 * If we're handed a bigger struct than we know of,
6907 * ensure all the unknown bits are 0 - i.e. new
6908 * user-space does not rely on any kernel feature
6909 * extensions we don't know about yet.
6910 */
6911 if (size > sizeof(*attr)) {
6912 unsigned char __user *addr;
6913 unsigned char __user *end;
6914 unsigned char val;
6915
6916 addr = (void __user *)uattr + sizeof(*attr);
6917 end = (void __user *)uattr + size;
6918
6919 for (; addr < end; addr++) {
6920 ret = get_user(val, addr);
6921 if (ret)
6922 return ret;
6923 if (val)
6924 goto err_size;
6925 }
6926 size = sizeof(*attr);
6927 }
6928
6929 ret = copy_from_user(attr, uattr, size);
6930 if (ret)
6931 return -EFAULT;
6932
6933 if (attr->__reserved_1)
6934 return -EINVAL;
6935
6936 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
6937 return -EINVAL;
6938
6939 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
6940 return -EINVAL;
6941
6942 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
6943 u64 mask = attr->branch_sample_type;
6944
6945 /* only using defined bits */
6946 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
6947 return -EINVAL;
6948
6949 /* at least one branch bit must be set */
6950 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
6951 return -EINVAL;
6952
6953 /* propagate priv level, when not set for branch */
6954 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
6955
6956 /* exclude_kernel checked on syscall entry */
6957 if (!attr->exclude_kernel)
6958 mask |= PERF_SAMPLE_BRANCH_KERNEL;
6959
6960 if (!attr->exclude_user)
6961 mask |= PERF_SAMPLE_BRANCH_USER;
6962
6963 if (!attr->exclude_hv)
6964 mask |= PERF_SAMPLE_BRANCH_HV;
6965 /*
6966 * adjust user setting (for HW filter setup)
6967 */
6968 attr->branch_sample_type = mask;
6969 }
6970 /* privileged levels capture (kernel, hv): check permissions */
6971 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
6972 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6973 return -EACCES;
6974 }
6975
6976 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
6977 ret = perf_reg_validate(attr->sample_regs_user);
6978 if (ret)
6979 return ret;
6980 }
6981
6982 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
6983 if (!arch_perf_have_user_stack_dump())
6984 return -ENOSYS;
6985
6986 /*
6987 * We have __u32 type for the size, but so far
6988 * we can only use __u16 as maximum due to the
6989 * __u16 sample size limit.
6990 */
6991 if (attr->sample_stack_user >= USHRT_MAX)
6992 ret = -EINVAL;
6993 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
6994 ret = -EINVAL;
6995 }
6996
6997 out:
6998 return ret;
6999
7000 err_size:
7001 put_user(sizeof(*attr), &uattr->size);
7002 ret = -E2BIG;
7003 goto out;
7004 }
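/*
 * The size handshake above is what lets the perf_event_attr ABI grow:
 * user space passes .size = sizeof(its idea of the struct).  If that is
 * larger than what this kernel knows and any of the extra bytes are
 * non-zero, perf_copy_attr() writes its own sizeof(struct perf_event_attr)
 * back into uattr->size and fails with -E2BIG, so a newer tool (perf, for
 * instance) can clear the unsupported fields or shrink the size and retry.
 */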
7005
7006 static int
7007 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
7008 {
7009 struct ring_buffer *rb = NULL;
7010 int ret = -EINVAL;
7011
7012 if (!output_event)
7013 goto set;
7014
7015 /* don't allow circular references */
7016 if (event == output_event)
7017 goto out;
7018
7019 /*
7020 * Don't allow cross-cpu buffers
7021 */
7022 if (output_event->cpu != event->cpu)
7023 goto out;
7024
7025 /*
7026 * If it's not a per-cpu rb, it must be the same task.
7027 */
7028 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
7029 goto out;
7030
7031 set:
7032 mutex_lock(&event->mmap_mutex);
7033 /* Can't redirect output if we've got an active mmap() */
7034 if (atomic_read(&event->mmap_count))
7035 goto unlock;
7036
7037 if (output_event) {
7038 /* get the rb we want to redirect to */
7039 rb = ring_buffer_get(output_event);
7040 if (!rb)
7041 goto unlock;
7042 }
7043
7044 ring_buffer_attach(event, rb);
7045
7046 ret = 0;
7047 unlock:
7048 mutex_unlock(&event->mmap_mutex);
7049
7050 out:
7051 return ret;
7052 }
7053
7054 /**
7055 * sys_perf_event_open - open a performance event, associate it to a task/cpu
7056 *
7057 * @attr_uptr: event_id type attributes for monitoring/sampling
7058 * @pid: target pid
7059 * @cpu: target cpu
7060 * @group_fd: group leader event fd
7061 */
7062 SYSCALL_DEFINE5(perf_event_open,
7063 struct perf_event_attr __user *, attr_uptr,
7064 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
7065 {
7066 struct perf_event *group_leader = NULL, *output_event = NULL;
7067 struct perf_event *event, *sibling;
7068 struct perf_event_attr attr;
7069 struct perf_event_context *ctx;
7070 struct file *event_file = NULL;
7071 struct fd group = {NULL, 0};
7072 struct task_struct *task = NULL;
7073 struct pmu *pmu;
7074 int event_fd;
7075 int move_group = 0;
7076 int err;
7077 int f_flags = O_RDWR;
7078
7079 /* for future expandability... */
7080 if (flags & ~PERF_FLAG_ALL)
7081 return -EINVAL;
7082
7083 err = perf_copy_attr(attr_uptr, &attr);
7084 if (err)
7085 return err;
7086
7087 if (!attr.exclude_kernel) {
7088 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
7089 return -EACCES;
7090 }
7091
7092 if (attr.freq) {
7093 if (attr.sample_freq > sysctl_perf_event_sample_rate)
7094 return -EINVAL;
7095 } else {
7096 if (attr.sample_period & (1ULL << 63))
7097 return -EINVAL;
7098 }
7099
7100 /*
7101 * In cgroup mode, the pid argument is used to pass the fd
7102 * opened to the cgroup directory in cgroupfs. The cpu argument
7103 * designates the cpu on which to monitor threads from that
7104 * cgroup.
7105 */
7106 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
7107 return -EINVAL;
7108
7109 if (flags & PERF_FLAG_FD_CLOEXEC)
7110 f_flags |= O_CLOEXEC;
7111
7112 event_fd = get_unused_fd_flags(f_flags);
7113 if (event_fd < 0)
7114 return event_fd;
7115
7116 if (group_fd != -1) {
7117 err = perf_fget_light(group_fd, &group);
7118 if (err)
7119 goto err_fd;
7120 group_leader = group.file->private_data;
7121 if (flags & PERF_FLAG_FD_OUTPUT)
7122 output_event = group_leader;
7123 if (flags & PERF_FLAG_FD_NO_GROUP)
7124 group_leader = NULL;
7125 }
7126
7127 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
7128 task = find_lively_task_by_vpid(pid);
7129 if (IS_ERR(task)) {
7130 err = PTR_ERR(task);
7131 goto err_group_fd;
7132 }
7133 }
7134
7135 if (task && group_leader &&
7136 group_leader->attr.inherit != attr.inherit) {
7137 err = -EINVAL;
7138 goto err_task;
7139 }
7140
7141 get_online_cpus();
7142
7143 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
7144 NULL, NULL);
7145 if (IS_ERR(event)) {
7146 err = PTR_ERR(event);
7147 goto err_cpus;
7148 }
7149
7150 if (flags & PERF_FLAG_PID_CGROUP) {
7151 err = perf_cgroup_connect(pid, event, &attr, group_leader);
7152 if (err) {
7153 __free_event(event);
7154 goto err_cpus;
7155 }
7156 }
7157
7158 if (is_sampling_event(event)) {
7159 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
7160 err = -ENOTSUPP;
7161 goto err_alloc;
7162 }
7163 }
7164
7165 account_event(event);
7166
7167 /*
7168 * Special case software events and allow them to be part of
7169 * any hardware group.
7170 */
7171 pmu = event->pmu;
7172
7173 if (group_leader &&
7174 (is_software_event(event) != is_software_event(group_leader))) {
7175 if (is_software_event(event)) {
7176 /*
7177 * If event and group_leader are not both a software
7178 * event, and event is, then group leader is not.
7179 *
7180 * Allow the addition of software events to !software
7181 * groups; this is safe because software events never
7182 * fail to schedule.
7183 */
7184 pmu = group_leader->pmu;
7185 } else if (is_software_event(group_leader) &&
7186 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
7187 /*
7188 * In case the group is a pure software group, and we
7189 * try to add a hardware event, move the whole group to
7190 * the hardware context.
7191 */
7192 move_group = 1;
7193 }
7194 }
7195
7196 /*
7197 * Get the target context (task or percpu):
7198 */
7199 ctx = find_get_context(pmu, task, event->cpu);
7200 if (IS_ERR(ctx)) {
7201 err = PTR_ERR(ctx);
7202 goto err_alloc;
7203 }
7204
7205 if (task) {
7206 put_task_struct(task);
7207 task = NULL;
7208 }
7209
7210 /*
7211 * Look up the group leader (we will attach this event to it):
7212 */
7213 if (group_leader) {
7214 err = -EINVAL;
7215
7216 /*
7217 * Do not allow a recursive hierarchy (this new sibling
7218 * becoming part of another group-sibling):
7219 */
7220 if (group_leader->group_leader != group_leader)
7221 goto err_context;
7222 /*
7223 * Do not allow attaching to a group in a different
7224 * task or CPU context:
7225 */
7226 if (move_group) {
7227 if (group_leader->ctx->type != ctx->type)
7228 goto err_context;
7229 } else {
7230 if (group_leader->ctx != ctx)
7231 goto err_context;
7232 }
7233
7234 /*
7235 * Only a group leader can be exclusive or pinned
7236 */
7237 if (attr.exclusive || attr.pinned)
7238 goto err_context;
7239 }
7240
7241 if (output_event) {
7242 err = perf_event_set_output(event, output_event);
7243 if (err)
7244 goto err_context;
7245 }
7246
7247 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
7248 f_flags);
7249 if (IS_ERR(event_file)) {
7250 err = PTR_ERR(event_file);
7251 goto err_context;
7252 }
7253
7254 if (move_group) {
7255 struct perf_event_context *gctx = group_leader->ctx;
7256
7257 mutex_lock(&gctx->mutex);
7258 perf_remove_from_context(group_leader, false);
7259
7260 /*
7261 * Removing from the context ends up with a disabled
7262 * event. What we want here is the event in its initial
7263 * startup state, ready to be added into the new context.
7264 */
7265 perf_event__state_init(group_leader);
7266 list_for_each_entry(sibling, &group_leader->sibling_list,
7267 group_entry) {
7268 perf_remove_from_context(sibling, false);
7269 perf_event__state_init(sibling);
7270 put_ctx(gctx);
7271 }
7272 mutex_unlock(&gctx->mutex);
7273 put_ctx(gctx);
7274 }
7275
7276 WARN_ON_ONCE(ctx->parent_ctx);
7277 mutex_lock(&ctx->mutex);
7278
7279 if (move_group) {
7280 synchronize_rcu();
7281 perf_install_in_context(ctx, group_leader, event->cpu);
7282 get_ctx(ctx);
7283 list_for_each_entry(sibling, &group_leader->sibling_list,
7284 group_entry) {
7285 perf_install_in_context(ctx, sibling, event->cpu);
7286 get_ctx(ctx);
7287 }
7288 }
7289
7290 perf_install_in_context(ctx, event, event->cpu);
7291 perf_unpin_context(ctx);
7292 mutex_unlock(&ctx->mutex);
7293
7294 put_online_cpus();
7295
7296 event->owner = current;
7297
7298 mutex_lock(&current->perf_event_mutex);
7299 list_add_tail(&event->owner_entry, &current->perf_event_list);
7300 mutex_unlock(&current->perf_event_mutex);
7301
7302 /*
7303 * Precalculate sample_data sizes
7304 */
7305 perf_event__header_size(event);
7306 perf_event__id_header_size(event);
7307
7308 /*
7309 * Drop the reference on the group_event after placing the
7310 * new event on the sibling_list. This ensures destruction
7311 * of the group leader will find the pointer to itself in
7312 * perf_group_detach().
7313 */
7314 fdput(group);
7315 fd_install(event_fd, event_file);
7316 return event_fd;
7317
7318 err_context:
7319 perf_unpin_context(ctx);
7320 put_ctx(ctx);
7321 err_alloc:
7322 free_event(event);
7323 err_cpus:
7324 put_online_cpus();
7325 err_task:
7326 if (task)
7327 put_task_struct(task);
7328 err_group_fd:
7329 fdput(group);
7330 err_fd:
7331 put_unused_fd(event_fd);
7332 return err;
7333 }
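/*
 * A self-contained user-space sketch of the syscall above (shown for
 * illustration, not part of this translation unit): count instructions
 * retired by the calling thread.  There is no glibc wrapper, so syscall(2)
 * is used directly; error handling is minimal.
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;

	/* pid == 0, cpu == -1: this thread on any CPU; no group, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}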
7334
7335 /**
7336 * perf_event_create_kernel_counter
7337 *
7338 * @attr: attributes of the counter to create
7339 * @cpu: cpu in which the counter is bound
7340 * @task: task to profile (NULL for percpu)
7341 */
7342 struct perf_event *
7343 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
7344 struct task_struct *task,
7345 perf_overflow_handler_t overflow_handler,
7346 void *context)
7347 {
7348 struct perf_event_context *ctx;
7349 struct perf_event *event;
7350 int err;
7351
7352 /*
7353 * Get the target context (task or percpu):
7354 */
7355
7356 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
7357 overflow_handler, context);
7358 if (IS_ERR(event)) {
7359 err = PTR_ERR(event);
7360 goto err;
7361 }
7362
7363 account_event(event);
7364
7365 ctx = find_get_context(event->pmu, task, cpu);
7366 if (IS_ERR(ctx)) {
7367 err = PTR_ERR(ctx);
7368 goto err_free;
7369 }
7370
7371 WARN_ON_ONCE(ctx->parent_ctx);
7372 mutex_lock(&ctx->mutex);
7373 perf_install_in_context(ctx, event, cpu);
7374 perf_unpin_context(ctx);
7375 mutex_unlock(&ctx->mutex);
7376
7377 return event;
7378
7379 err_free:
7380 free_event(event);
7381 err:
7382 return ERR_PTR(err);
7383 }
7384 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
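/*
 * A minimal in-kernel sketch of the helper above, loosely modelled on how
 * in-tree users such as the hard-lockup watchdog use it; the names
 * my_overflow_handler() and my_setup_counter() are invented for
 * illustration.
 */
static void my_overflow_handler(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	/* runs from the PMU overflow path, so keep this short */
}

static struct perf_event *my_setup_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(struct perf_event_attr),
		.sample_period	= 1000000,	/* overflow every 1M cycles */
		.pinned		= 1,
		.disabled	= 1,
	};

	/* task == NULL binds the counter to @cpu rather than to a task */
	return perf_event_create_kernel_counter(&attr, cpu, NULL,
						my_overflow_handler, NULL);
}
/*
 * The returned event would then be switched on with perf_event_enable()
 * and torn down with perf_event_release_kernel().
 */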
7385
7386 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7387 {
7388 struct perf_event_context *src_ctx;
7389 struct perf_event_context *dst_ctx;
7390 struct perf_event *event, *tmp;
7391 LIST_HEAD(events);
7392
7393 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
7394 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
7395
7396 mutex_lock(&src_ctx->mutex);
7397 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
7398 event_entry) {
7399 perf_remove_from_context(event, false);
7400 unaccount_event_cpu(event, src_cpu);
7401 put_ctx(src_ctx);
7402 list_add(&event->migrate_entry, &events);
7403 }
7404 mutex_unlock(&src_ctx->mutex);
7405
7406 synchronize_rcu();
7407
7408 mutex_lock(&dst_ctx->mutex);
7409 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
7410 list_del(&event->migrate_entry);
7411 if (event->state >= PERF_EVENT_STATE_OFF)
7412 event->state = PERF_EVENT_STATE_INACTIVE;
7413 account_event_cpu(event, dst_cpu);
7414 perf_install_in_context(dst_ctx, event, dst_cpu);
7415 get_ctx(dst_ctx);
7416 }
7417 mutex_unlock(&dst_ctx->mutex);
7418 }
7419 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
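/*
 * perf_pmu_migrate_context() above is intended for system-wide PMUs (e.g.
 * uncore) whose events are all hosted on one CPU per package: when that
 * CPU goes offline, the driver picks a surviving CPU and calls something
 * like perf_pmu_migrate_context(&my_uncore_pmu, dying_cpu, target_cpu)
 * from its hotplug callback so the events keep counting on the new CPU
 * (my_uncore_pmu being a placeholder name for illustration).
 */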
7420
7421 static void sync_child_event(struct perf_event *child_event,
7422 struct task_struct *child)
7423 {
7424 struct perf_event *parent_event = child_event->parent;
7425 u64 child_val;
7426
7427 if (child_event->attr.inherit_stat)
7428 perf_event_read_event(child_event, child);
7429
7430 child_val = perf_event_count(child_event);
7431
7432 /*
7433 * Add back the child's count to the parent's count:
7434 */
7435 atomic64_add(child_val, &parent_event->child_count);
7436 atomic64_add(child_event->total_time_enabled,
7437 &parent_event->child_total_time_enabled);
7438 atomic64_add(child_event->total_time_running,
7439 &parent_event->child_total_time_running);
7440
7441 /*
7442 * Remove this event from the parent's list
7443 */
7444 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7445 mutex_lock(&parent_event->child_mutex);
7446 list_del_init(&child_event->child_list);
7447 mutex_unlock(&parent_event->child_mutex);
7448
7449 /*
7450 * Release the parent event, if this was the last
7451 * reference to it.
7452 */
7453 put_event(parent_event);
7454 }
7455
7456 static void
7457 __perf_event_exit_task(struct perf_event *child_event,
7458 struct perf_event_context *child_ctx,
7459 struct task_struct *child)
7460 {
7461 /*
7462 * Do not destroy the 'original' grouping; because of the context
7463 * switch optimization the original events could've ended up in a
7464 * random child task.
7465 *
7466 * If we were to destroy the original group, all group related
7467 * operations would cease to function properly after this random
7468 * child dies.
7469 *
7470 * Do destroy all inherited groups, we don't care about those
7471 * and being thorough is better.
7472 */
7473 perf_remove_from_context(child_event, !!child_event->parent);
7474
7475 /*
7476 * It can happen that the parent exits first, and has events
7477 * that are still around due to the child reference. These
7478 * events need to be zapped.
7479 */
7480 if (child_event->parent) {
7481 sync_child_event(child_event, child);
7482 free_event(child_event);
7483 }
7484 }
7485
7486 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7487 {
7488 struct perf_event *child_event, *next;
7489 struct perf_event_context *child_ctx, *parent_ctx;
7490 unsigned long flags;
7491
7492 if (likely(!child->perf_event_ctxp[ctxn])) {
7493 perf_event_task(child, NULL, 0);
7494 return;
7495 }
7496
7497 local_irq_save(flags);
7498 /*
7499 * We can't reschedule here because interrupts are disabled,
7500 * and either child is current or it is a task that can't be
7501 * scheduled, so we are now safe from rescheduling changing
7502 * our context.
7503 */
7504 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
7505
7506 /*
7507 * Take the context lock here so that if find_get_context is
7508 * reading child->perf_event_ctxp, we wait until it has
7509 * incremented the context's refcount before we do put_ctx below.
7510 */
7511 raw_spin_lock(&child_ctx->lock);
7512 task_ctx_sched_out(child_ctx);
7513 child->perf_event_ctxp[ctxn] = NULL;
7514
7515 /*
7516 * In order to avoid freeing: child_ctx->parent_ctx->task
7517 * under perf_event_context::lock, grab another reference.
7518 */
7519 parent_ctx = child_ctx->parent_ctx;
7520 if (parent_ctx)
7521 get_ctx(parent_ctx);
7522
7523 /*
7524 * If this context is a clone, unclone it so it can't get
7525 * swapped to another process while we're removing all
7526 * the events from it.
7527 */
7528 unclone_ctx(child_ctx);
7529 update_context_time(child_ctx);
7530 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7531
7532 /*
7533 * Now that we no longer hold perf_event_context::lock, drop
7534 * our extra child_ctx->parent_ctx reference.
7535 */
7536 if (parent_ctx)
7537 put_ctx(parent_ctx);
7538
7539 /*
7540 * Report the task dead after unscheduling the events so that we
7541 * won't get any samples after PERF_RECORD_EXIT. We can however still
7542 * get a few PERF_RECORD_READ events.
7543 */
7544 perf_event_task(child, child_ctx, 0);
7545
7546 /*
7547 * We can recurse on the same lock type through:
7548 *
7549 * __perf_event_exit_task()
7550 * sync_child_event()
7551 * put_event()
7552 * mutex_lock(&ctx->mutex)
7553 *
7554 * But since its the parent context it won't be the same instance.
7555 */
7556 mutex_lock(&child_ctx->mutex);
7557
7558 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
7559 __perf_event_exit_task(child_event, child_ctx, child);
7560
7561 mutex_unlock(&child_ctx->mutex);
7562
7563 put_ctx(child_ctx);
7564 }
7565
7566 /*
7567 * When a child task exits, feed back event values to parent events.
7568 */
7569 void perf_event_exit_task(struct task_struct *child)
7570 {
7571 struct perf_event *event, *tmp;
7572 int ctxn;
7573
7574 mutex_lock(&child->perf_event_mutex);
7575 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
7576 owner_entry) {
7577 list_del_init(&event->owner_entry);
7578
7579 /*
7580 * Ensure the list deletion is visible before we clear
7581 * the owner; this closes a race against perf_release() where
7582 * we need to serialize on the owner->perf_event_mutex.
7583 */
7584 smp_wmb();
7585 event->owner = NULL;
7586 }
7587 mutex_unlock(&child->perf_event_mutex);
7588
7589 for_each_task_context_nr(ctxn)
7590 perf_event_exit_task_context(child, ctxn);
7591 }
7592
7593 static void perf_free_event(struct perf_event *event,
7594 struct perf_event_context *ctx)
7595 {
7596 struct perf_event *parent = event->parent;
7597
7598 if (WARN_ON_ONCE(!parent))
7599 return;
7600
7601 mutex_lock(&parent->child_mutex);
7602 list_del_init(&event->child_list);
7603 mutex_unlock(&parent->child_mutex);
7604
7605 put_event(parent);
7606
7607 perf_group_detach(event);
7608 list_del_event(event, ctx);
7609 free_event(event);
7610 }
7611
7612 /*
7613 * free an unexposed, unused context as created by inheritance by
7614 * perf_event_init_task below, used by fork() in case of failure.
7615 */
7616 void perf_event_free_task(struct task_struct *task)
7617 {
7618 struct perf_event_context *ctx;
7619 struct perf_event *event, *tmp;
7620 int ctxn;
7621
7622 for_each_task_context_nr(ctxn) {
7623 ctx = task->perf_event_ctxp[ctxn];
7624 if (!ctx)
7625 continue;
7626
7627 mutex_lock(&ctx->mutex);
7628 again:
7629 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
7630 group_entry)
7631 perf_free_event(event, ctx);
7632
7633 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
7634 group_entry)
7635 perf_free_event(event, ctx);
7636
7637 if (!list_empty(&ctx->pinned_groups) ||
7638 !list_empty(&ctx->flexible_groups))
7639 goto again;
7640
7641 mutex_unlock(&ctx->mutex);
7642
7643 put_ctx(ctx);
7644 }
7645 }
7646
7647 void perf_event_delayed_put(struct task_struct *task)
7648 {
7649 int ctxn;
7650
7651 for_each_task_context_nr(ctxn)
7652 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
7653 }
7654
7655 /*
7656 * inherit an event from parent task to child task:
7657 */
7658 static struct perf_event *
7659 inherit_event(struct perf_event *parent_event,
7660 struct task_struct *parent,
7661 struct perf_event_context *parent_ctx,
7662 struct task_struct *child,
7663 struct perf_event *group_leader,
7664 struct perf_event_context *child_ctx)
7665 {
7666 struct perf_event *child_event;
7667 unsigned long flags;
7668
7669 /*
7670 * Instead of creating recursive hierarchies of events,
7671 * we link inherited events back to the original parent,
7672 * which has a filp for sure, which we use as the reference
7673 * count:
7674 */
7675 if (parent_event->parent)
7676 parent_event = parent_event->parent;
7677
7678 child_event = perf_event_alloc(&parent_event->attr,
7679 parent_event->cpu,
7680 child,
7681 group_leader, parent_event,
7682 NULL, NULL);
7683 if (IS_ERR(child_event))
7684 return child_event;
7685
7686 if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
7687 free_event(child_event);
7688 return NULL;
7689 }
7690
7691 get_ctx(child_ctx);
7692
7693 /*
7694 * Make the child state follow the state of the parent event,
7695 * not its attr.disabled bit. We hold the parent's mutex,
7696 * so we won't race with perf_event_{en, dis}able_family.
7697 */
7698 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
7699 child_event->state = PERF_EVENT_STATE_INACTIVE;
7700 else
7701 child_event->state = PERF_EVENT_STATE_OFF;
7702
7703 if (parent_event->attr.freq) {
7704 u64 sample_period = parent_event->hw.sample_period;
7705 struct hw_perf_event *hwc = &child_event->hw;
7706
7707 hwc->sample_period = sample_period;
7708 hwc->last_period = sample_period;
7709
7710 local64_set(&hwc->period_left, sample_period);
7711 }
7712
7713 child_event->ctx = child_ctx;
7714 child_event->overflow_handler = parent_event->overflow_handler;
7715 child_event->overflow_handler_context
7716 = parent_event->overflow_handler_context;
7717
7718 /*
7719 * Precalculate sample_data sizes
7720 */
7721 perf_event__header_size(child_event);
7722 perf_event__id_header_size(child_event);
7723
7724 /*
7725 * Link it up in the child's context:
7726 */
7727 raw_spin_lock_irqsave(&child_ctx->lock, flags);
7728 add_event_to_ctx(child_event, child_ctx);
7729 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7730
7731 /*
7732 * Link this into the parent event's child list
7733 */
7734 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7735 mutex_lock(&parent_event->child_mutex);
7736 list_add_tail(&child_event->child_list, &parent_event->child_list);
7737 mutex_unlock(&parent_event->child_mutex);
7738
7739 return child_event;
7740 }
7741
7742 static int inherit_group(struct perf_event *parent_event,
7743 struct task_struct *parent,
7744 struct perf_event_context *parent_ctx,
7745 struct task_struct *child,
7746 struct perf_event_context *child_ctx)
7747 {
7748 struct perf_event *leader;
7749 struct perf_event *sub;
7750 struct perf_event *child_ctr;
7751
7752 leader = inherit_event(parent_event, parent, parent_ctx,
7753 child, NULL, child_ctx);
7754 if (IS_ERR(leader))
7755 return PTR_ERR(leader);
7756 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
7757 child_ctr = inherit_event(sub, parent, parent_ctx,
7758 child, leader, child_ctx);
7759 if (IS_ERR(child_ctr))
7760 return PTR_ERR(child_ctr);
7761 }
7762 return 0;
7763 }
7764
7765 static int
7766 inherit_task_group(struct perf_event *event, struct task_struct *parent,
7767 struct perf_event_context *parent_ctx,
7768 struct task_struct *child, int ctxn,
7769 int *inherited_all)
7770 {
7771 int ret;
7772 struct perf_event_context *child_ctx;
7773
7774 if (!event->attr.inherit) {
7775 *inherited_all = 0;
7776 return 0;
7777 }
7778
7779 child_ctx = child->perf_event_ctxp[ctxn];
7780 if (!child_ctx) {
7781 /*
7782 * This is executed from the parent task context, so
7783 * inherit events that have been marked for cloning.
7784 * First allocate and initialize a context for the
7785 * child.
7786 */
7787
7788 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
7789 if (!child_ctx)
7790 return -ENOMEM;
7791
7792 child->perf_event_ctxp[ctxn] = child_ctx;
7793 }
7794
7795 ret = inherit_group(event, parent, parent_ctx,
7796 child, child_ctx);
7797
7798 if (ret)
7799 *inherited_all = 0;
7800
7801 return ret;
7802 }
7803
7804 /*
7805 * Initialize the perf_event context in task_struct
7806 */
7807 int perf_event_init_context(struct task_struct *child, int ctxn)
7808 {
7809 struct perf_event_context *child_ctx, *parent_ctx;
7810 struct perf_event_context *cloned_ctx;
7811 struct perf_event *event;
7812 struct task_struct *parent = current;
7813 int inherited_all = 1;
7814 unsigned long flags;
7815 int ret = 0;
7816
7817 if (likely(!parent->perf_event_ctxp[ctxn]))
7818 return 0;
7819
7820 /*
7821 * If the parent's context is a clone, pin it so it won't get
7822 * swapped under us.
7823 */
7824 parent_ctx = perf_pin_task_context(parent, ctxn);
7825 if (!parent_ctx)
7826 return 0;
7827
7828 /*
7829 * No need to check if parent_ctx != NULL here; since we saw
7830 * it non-NULL earlier, the only reason for it to become NULL
7831 * is if we exit, and since we're currently in the middle of
7832 * a fork we can't be exiting at the same time.
7833 */
7834
7835 /*
7836 * Lock the parent list. No need to lock the child - not PID
7837 * hashed yet and not running, so nobody can access it.
7838 */
7839 mutex_lock(&parent_ctx->mutex);
7840
7841 /*
7842 * We don't have to disable NMIs - we are only looking at
7843 * the list, not manipulating it:
7844 */
7845 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
7846 ret = inherit_task_group(event, parent, parent_ctx,
7847 child, ctxn, &inherited_all);
7848 if (ret)
7849 break;
7850 }
7851
7852 /*
7853 * We can't hold ctx->lock when iterating the ->flexible_groups list due
7854 * to allocations, but we need to prevent rotation because
7855 * rotate_ctx() will change the list from interrupt context.
7856 */
7857 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7858 parent_ctx->rotate_disable = 1;
7859 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7860
7861 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
7862 ret = inherit_task_group(event, parent, parent_ctx,
7863 child, ctxn, &inherited_all);
7864 if (ret)
7865 break;
7866 }
7867
7868 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7869 parent_ctx->rotate_disable = 0;
7870
7871 child_ctx = child->perf_event_ctxp[ctxn];
7872
7873 if (child_ctx && inherited_all) {
7874 /*
7875 * Mark the child context as a clone of the parent
7876 * context, or of whatever the parent is a clone of.
7877 *
7878 * Note that if the parent is a clone, the holding of
7879 * parent_ctx->lock prevents it from being uncloned.
7880 */
7881 cloned_ctx = parent_ctx->parent_ctx;
7882 if (cloned_ctx) {
7883 child_ctx->parent_ctx = cloned_ctx;
7884 child_ctx->parent_gen = parent_ctx->parent_gen;
7885 } else {
7886 child_ctx->parent_ctx = parent_ctx;
7887 child_ctx->parent_gen = parent_ctx->generation;
7888 }
7889 get_ctx(child_ctx->parent_ctx);
7890 }
7891
7892 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7893 mutex_unlock(&parent_ctx->mutex);
7894
7895 perf_unpin_context(parent_ctx);
7896 put_ctx(parent_ctx);
7897
7898 return ret;
7899 }
7900
7901 /*
7902 * Initialize the perf_event context in task_struct
7903 */
7904 int perf_event_init_task(struct task_struct *child)
7905 {
7906 int ctxn, ret;
7907
7908 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
7909 mutex_init(&child->perf_event_mutex);
7910 INIT_LIST_HEAD(&child->perf_event_list);
7911
7912 for_each_task_context_nr(ctxn) {
7913 ret = perf_event_init_context(child, ctxn);
7914 if (ret)
7915 return ret;
7916 }
7917
7918 return 0;
7919 }
7920
7921 static void __init perf_event_init_all_cpus(void)
7922 {
7923 struct swevent_htable *swhash;
7924 int cpu;
7925
7926 for_each_possible_cpu(cpu) {
7927 swhash = &per_cpu(swevent_htable, cpu);
7928 mutex_init(&swhash->hlist_mutex);
7929 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
7930 }
7931 }
7932
7933 static void perf_event_init_cpu(int cpu)
7934 {
7935 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7936
7937 mutex_lock(&swhash->hlist_mutex);
7938 swhash->online = true;
7939 if (swhash->hlist_refcount > 0) {
7940 struct swevent_hlist *hlist;
7941
7942 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
7943 WARN_ON(!hlist);
7944 rcu_assign_pointer(swhash->swevent_hlist, hlist);
7945 }
7946 mutex_unlock(&swhash->hlist_mutex);
7947 }
7948
7949 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
7950 static void perf_pmu_rotate_stop(struct pmu *pmu)
7951 {
7952 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
7953
7954 WARN_ON(!irqs_disabled());
7955
7956 list_del_init(&cpuctx->rotation_list);
7957 }
7958
7959 static void __perf_event_exit_context(void *__info)
7960 {
7961 struct remove_event re = { .detach_group = false };
7962 struct perf_event_context *ctx = __info;
7963
7964 perf_pmu_rotate_stop(ctx->pmu);
7965
7966 rcu_read_lock();
7967 list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
7968 __perf_remove_from_context(&re);
7969 rcu_read_unlock();
7970 }
7971
7972 static void perf_event_exit_cpu_context(int cpu)
7973 {
7974 struct perf_event_context *ctx;
7975 struct pmu *pmu;
7976 int idx;
7977
7978 idx = srcu_read_lock(&pmus_srcu);
7979 list_for_each_entry_rcu(pmu, &pmus, entry) {
7980 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
7981
7982 mutex_lock(&ctx->mutex);
7983 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
7984 mutex_unlock(&ctx->mutex);
7985 }
7986 srcu_read_unlock(&pmus_srcu, idx);
7987 }
7988
7989 static void perf_event_exit_cpu(int cpu)
7990 {
7991 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7992
7993 perf_event_exit_cpu_context(cpu);
7994
7995 mutex_lock(&swhash->hlist_mutex);
7996 swhash->online = false;
7997 swevent_hlist_release(swhash);
7998 mutex_unlock(&swhash->hlist_mutex);
7999 }
8000 #else
8001 static inline void perf_event_exit_cpu(int cpu) { }
8002 #endif
8003
8004 static int
8005 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
8006 {
8007 int cpu;
8008
8009 for_each_online_cpu(cpu)
8010 perf_event_exit_cpu(cpu);
8011
8012 return NOTIFY_OK;
8013 }
8014
8015 /*
8016 * Run the perf reboot notifier at the very last possible moment so that
8017 * the generic watchdog code runs as long as possible.
8018 */
8019 static struct notifier_block perf_reboot_notifier = {
8020 .notifier_call = perf_reboot,
8021 .priority = INT_MIN,
8022 };
8023
8024 static int
8025 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
8026 {
8027 unsigned int cpu = (long)hcpu;
8028
8029 switch (action & ~CPU_TASKS_FROZEN) {
8030
8031 case CPU_UP_PREPARE:
8032 case CPU_DOWN_FAILED:
8033 perf_event_init_cpu(cpu);
8034 break;
8035
8036 case CPU_UP_CANCELED:
8037 case CPU_DOWN_PREPARE:
8038 perf_event_exit_cpu(cpu);
8039 break;
8040 default:
8041 break;
8042 }
8043
8044 return NOTIFY_OK;
8045 }
8046
8047 void __init perf_event_init(void)
8048 {
8049 int ret;
8050
8051 idr_init(&pmu_idr);
8052
8053 perf_event_init_all_cpus();
8054 init_srcu_struct(&pmus_srcu);
8055 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
8056 perf_pmu_register(&perf_cpu_clock, NULL, -1);
8057 perf_pmu_register(&perf_task_clock, NULL, -1);
8058 perf_tp_register();
8059 perf_cpu_notifier(perf_cpu_notify);
8060 register_reboot_notifier(&perf_reboot_notifier);
8061
8062 ret = init_hw_breakpoint();
8063 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
8064
8065 /* do not patch jump label more than once per second */
8066 jump_label_rate_limit(&perf_sched_events, HZ);
8067
8068 /*
8069 * Build time assertion that we keep the data_head at the intended
8070 * location. IOW, validation we got the __reserved[] size right.
8071 */
8072 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
8073 != 1024);
8074 }
8075
8076 static int __init perf_event_sysfs_init(void)
8077 {
8078 struct pmu *pmu;
8079 int ret;
8080
8081 mutex_lock(&pmus_lock);
8082
8083 ret = bus_register(&pmu_bus);
8084 if (ret)
8085 goto unlock;
8086
8087 list_for_each_entry(pmu, &pmus, entry) {
8088 if (!pmu->name || pmu->type < 0)
8089 continue;
8090
8091 ret = pmu_dev_alloc(pmu);
8092 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
8093 }
8094 pmu_bus_running = 1;
8095 ret = 0;
8096
8097 unlock:
8098 mutex_unlock(&pmus_lock);
8099
8100 return ret;
8101 }
8102 device_initcall(perf_event_sysfs_init);
8103
8104 #ifdef CONFIG_CGROUP_PERF
8105 static struct cgroup_subsys_state *
8106 perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
8107 {
8108 struct perf_cgroup *jc;
8109
8110 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
8111 if (!jc)
8112 return ERR_PTR(-ENOMEM);
8113
8114 jc->info = alloc_percpu(struct perf_cgroup_info);
8115 if (!jc->info) {
8116 kfree(jc);
8117 return ERR_PTR(-ENOMEM);
8118 }
8119
8120 return &jc->css;
8121 }
8122
8123 static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
8124 {
8125 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
8126
8127 free_percpu(jc->info);
8128 kfree(jc);
8129 }
8130
8131 static int __perf_cgroup_move(void *info)
8132 {
8133 struct task_struct *task = info;
8134 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
8135 return 0;
8136 }
8137
8138 static void perf_cgroup_attach(struct cgroup_subsys_state *css,
8139 struct cgroup_taskset *tset)
8140 {
8141 struct task_struct *task;
8142
8143 cgroup_taskset_for_each(task, tset)
8144 task_function_call(task, __perf_cgroup_move, task);
8145 }
8146
8147 static void perf_cgroup_exit(struct cgroup_subsys_state *css,
8148 struct cgroup_subsys_state *old_css,
8149 struct task_struct *task)
8150 {
8151 /*
8152 * cgroup_exit() is called in the copy_process() failure path.
8153 * Ignore this case since the task hasn't run yet; this avoids
8154 * trying to poke a half-freed task state from generic code.
8155 */
8156 if (!(task->flags & PF_EXITING))
8157 return;
8158
8159 task_function_call(task, __perf_cgroup_move, task);
8160 }
8161
8162 struct cgroup_subsys perf_event_cgrp_subsys = {
8163 .css_alloc = perf_cgroup_css_alloc,
8164 .css_free = perf_cgroup_css_free,
8165 .exit = perf_cgroup_exit,
8166 .attach = perf_cgroup_attach,
8167 };
8168 #endif /* CONFIG_CGROUP_PERF */