kernel/events/core.c
1 /*
2 * Performance events core code:
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 *
9 * For licensing details see kernel-base/COPYING
10 */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/idr.h>
17 #include <linux/file.h>
18 #include <linux/poll.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/tick.h>
22 #include <linux/sysfs.h>
23 #include <linux/dcache.h>
24 #include <linux/percpu.h>
25 #include <linux/ptrace.h>
26 #include <linux/reboot.h>
27 #include <linux/vmstat.h>
28 #include <linux/device.h>
29 #include <linux/export.h>
30 #include <linux/vmalloc.h>
31 #include <linux/hardirq.h>
32 #include <linux/rculist.h>
33 #include <linux/uaccess.h>
34 #include <linux/syscalls.h>
35 #include <linux/anon_inodes.h>
36 #include <linux/kernel_stat.h>
37 #include <linux/perf_event.h>
38 #include <linux/ftrace_event.h>
39 #include <linux/hw_breakpoint.h>
40 #include <linux/mm_types.h>
41 #include <linux/cgroup.h>
42
43 #include "internal.h"
44
45 #include <asm/irq_regs.h>
46
47 struct remote_function_call {
48 struct task_struct *p;
49 int (*func)(void *info);
50 void *info;
51 int ret;
52 };
53
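/*
 * IPI callback used by task_function_call() and cpu_function_call():
 * when a target task was given, bail out with -EAGAIN if that task is
 * no longer current on this CPU; otherwise run the requested function
 * and store its return value.
 */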
54 static void remote_function(void *data)
55 {
56 struct remote_function_call *tfc = data;
57 struct task_struct *p = tfc->p;
58
59 if (p) {
60 tfc->ret = -EAGAIN;
61 if (task_cpu(p) != smp_processor_id() || !task_curr(p))
62 return;
63 }
64
65 tfc->ret = tfc->func(tfc->info);
66 }
67
68 /**
69 * task_function_call - call a function on the cpu on which a task runs
70 * @p: the task to evaluate
71 * @func: the function to be called
72 * @info: the function call argument
73 *
74 * Calls the function @func when the task is currently running. This might
75 * be on the current CPU, which just calls the function directly.
76 *
77 * returns: @func return value, or
78 * -ESRCH - when the process isn't running
79 * -EAGAIN - when the process moved away
80 */
81 static int
82 task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
83 {
84 struct remote_function_call data = {
85 .p = p,
86 .func = func,
87 .info = info,
88 .ret = -ESRCH, /* No such (running) process */
89 };
90
91 if (task_curr(p))
92 smp_call_function_single(task_cpu(p), remote_function, &data, 1);
93
94 return data.ret;
95 }
96
97 /**
98 * cpu_function_call - call a function on the cpu
99 * @func: the function to be called
100 * @info: the function call argument
101 *
102 * Calls the function @func on the remote cpu.
103 *
104 * returns: @func return value or -ENXIO when the cpu is offline
105 */
106 static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
107 {
108 struct remote_function_call data = {
109 .p = NULL,
110 .func = func,
111 .info = info,
112 .ret = -ENXIO, /* No such CPU */
113 };
114
115 smp_call_function_single(cpu, remote_function, &data, 1);
116
117 return data.ret;
118 }
119
120 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
121 PERF_FLAG_FD_OUTPUT |\
122 PERF_FLAG_PID_CGROUP)
123
124 /*
125 * branch priv levels that need permission checks
126 */
127 #define PERF_SAMPLE_BRANCH_PERM_PLM \
128 (PERF_SAMPLE_BRANCH_KERNEL |\
129 PERF_SAMPLE_BRANCH_HV)
130
131 enum event_type_t {
132 EVENT_FLEXIBLE = 0x1,
133 EVENT_PINNED = 0x2,
134 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
135 };
136
137 /*
138 * perf_sched_events : >0 events exist
139 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
140 */
141 struct static_key_deferred perf_sched_events __read_mostly;
142 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
143 static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
144
145 static atomic_t nr_mmap_events __read_mostly;
146 static atomic_t nr_comm_events __read_mostly;
147 static atomic_t nr_task_events __read_mostly;
148 static atomic_t nr_freq_events __read_mostly;
149
150 static LIST_HEAD(pmus);
151 static DEFINE_MUTEX(pmus_lock);
152 static struct srcu_struct pmus_srcu;
153
154 /*
155 * perf event paranoia level:
156 * -1 - not paranoid at all
157 * 0 - disallow raw tracepoint access for unpriv
158 * 1 - disallow cpu events for unpriv
159 * 2 - disallow kernel profiling for unpriv
160 */
161 int sysctl_perf_event_paranoid __read_mostly = 1;
162
163 /* Minimum for 512 kiB + 1 user control page */
164 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
165
166 /*
167 * max perf event sample rate
168 */
169 #define DEFAULT_MAX_SAMPLE_RATE 100000
170 #define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
171 #define DEFAULT_CPU_TIME_MAX_PERCENT 25
172
173 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
174
175 static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
176 static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
177
178 static int perf_sample_allowed_ns __read_mostly =
179 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
180
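/*
 * Recompute the per-sample time budget: perf_sample_allowed_ns is the
 * configured percentage of the current sampling period.
 */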
181 void update_perf_cpu_limits(void)
182 {
183 u64 tmp = perf_sample_period_ns;
184
185 tmp *= sysctl_perf_cpu_time_max_percent;
186 do_div(tmp, 100);
187 ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
188 }
189
190 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
191
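/*
 * Sysctl handler for kernel.perf_event_max_sample_rate: on write, derive
 * max_samples_per_tick and perf_sample_period_ns from the new rate and
 * refresh the CPU time limit.
 */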
192 int perf_proc_update_handler(struct ctl_table *table, int write,
193 void __user *buffer, size_t *lenp,
194 loff_t *ppos)
195 {
196 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
197
198 if (ret || !write)
199 return ret;
200
201 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
202 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
203 update_perf_cpu_limits();
204
205 return 0;
206 }
207
208 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
209
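/*
 * Sysctl handler for the CPU-time percentage limit; on write, only the
 * derived perf_sample_allowed_ns value needs refreshing.
 */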
210 int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
211 void __user *buffer, size_t *lenp,
212 loff_t *ppos)
213 {
214 int ret = proc_dointvec(table, write, buffer, lenp, ppos);
215
216 if (ret || !write)
217 return ret;
218
219 update_perf_cpu_limits();
220
221 return 0;
222 }
223
224 /*
225 * perf samples are done in some very critical code paths (NMIs).
226 * If they take too much CPU time, the system can lock up and not
227 * get any real work done. This will drop the sample rate when
228 * we detect that events are taking too long.
229 */
230 #define NR_ACCUMULATED_SAMPLES 128
231 static DEFINE_PER_CPU(u64, running_sample_length);
232
233 void perf_sample_event_took(u64 sample_len_ns)
234 {
235 u64 avg_local_sample_len;
236 u64 local_samples_len;
237 u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
238
239 if (allowed_ns == 0)
240 return;
241
242 /* decay the counter by 1 average sample */
243 local_samples_len = __get_cpu_var(running_sample_length);
244 local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
245 local_samples_len += sample_len_ns;
246 __get_cpu_var(running_sample_length) = local_samples_len;
247
248 /*
249	 * note: this will be biased artificially low until we have
250 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
251 * from having to maintain a count.
252 */
253 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
254
255 if (avg_local_sample_len <= allowed_ns)
256 return;
257
258 if (max_samples_per_tick <= 1)
259 return;
260
261 max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
262 sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
263 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
264
265 printk_ratelimited(KERN_WARNING
266 "perf samples too long (%lld > %lld), lowering "
267 "kernel.perf_event_max_sample_rate to %d\n",
268 avg_local_sample_len, allowed_ns,
269 sysctl_perf_event_sample_rate);
270
271 update_perf_cpu_limits();
272 }
273
274 static atomic64_t perf_event_id;
275
276 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
277 enum event_type_t event_type);
278
279 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
280 enum event_type_t event_type,
281 struct task_struct *task);
282
283 static void update_context_time(struct perf_event_context *ctx);
284 static u64 perf_event_time(struct perf_event *event);
285
286 void __weak perf_event_print_debug(void) { }
287
288 extern __weak const char *perf_pmu_name(void)
289 {
290 return "pmu";
291 }
292
293 static inline u64 perf_clock(void)
294 {
295 return local_clock();
296 }
297
298 static inline struct perf_cpu_context *
299 __get_cpu_context(struct perf_event_context *ctx)
300 {
301 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
302 }
303
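/*
 * Lock the CPU context and, if given, the task context nested inside it.
 * Paired with perf_ctx_unlock() below, which releases in reverse order.
 */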
304 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
305 struct perf_event_context *ctx)
306 {
307 raw_spin_lock(&cpuctx->ctx.lock);
308 if (ctx)
309 raw_spin_lock(&ctx->lock);
310 }
311
312 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
313 struct perf_event_context *ctx)
314 {
315 if (ctx)
316 raw_spin_unlock(&ctx->lock);
317 raw_spin_unlock(&cpuctx->ctx.lock);
318 }
319
320 #ifdef CONFIG_CGROUP_PERF
321
322 /*
323 * perf_cgroup_info keeps track of time_enabled for a cgroup.
324 * This is a per-cpu dynamically allocated data structure.
325 */
326 struct perf_cgroup_info {
327 u64 time;
328 u64 timestamp;
329 };
330
331 struct perf_cgroup {
332 struct cgroup_subsys_state css;
333 struct perf_cgroup_info __percpu *info;
334 };
335
336 /*
337 * Must ensure cgroup is pinned (css_get) before calling
338 * this function. In other words, we cannot call this function
339 * if there is no cgroup event for the current CPU context.
340 */
341 static inline struct perf_cgroup *
342 perf_cgroup_from_task(struct task_struct *task)
343 {
344 return container_of(task_css(task, perf_subsys_id),
345 struct perf_cgroup, css);
346 }
347
348 static inline bool
349 perf_cgroup_match(struct perf_event *event)
350 {
351 struct perf_event_context *ctx = event->ctx;
352 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
353
354 /* @event doesn't care about cgroup */
355 if (!event->cgrp)
356 return true;
357
358 /* wants specific cgroup scope but @cpuctx isn't associated with any */
359 if (!cpuctx->cgrp)
360 return false;
361
362 /*
363 * Cgroup scoping is recursive. An event enabled for a cgroup is
364 * also enabled for all its descendant cgroups. If @cpuctx's
365 * cgroup is a descendant of @event's (the test covers identity
366 * case), it's a match.
367 */
368 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
369 event->cgrp->css.cgroup);
370 }
371
372 static inline bool perf_tryget_cgroup(struct perf_event *event)
373 {
374 return css_tryget(&event->cgrp->css);
375 }
376
377 static inline void perf_put_cgroup(struct perf_event *event)
378 {
379 css_put(&event->cgrp->css);
380 }
381
382 static inline void perf_detach_cgroup(struct perf_event *event)
383 {
384 perf_put_cgroup(event);
385 event->cgrp = NULL;
386 }
387
388 static inline int is_cgroup_event(struct perf_event *event)
389 {
390 return event->cgrp != NULL;
391 }
392
393 static inline u64 perf_cgroup_event_time(struct perf_event *event)
394 {
395 struct perf_cgroup_info *t;
396
397 t = per_cpu_ptr(event->cgrp->info, event->cpu);
398 return t->time;
399 }
400
401 static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
402 {
403 struct perf_cgroup_info *info;
404 u64 now;
405
406 now = perf_clock();
407
408 info = this_cpu_ptr(cgrp->info);
409
410 info->time += now - info->timestamp;
411 info->timestamp = now;
412 }
413
414 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
415 {
416 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
417 if (cgrp_out)
418 __update_cgrp_time(cgrp_out);
419 }
420
421 static inline void update_cgrp_time_from_event(struct perf_event *event)
422 {
423 struct perf_cgroup *cgrp;
424
425 /*
426 * ensure we access cgroup data only when needed and
427 * when we know the cgroup is pinned (css_get)
428 */
429 if (!is_cgroup_event(event))
430 return;
431
432 cgrp = perf_cgroup_from_task(current);
433 /*
434 * Do not update time when cgroup is not active
435 */
436 if (cgrp == event->cgrp)
437 __update_cgrp_time(event->cgrp);
438 }
439
440 static inline void
441 perf_cgroup_set_timestamp(struct task_struct *task,
442 struct perf_event_context *ctx)
443 {
444 struct perf_cgroup *cgrp;
445 struct perf_cgroup_info *info;
446
447 /*
448 * ctx->lock held by caller
449 * ensure we do not access cgroup data
450 * unless we have the cgroup pinned (css_get)
451 */
452 if (!task || !ctx->nr_cgroups)
453 return;
454
455 cgrp = perf_cgroup_from_task(task);
456 info = this_cpu_ptr(cgrp->info);
457 info->timestamp = ctx->timestamp;
458 }
459
460 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
461 #define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
462
463 /*
464 * reschedule events based on the cgroup constraint of task.
465 *
466 * mode SWOUT : schedule out everything
467 * mode SWIN : schedule in based on cgroup for next
468 */
469 void perf_cgroup_switch(struct task_struct *task, int mode)
470 {
471 struct perf_cpu_context *cpuctx;
472 struct pmu *pmu;
473 unsigned long flags;
474
475 /*
476	 * disable interrupts to avoid getting nr_cgroup
477 * changes via __perf_event_disable(). Also
478 * avoids preemption.
479 */
480 local_irq_save(flags);
481
482 /*
483 * we reschedule only in the presence of cgroup
484 * constrained events.
485 */
486 rcu_read_lock();
487
488 list_for_each_entry_rcu(pmu, &pmus, entry) {
489 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
490 if (cpuctx->unique_pmu != pmu)
491 continue; /* ensure we process each cpuctx once */
492
493 /*
494 * perf_cgroup_events says at least one
495 * context on this CPU has cgroup events.
496 *
497 * ctx->nr_cgroups reports the number of cgroup
498 * events for a context.
499 */
500 if (cpuctx->ctx.nr_cgroups > 0) {
501 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
502 perf_pmu_disable(cpuctx->ctx.pmu);
503
504 if (mode & PERF_CGROUP_SWOUT) {
505 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
506 /*
507 * must not be done before ctxswout due
508 * to event_filter_match() in event_sched_out()
509 */
510 cpuctx->cgrp = NULL;
511 }
512
513 if (mode & PERF_CGROUP_SWIN) {
514 WARN_ON_ONCE(cpuctx->cgrp);
515 /*
516 * set cgrp before ctxsw in to allow
517 * event_filter_match() to not have to pass
518 * task around
519 */
520 cpuctx->cgrp = perf_cgroup_from_task(task);
521 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
522 }
523 perf_pmu_enable(cpuctx->ctx.pmu);
524 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
525 }
526 }
527
528 rcu_read_unlock();
529
530 local_irq_restore(flags);
531 }
532
533 static inline void perf_cgroup_sched_out(struct task_struct *task,
534 struct task_struct *next)
535 {
536 struct perf_cgroup *cgrp1;
537 struct perf_cgroup *cgrp2 = NULL;
538
539 /*
540 * we come here when we know perf_cgroup_events > 0
541 */
542 cgrp1 = perf_cgroup_from_task(task);
543
544 /*
545 * next is NULL when called from perf_event_enable_on_exec()
546 * that will systematically cause a cgroup_switch()
547 */
548 if (next)
549 cgrp2 = perf_cgroup_from_task(next);
550
551 /*
552 * only schedule out current cgroup events if we know
553 * that we are switching to a different cgroup. Otherwise,
554	 * do not touch the cgroup events.
555 */
556 if (cgrp1 != cgrp2)
557 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
558 }
559
560 static inline void perf_cgroup_sched_in(struct task_struct *prev,
561 struct task_struct *task)
562 {
563 struct perf_cgroup *cgrp1;
564 struct perf_cgroup *cgrp2 = NULL;
565
566 /*
567 * we come here when we know perf_cgroup_events > 0
568 */
569 cgrp1 = perf_cgroup_from_task(task);
570
571 /* prev can never be NULL */
572 cgrp2 = perf_cgroup_from_task(prev);
573
574 /*
575 * only need to schedule in cgroup events if we are changing
576	 * cgroup during ctxsw. Cgroup events were not scheduled
577	 * out at the previous ctxsw if that was not the case.
578 */
579 if (cgrp1 != cgrp2)
580 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
581 }
582
583 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
584 struct perf_event_attr *attr,
585 struct perf_event *group_leader)
586 {
587 struct perf_cgroup *cgrp;
588 struct cgroup_subsys_state *css;
589 struct fd f = fdget(fd);
590 int ret = 0;
591
592 if (!f.file)
593 return -EBADF;
594
595 rcu_read_lock();
596
597 css = css_from_dir(f.file->f_dentry, &perf_subsys);
598 if (IS_ERR(css)) {
599 ret = PTR_ERR(css);
600 goto out;
601 }
602
603 cgrp = container_of(css, struct perf_cgroup, css);
604 event->cgrp = cgrp;
605
606 /* must be done before we fput() the file */
607 if (!perf_tryget_cgroup(event)) {
608 event->cgrp = NULL;
609 ret = -ENOENT;
610 goto out;
611 }
612
613 /*
614 * all events in a group must monitor
615 * the same cgroup because a task belongs
616 * to only one perf cgroup at a time
617 */
618 if (group_leader && group_leader->cgrp != cgrp) {
619 perf_detach_cgroup(event);
620 ret = -EINVAL;
621 }
622 out:
623 rcu_read_unlock();
624 fdput(f);
625 return ret;
626 }
627
628 static inline void
629 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
630 {
631 struct perf_cgroup_info *t;
632 t = per_cpu_ptr(event->cgrp->info, event->cpu);
633 event->shadow_ctx_time = now - t->timestamp;
634 }
635
636 static inline void
637 perf_cgroup_defer_enabled(struct perf_event *event)
638 {
639 /*
640 * when the current task's perf cgroup does not match
641 * the event's, we need to remember to call the
642	 * perf_cgroup_mark_enabled() function the first time a task with
643 * a matching perf cgroup is scheduled in.
644 */
645 if (is_cgroup_event(event) && !perf_cgroup_match(event))
646 event->cgrp_defer_enabled = 1;
647 }
648
649 static inline void
650 perf_cgroup_mark_enabled(struct perf_event *event,
651 struct perf_event_context *ctx)
652 {
653 struct perf_event *sub;
654 u64 tstamp = perf_event_time(event);
655
656 if (!event->cgrp_defer_enabled)
657 return;
658
659 event->cgrp_defer_enabled = 0;
660
661 event->tstamp_enabled = tstamp - event->total_time_enabled;
662 list_for_each_entry(sub, &event->sibling_list, group_entry) {
663 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
664 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
665 sub->cgrp_defer_enabled = 0;
666 }
667 }
668 }
669 #else /* !CONFIG_CGROUP_PERF */
670
671 static inline bool
672 perf_cgroup_match(struct perf_event *event)
673 {
674 return true;
675 }
676
677 static inline void perf_detach_cgroup(struct perf_event *event)
678 {}
679
680 static inline int is_cgroup_event(struct perf_event *event)
681 {
682 return 0;
683 }
684
685 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
686 {
687 return 0;
688 }
689
690 static inline void update_cgrp_time_from_event(struct perf_event *event)
691 {
692 }
693
694 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
695 {
696 }
697
698 static inline void perf_cgroup_sched_out(struct task_struct *task,
699 struct task_struct *next)
700 {
701 }
702
703 static inline void perf_cgroup_sched_in(struct task_struct *prev,
704 struct task_struct *task)
705 {
706 }
707
708 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
709 struct perf_event_attr *attr,
710 struct perf_event *group_leader)
711 {
712 return -EINVAL;
713 }
714
715 static inline void
716 perf_cgroup_set_timestamp(struct task_struct *task,
717 struct perf_event_context *ctx)
718 {
719 }
720
721 void
722 perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
723 {
724 }
725
726 static inline void
727 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
728 {
729 }
730
731 static inline u64 perf_cgroup_event_time(struct perf_event *event)
732 {
733 return 0;
734 }
735
736 static inline void
737 perf_cgroup_defer_enabled(struct perf_event *event)
738 {
739 }
740
741 static inline void
742 perf_cgroup_mark_enabled(struct perf_event *event,
743 struct perf_event_context *ctx)
744 {
745 }
746 #endif
747
748 /*
749 * set default to be dependent on timer tick just
750 * like original code
751 */
752 #define PERF_CPU_HRTIMER (1000 / HZ)
753 /*
754	 * function must be called with interrupts disabled
755 */
756 static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
757 {
758 struct perf_cpu_context *cpuctx;
759 enum hrtimer_restart ret = HRTIMER_NORESTART;
760 int rotations = 0;
761
762 WARN_ON(!irqs_disabled());
763
764 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
765
766 rotations = perf_rotate_context(cpuctx);
767
768 /*
769 * arm timer if needed
770 */
771 if (rotations) {
772 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
773 ret = HRTIMER_RESTART;
774 }
775
776 return ret;
777 }
778
779 /* CPU is going down */
780 void perf_cpu_hrtimer_cancel(int cpu)
781 {
782 struct perf_cpu_context *cpuctx;
783 struct pmu *pmu;
784 unsigned long flags;
785
786 if (WARN_ON(cpu != smp_processor_id()))
787 return;
788
789 local_irq_save(flags);
790
791 rcu_read_lock();
792
793 list_for_each_entry_rcu(pmu, &pmus, entry) {
794 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
795
796 if (pmu->task_ctx_nr == perf_sw_context)
797 continue;
798
799 hrtimer_cancel(&cpuctx->hrtimer);
800 }
801
802 rcu_read_unlock();
803
804 local_irq_restore(flags);
805 }
806
807 static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
808 {
809 struct hrtimer *hr = &cpuctx->hrtimer;
810 struct pmu *pmu = cpuctx->ctx.pmu;
811 int timer;
812
813 /* no multiplexing needed for SW PMU */
814 if (pmu->task_ctx_nr == perf_sw_context)
815 return;
816
817 /*
818	 * check that the default is sane; if not set, force the
819 * default interval (1/tick)
820 */
821 timer = pmu->hrtimer_interval_ms;
822 if (timer < 1)
823 timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
824
825 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
826
827 hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
828 hr->function = perf_cpu_hrtimer_handler;
829 }
830
831 static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
832 {
833 struct hrtimer *hr = &cpuctx->hrtimer;
834 struct pmu *pmu = cpuctx->ctx.pmu;
835
836 /* not for SW PMU */
837 if (pmu->task_ctx_nr == perf_sw_context)
838 return;
839
840 if (hrtimer_active(hr))
841 return;
842
843 if (!hrtimer_callback_running(hr))
844 __hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
845 0, HRTIMER_MODE_REL_PINNED, 0);
846 }
847
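/*
 * perf_pmu_disable()/perf_pmu_enable() nest by way of a per-cpu count:
 * the PMU is disabled on the first disable and re-enabled only when the
 * count drops back to zero.
 */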
848 void perf_pmu_disable(struct pmu *pmu)
849 {
850 int *count = this_cpu_ptr(pmu->pmu_disable_count);
851 if (!(*count)++)
852 pmu->pmu_disable(pmu);
853 }
854
855 void perf_pmu_enable(struct pmu *pmu)
856 {
857 int *count = this_cpu_ptr(pmu->pmu_disable_count);
858 if (!--(*count))
859 pmu->pmu_enable(pmu);
860 }
861
862 static DEFINE_PER_CPU(struct list_head, rotation_list);
863
864 /*
865 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
866 * because they're strictly cpu affine and rotate_start is called with IRQs
867 * disabled, while rotate_context is called from IRQ context.
868 */
869 static void perf_pmu_rotate_start(struct pmu *pmu)
870 {
871 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
872 struct list_head *head = &__get_cpu_var(rotation_list);
873
874 WARN_ON(!irqs_disabled());
875
876 if (list_empty(&cpuctx->rotation_list))
877 list_add(&cpuctx->rotation_list, head);
878 }
879
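/*
 * Context reference counting: put_ctx() drops the references held on the
 * parent context and the owning task, and frees the context via RCU once
 * the refcount reaches zero.
 */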
880 static void get_ctx(struct perf_event_context *ctx)
881 {
882 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
883 }
884
885 static void put_ctx(struct perf_event_context *ctx)
886 {
887 if (atomic_dec_and_test(&ctx->refcount)) {
888 if (ctx->parent_ctx)
889 put_ctx(ctx->parent_ctx);
890 if (ctx->task)
891 put_task_struct(ctx->task);
892 kfree_rcu(ctx, rcu_head);
893 }
894 }
895
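/*
 * Detach a cloned context from its parent and bump the generation so
 * stale clones can be detected.
 */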
896 static void unclone_ctx(struct perf_event_context *ctx)
897 {
898 if (ctx->parent_ctx) {
899 put_ctx(ctx->parent_ctx);
900 ctx->parent_ctx = NULL;
901 }
902 ctx->generation++;
903 }
904
905 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
906 {
907 /*
908 * only top level events have the pid namespace they were created in
909 */
910 if (event->parent)
911 event = event->parent;
912
913 return task_tgid_nr_ns(p, event->ns);
914 }
915
916 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
917 {
918 /*
919 * only top level events have the pid namespace they were created in
920 */
921 if (event->parent)
922 event = event->parent;
923
924 return task_pid_nr_ns(p, event->ns);
925 }
926
927 /*
928 * If we inherit events we want to return the parent event id
929 * to userspace.
930 */
931 static u64 primary_event_id(struct perf_event *event)
932 {
933 u64 id = event->id;
934
935 if (event->parent)
936 id = event->parent->id;
937
938 return id;
939 }
940
941 /*
942 * Get the perf_event_context for a task and lock it.
943	 * This has to cope with the fact that until it is locked,
944 * the context could get moved to another task.
945 */
946 static struct perf_event_context *
947 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
948 {
949 struct perf_event_context *ctx;
950
951 retry:
952 /*
953 * One of the few rules of preemptible RCU is that one cannot do
954 * rcu_read_unlock() while holding a scheduler (or nested) lock when
955 * part of the read side critical section was preemptible -- see
956 * rcu_read_unlock_special().
957 *
958 * Since ctx->lock nests under rq->lock we must ensure the entire read
959 * side critical section is non-preemptible.
960 */
961 preempt_disable();
962 rcu_read_lock();
963 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
964 if (ctx) {
965 /*
966 * If this context is a clone of another, it might
967 * get swapped for another underneath us by
968 * perf_event_task_sched_out, though the
969 * rcu_read_lock() protects us from any context
970 * getting freed. Lock the context and check if it
971 * got swapped before we could get the lock, and retry
972 * if so. If we locked the right context, then it
973 * can't get swapped on us any more.
974 */
975 raw_spin_lock_irqsave(&ctx->lock, *flags);
976 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
977 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
978 rcu_read_unlock();
979 preempt_enable();
980 goto retry;
981 }
982
983 if (!atomic_inc_not_zero(&ctx->refcount)) {
984 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
985 ctx = NULL;
986 }
987 }
988 rcu_read_unlock();
989 preempt_enable();
990 return ctx;
991 }
992
993 /*
994 * Get the context for a task and increment its pin_count so it
995 * can't get swapped to another task. This also increments its
996 * reference count so that the context can't get freed.
997 */
998 static struct perf_event_context *
999 perf_pin_task_context(struct task_struct *task, int ctxn)
1000 {
1001 struct perf_event_context *ctx;
1002 unsigned long flags;
1003
1004 ctx = perf_lock_task_context(task, ctxn, &flags);
1005 if (ctx) {
1006 ++ctx->pin_count;
1007 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1008 }
1009 return ctx;
1010 }
1011
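/*
 * Drop the pin_count taken by perf_pin_task_context().
 */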
1012 static void perf_unpin_context(struct perf_event_context *ctx)
1013 {
1014 unsigned long flags;
1015
1016 raw_spin_lock_irqsave(&ctx->lock, flags);
1017 --ctx->pin_count;
1018 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1019 }
1020
1021 /*
1022 * Update the record of the current time in a context.
1023 */
1024 static void update_context_time(struct perf_event_context *ctx)
1025 {
1026 u64 now = perf_clock();
1027
1028 ctx->time += now - ctx->timestamp;
1029 ctx->timestamp = now;
1030 }
1031
1032 static u64 perf_event_time(struct perf_event *event)
1033 {
1034 struct perf_event_context *ctx = event->ctx;
1035
1036 if (is_cgroup_event(event))
1037 return perf_cgroup_event_time(event);
1038
1039 return ctx ? ctx->time : 0;
1040 }
1041
1042 /*
1043	 * Update the total_time_enabled and total_time_running fields for an event.
1044 * The caller of this function needs to hold the ctx->lock.
1045 */
1046 static void update_event_times(struct perf_event *event)
1047 {
1048 struct perf_event_context *ctx = event->ctx;
1049 u64 run_end;
1050
1051 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1052 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1053 return;
1054 /*
1055 * in cgroup mode, time_enabled represents
1056 * the time the event was enabled AND active
1057 * tasks were in the monitored cgroup. This is
1058 * independent of the activity of the context as
1059 * there may be a mix of cgroup and non-cgroup events.
1060 *
1061 * That is why we treat cgroup events differently
1062 * here.
1063 */
1064 if (is_cgroup_event(event))
1065 run_end = perf_cgroup_event_time(event);
1066 else if (ctx->is_active)
1067 run_end = ctx->time;
1068 else
1069 run_end = event->tstamp_stopped;
1070
1071 event->total_time_enabled = run_end - event->tstamp_enabled;
1072
1073 if (event->state == PERF_EVENT_STATE_INACTIVE)
1074 run_end = event->tstamp_stopped;
1075 else
1076 run_end = perf_event_time(event);
1077
1078 event->total_time_running = run_end - event->tstamp_running;
1079
1080 }
1081
1082 /*
1083 * Update total_time_enabled and total_time_running for all events in a group.
1084 */
1085 static void update_group_times(struct perf_event *leader)
1086 {
1087 struct perf_event *event;
1088
1089 update_event_times(leader);
1090 list_for_each_entry(event, &leader->sibling_list, group_entry)
1091 update_event_times(event);
1092 }
1093
1094 static struct list_head *
1095 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1096 {
1097 if (event->attr.pinned)
1098 return &ctx->pinned_groups;
1099 else
1100 return &ctx->flexible_groups;
1101 }
1102
1103 /*
1104	 * Add an event to the lists for its context.
1105 * Must be called with ctx->mutex and ctx->lock held.
1106 */
1107 static void
1108 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1109 {
1110 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1111 event->attach_state |= PERF_ATTACH_CONTEXT;
1112
1113 /*
1114	 * If we're a standalone event or group leader, we go to the context
1115	 * list; group events are kept attached to the group so that
1116 * perf_group_detach can, at all times, locate all siblings.
1117 */
1118 if (event->group_leader == event) {
1119 struct list_head *list;
1120
1121 if (is_software_event(event))
1122 event->group_flags |= PERF_GROUP_SOFTWARE;
1123
1124 list = ctx_group_list(event, ctx);
1125 list_add_tail(&event->group_entry, list);
1126 }
1127
1128 if (is_cgroup_event(event))
1129 ctx->nr_cgroups++;
1130
1131 if (has_branch_stack(event))
1132 ctx->nr_branch_stack++;
1133
1134 list_add_rcu(&event->event_entry, &ctx->event_list);
1135 if (!ctx->nr_events)
1136 perf_pmu_rotate_start(ctx->pmu);
1137 ctx->nr_events++;
1138 if (event->attr.inherit_stat)
1139 ctx->nr_stat++;
1140
1141 ctx->generation++;
1142 }
1143
1144 /*
1145 * Initialize event state based on the perf_event_attr::disabled.
1146 */
1147 static inline void perf_event__state_init(struct perf_event *event)
1148 {
1149 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1150 PERF_EVENT_STATE_INACTIVE;
1151 }
1152
1153 /*
1154 * Called at perf_event creation and when events are attached/detached from a
1155 * group.
1156 */
1157 static void perf_event__read_size(struct perf_event *event)
1158 {
1159 int entry = sizeof(u64); /* value */
1160 int size = 0;
1161 int nr = 1;
1162
1163 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1164 size += sizeof(u64);
1165
1166 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1167 size += sizeof(u64);
1168
1169 if (event->attr.read_format & PERF_FORMAT_ID)
1170 entry += sizeof(u64);
1171
1172 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1173 nr += event->group_leader->nr_siblings;
1174 size += sizeof(u64);
1175 }
1176
1177 size += entry * nr;
1178 event->read_size = size;
1179 }
1180
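/*
 * Precompute the fixed part of the sample header size implied by
 * attr.sample_type; the result is stored in event->header_size.
 */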
1181 static void perf_event__header_size(struct perf_event *event)
1182 {
1183 struct perf_sample_data *data;
1184 u64 sample_type = event->attr.sample_type;
1185 u16 size = 0;
1186
1187 perf_event__read_size(event);
1188
1189 if (sample_type & PERF_SAMPLE_IP)
1190 size += sizeof(data->ip);
1191
1192 if (sample_type & PERF_SAMPLE_ADDR)
1193 size += sizeof(data->addr);
1194
1195 if (sample_type & PERF_SAMPLE_PERIOD)
1196 size += sizeof(data->period);
1197
1198 if (sample_type & PERF_SAMPLE_WEIGHT)
1199 size += sizeof(data->weight);
1200
1201 if (sample_type & PERF_SAMPLE_READ)
1202 size += event->read_size;
1203
1204 if (sample_type & PERF_SAMPLE_DATA_SRC)
1205 size += sizeof(data->data_src.val);
1206
1207 if (sample_type & PERF_SAMPLE_TRANSACTION)
1208 size += sizeof(data->txn);
1209
1210 event->header_size = size;
1211 }
1212
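/*
 * Precompute the size of the sample ID fields (TID, TIME, ID, STREAM_ID,
 * CPU, IDENTIFIER) selected in attr.sample_type; the result is stored in
 * event->id_header_size.
 */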
1213 static void perf_event__id_header_size(struct perf_event *event)
1214 {
1215 struct perf_sample_data *data;
1216 u64 sample_type = event->attr.sample_type;
1217 u16 size = 0;
1218
1219 if (sample_type & PERF_SAMPLE_TID)
1220 size += sizeof(data->tid_entry);
1221
1222 if (sample_type & PERF_SAMPLE_TIME)
1223 size += sizeof(data->time);
1224
1225 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1226 size += sizeof(data->id);
1227
1228 if (sample_type & PERF_SAMPLE_ID)
1229 size += sizeof(data->id);
1230
1231 if (sample_type & PERF_SAMPLE_STREAM_ID)
1232 size += sizeof(data->stream_id);
1233
1234 if (sample_type & PERF_SAMPLE_CPU)
1235 size += sizeof(data->cpu_entry);
1236
1237 event->id_header_size = size;
1238 }
1239
1240 static void perf_group_attach(struct perf_event *event)
1241 {
1242 struct perf_event *group_leader = event->group_leader, *pos;
1243
1244 /*
1245 * We can have double attach due to group movement in perf_event_open.
1246 */
1247 if (event->attach_state & PERF_ATTACH_GROUP)
1248 return;
1249
1250 event->attach_state |= PERF_ATTACH_GROUP;
1251
1252 if (group_leader == event)
1253 return;
1254
1255 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1256 !is_software_event(event))
1257 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1258
1259 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1260 group_leader->nr_siblings++;
1261
1262 perf_event__header_size(group_leader);
1263
1264 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1265 perf_event__header_size(pos);
1266 }
1267
1268 /*
1269	 * Remove an event from the lists for its context.
1270 * Must be called with ctx->mutex and ctx->lock held.
1271 */
1272 static void
1273 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1274 {
1275 struct perf_cpu_context *cpuctx;
1276 /*
1277 * We can have double detach due to exit/hot-unplug + close.
1278 */
1279 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1280 return;
1281
1282 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1283
1284 if (is_cgroup_event(event)) {
1285 ctx->nr_cgroups--;
1286 cpuctx = __get_cpu_context(ctx);
1287 /*
1288 * if there are no more cgroup events
1289	 * then clear cgrp to avoid a stale pointer
1290 * in update_cgrp_time_from_cpuctx()
1291 */
1292 if (!ctx->nr_cgroups)
1293 cpuctx->cgrp = NULL;
1294 }
1295
1296 if (has_branch_stack(event))
1297 ctx->nr_branch_stack--;
1298
1299 ctx->nr_events--;
1300 if (event->attr.inherit_stat)
1301 ctx->nr_stat--;
1302
1303 list_del_rcu(&event->event_entry);
1304
1305 if (event->group_leader == event)
1306 list_del_init(&event->group_entry);
1307
1308 update_group_times(event);
1309
1310 /*
1311 * If event was in error state, then keep it
1312 * that way, otherwise bogus counts will be
1313 * returned on read(). The only way to get out
1314 * of error state is by explicit re-enabling
1315 * of the event
1316 */
1317 if (event->state > PERF_EVENT_STATE_OFF)
1318 event->state = PERF_EVENT_STATE_OFF;
1319
1320 ctx->generation++;
1321 }
1322
1323 static void perf_group_detach(struct perf_event *event)
1324 {
1325 struct perf_event *sibling, *tmp;
1326 struct list_head *list = NULL;
1327
1328 /*
1329 * We can have double detach due to exit/hot-unplug + close.
1330 */
1331 if (!(event->attach_state & PERF_ATTACH_GROUP))
1332 return;
1333
1334 event->attach_state &= ~PERF_ATTACH_GROUP;
1335
1336 /*
1337 * If this is a sibling, remove it from its group.
1338 */
1339 if (event->group_leader != event) {
1340 list_del_init(&event->group_entry);
1341 event->group_leader->nr_siblings--;
1342 goto out;
1343 }
1344
1345 if (!list_empty(&event->group_entry))
1346 list = &event->group_entry;
1347
1348 /*
1349 * If this was a group event with sibling events then
1350 * upgrade the siblings to singleton events by adding them
1351 * to whatever list we are on.
1352 */
1353 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1354 if (list)
1355 list_move_tail(&sibling->group_entry, list);
1356 sibling->group_leader = sibling;
1357
1358 /* Inherit group flags from the previous leader */
1359 sibling->group_flags = event->group_flags;
1360 }
1361
1362 out:
1363 perf_event__header_size(event->group_leader);
1364
1365 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1366 perf_event__header_size(tmp);
1367 }
1368
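/*
 * An event may run here if it is not bound to another CPU and its
 * cgroup (if any) matches the one currently scheduled on this CPU.
 */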
1369 static inline int
1370 event_filter_match(struct perf_event *event)
1371 {
1372 return (event->cpu == -1 || event->cpu == smp_processor_id())
1373 && perf_cgroup_match(event);
1374 }
1375
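/*
 * Stop a single event: update its timestamps, remove it from the PMU via
 * ->del() and drop the context's active counts. A pending disable is
 * folded into PERF_EVENT_STATE_OFF here.
 */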
1376 static void
1377 event_sched_out(struct perf_event *event,
1378 struct perf_cpu_context *cpuctx,
1379 struct perf_event_context *ctx)
1380 {
1381 u64 tstamp = perf_event_time(event);
1382 u64 delta;
1383 /*
1384 * An event which could not be activated because of
1385 * filter mismatch still needs to have its timings
1386	 * maintained, otherwise bogus information is returned
1387 * via read() for time_enabled, time_running:
1388 */
1389 if (event->state == PERF_EVENT_STATE_INACTIVE
1390 && !event_filter_match(event)) {
1391 delta = tstamp - event->tstamp_stopped;
1392 event->tstamp_running += delta;
1393 event->tstamp_stopped = tstamp;
1394 }
1395
1396 if (event->state != PERF_EVENT_STATE_ACTIVE)
1397 return;
1398
1399 perf_pmu_disable(event->pmu);
1400
1401 event->state = PERF_EVENT_STATE_INACTIVE;
1402 if (event->pending_disable) {
1403 event->pending_disable = 0;
1404 event->state = PERF_EVENT_STATE_OFF;
1405 }
1406 event->tstamp_stopped = tstamp;
1407 event->pmu->del(event, 0);
1408 event->oncpu = -1;
1409
1410 if (!is_software_event(event))
1411 cpuctx->active_oncpu--;
1412 ctx->nr_active--;
1413 if (event->attr.freq && event->attr.sample_freq)
1414 ctx->nr_freq--;
1415 if (event->attr.exclusive || !cpuctx->active_oncpu)
1416 cpuctx->exclusive = 0;
1417
1418 perf_pmu_enable(event->pmu);
1419 }
1420
1421 static void
1422 group_sched_out(struct perf_event *group_event,
1423 struct perf_cpu_context *cpuctx,
1424 struct perf_event_context *ctx)
1425 {
1426 struct perf_event *event;
1427 int state = group_event->state;
1428
1429 event_sched_out(group_event, cpuctx, ctx);
1430
1431 /*
1432 * Schedule out siblings (if any):
1433 */
1434 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1435 event_sched_out(event, cpuctx, ctx);
1436
1437 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1438 cpuctx->exclusive = 0;
1439 }
1440
1441 /*
1442 * Cross CPU call to remove a performance event
1443 *
1444 * We disable the event on the hardware level first. After that we
1445 * remove it from the context list.
1446 */
1447 static int __perf_remove_from_context(void *info)
1448 {
1449 struct perf_event *event = info;
1450 struct perf_event_context *ctx = event->ctx;
1451 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1452
1453 raw_spin_lock(&ctx->lock);
1454 event_sched_out(event, cpuctx, ctx);
1455 list_del_event(event, ctx);
1456 if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1457 ctx->is_active = 0;
1458 cpuctx->task_ctx = NULL;
1459 }
1460 raw_spin_unlock(&ctx->lock);
1461
1462 return 0;
1463 }
1464
1465
1466 /*
1467 * Remove the event from a task's (or a CPU's) list of events.
1468 *
1469 * CPU events are removed with a smp call. For task events we only
1470 * call when the task is on a CPU.
1471 *
1472 * If event->ctx is a cloned context, callers must make sure that
1473 * every task struct that event->ctx->task could possibly point to
1474 * remains valid. This is OK when called from perf_release since
1475 * that only calls us on the top-level context, which can't be a clone.
1476 * When called from perf_event_exit_task, it's OK because the
1477 * context has been detached from its task.
1478 */
1479 static void perf_remove_from_context(struct perf_event *event)
1480 {
1481 struct perf_event_context *ctx = event->ctx;
1482 struct task_struct *task = ctx->task;
1483
1484 lockdep_assert_held(&ctx->mutex);
1485
1486 if (!task) {
1487 /*
1488 * Per cpu events are removed via an smp call and
1489 * the removal is always successful.
1490 */
1491 cpu_function_call(event->cpu, __perf_remove_from_context, event);
1492 return;
1493 }
1494
1495 retry:
1496 if (!task_function_call(task, __perf_remove_from_context, event))
1497 return;
1498
1499 raw_spin_lock_irq(&ctx->lock);
1500 /*
1501 * If we failed to find a running task, but find the context active now
1502 * that we've acquired the ctx->lock, retry.
1503 */
1504 if (ctx->is_active) {
1505 raw_spin_unlock_irq(&ctx->lock);
1506 goto retry;
1507 }
1508
1509 /*
1510	 * Since the task isn't running, it's safe to remove the event; our
1511	 * holding the ctx->lock ensures the task won't get scheduled in.
1512 */
1513 list_del_event(event, ctx);
1514 raw_spin_unlock_irq(&ctx->lock);
1515 }
1516
1517 /*
1518 * Cross CPU call to disable a performance event
1519 */
1520 int __perf_event_disable(void *info)
1521 {
1522 struct perf_event *event = info;
1523 struct perf_event_context *ctx = event->ctx;
1524 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1525
1526 /*
1527	 * If this is a per-task event, we need to check whether this
1528 * event's task is the current task on this cpu.
1529 *
1530 * Can trigger due to concurrent perf_event_context_sched_out()
1531 * flipping contexts around.
1532 */
1533 if (ctx->task && cpuctx->task_ctx != ctx)
1534 return -EINVAL;
1535
1536 raw_spin_lock(&ctx->lock);
1537
1538 /*
1539 * If the event is on, turn it off.
1540 * If it is in error state, leave it in error state.
1541 */
1542 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
1543 update_context_time(ctx);
1544 update_cgrp_time_from_event(event);
1545 update_group_times(event);
1546 if (event == event->group_leader)
1547 group_sched_out(event, cpuctx, ctx);
1548 else
1549 event_sched_out(event, cpuctx, ctx);
1550 event->state = PERF_EVENT_STATE_OFF;
1551 }
1552
1553 raw_spin_unlock(&ctx->lock);
1554
1555 return 0;
1556 }
1557
1558 /*
1559	 * Disable an event.
1560 *
1561 * If event->ctx is a cloned context, callers must make sure that
1562 * every task struct that event->ctx->task could possibly point to
1563	 * remains valid. This condition is satisfied when called through
1564 * perf_event_for_each_child or perf_event_for_each because they
1565 * hold the top-level event's child_mutex, so any descendant that
1566 * goes to exit will block in sync_child_event.
1567 * When called from perf_pending_event it's OK because event->ctx
1568 * is the current context on this CPU and preemption is disabled,
1569 * hence we can't get into perf_event_task_sched_out for this context.
1570 */
1571 void perf_event_disable(struct perf_event *event)
1572 {
1573 struct perf_event_context *ctx = event->ctx;
1574 struct task_struct *task = ctx->task;
1575
1576 if (!task) {
1577 /*
1578 * Disable the event on the cpu that it's on
1579 */
1580 cpu_function_call(event->cpu, __perf_event_disable, event);
1581 return;
1582 }
1583
1584 retry:
1585 if (!task_function_call(task, __perf_event_disable, event))
1586 return;
1587
1588 raw_spin_lock_irq(&ctx->lock);
1589 /*
1590 * If the event is still active, we need to retry the cross-call.
1591 */
1592 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1593 raw_spin_unlock_irq(&ctx->lock);
1594 /*
1595 * Reload the task pointer, it might have been changed by
1596 * a concurrent perf_event_context_sched_out().
1597 */
1598 task = ctx->task;
1599 goto retry;
1600 }
1601
1602 /*
1603 * Since we have the lock this context can't be scheduled
1604 * in, so we can change the state safely.
1605 */
1606 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1607 update_group_times(event);
1608 event->state = PERF_EVENT_STATE_OFF;
1609 }
1610 raw_spin_unlock_irq(&ctx->lock);
1611 }
1612 EXPORT_SYMBOL_GPL(perf_event_disable);
1613
1614 static void perf_set_shadow_time(struct perf_event *event,
1615 struct perf_event_context *ctx,
1616 u64 tstamp)
1617 {
1618 /*
1619 * use the correct time source for the time snapshot
1620 *
1621 * We could get by without this by leveraging the
1622 * fact that to get to this function, the caller
1623 * has most likely already called update_context_time()
1624	 * and update_cgrp_time_xx() and thus both timestamps
1625	 * are identical (or very close). Given that tstamp is
1626 * already adjusted for cgroup, we could say that:
1627 * tstamp - ctx->timestamp
1628 * is equivalent to
1629 * tstamp - cgrp->timestamp.
1630 *
1631 * Then, in perf_output_read(), the calculation would
1632 * work with no changes because:
1633 * - event is guaranteed scheduled in
1634 * - no scheduled out in between
1635 * - thus the timestamp would be the same
1636 *
1637 * But this is a bit hairy.
1638 *
1639 * So instead, we have an explicit cgroup call to remain
1640	 * within the same time source all along. We believe it
1641 * is cleaner and simpler to understand.
1642 */
1643 if (is_cgroup_event(event))
1644 perf_cgroup_set_shadow_time(event, tstamp);
1645 else
1646 event->shadow_ctx_time = tstamp - ctx->timestamp;
1647 }
1648
1649 #define MAX_INTERRUPTS (~0ULL)
1650
1651 static void perf_log_throttle(struct perf_event *event, int enable);
1652
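/*
 * Start a single inactive event on this CPU via the PMU ->add() hook.
 * Returns -EAGAIN (leaving the event inactive) if the PMU refuses it, so
 * the caller can unwind group scheduling.
 */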
1653 static int
1654 event_sched_in(struct perf_event *event,
1655 struct perf_cpu_context *cpuctx,
1656 struct perf_event_context *ctx)
1657 {
1658 u64 tstamp = perf_event_time(event);
1659 int ret = 0;
1660
1661 if (event->state <= PERF_EVENT_STATE_OFF)
1662 return 0;
1663
1664 event->state = PERF_EVENT_STATE_ACTIVE;
1665 event->oncpu = smp_processor_id();
1666
1667 /*
1668	 * Unthrottle events, since we were just scheduled in we might have missed
1669	 * several ticks already; also, for a heavily scheduling task there is little
1670	 * guarantee it'll get a tick in a timely manner.
1671 */
1672 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1673 perf_log_throttle(event, 1);
1674 event->hw.interrupts = 0;
1675 }
1676
1677 /*
1678 * The new state must be visible before we turn it on in the hardware:
1679 */
1680 smp_wmb();
1681
1682 perf_pmu_disable(event->pmu);
1683
1684 if (event->pmu->add(event, PERF_EF_START)) {
1685 event->state = PERF_EVENT_STATE_INACTIVE;
1686 event->oncpu = -1;
1687 ret = -EAGAIN;
1688 goto out;
1689 }
1690
1691 event->tstamp_running += tstamp - event->tstamp_stopped;
1692
1693 perf_set_shadow_time(event, ctx, tstamp);
1694
1695 if (!is_software_event(event))
1696 cpuctx->active_oncpu++;
1697 ctx->nr_active++;
1698 if (event->attr.freq && event->attr.sample_freq)
1699 ctx->nr_freq++;
1700
1701 if (event->attr.exclusive)
1702 cpuctx->exclusive = 1;
1703
1704 out:
1705 perf_pmu_enable(event->pmu);
1706
1707 return ret;
1708 }
1709
1710 static int
1711 group_sched_in(struct perf_event *group_event,
1712 struct perf_cpu_context *cpuctx,
1713 struct perf_event_context *ctx)
1714 {
1715 struct perf_event *event, *partial_group = NULL;
1716 struct pmu *pmu = group_event->pmu;
1717 u64 now = ctx->time;
1718 bool simulate = false;
1719
1720 if (group_event->state == PERF_EVENT_STATE_OFF)
1721 return 0;
1722
1723 pmu->start_txn(pmu);
1724
1725 if (event_sched_in(group_event, cpuctx, ctx)) {
1726 pmu->cancel_txn(pmu);
1727 perf_cpu_hrtimer_restart(cpuctx);
1728 return -EAGAIN;
1729 }
1730
1731 /*
1732 * Schedule in siblings as one group (if any):
1733 */
1734 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1735 if (event_sched_in(event, cpuctx, ctx)) {
1736 partial_group = event;
1737 goto group_error;
1738 }
1739 }
1740
1741 if (!pmu->commit_txn(pmu))
1742 return 0;
1743
1744 group_error:
1745 /*
1746 * Groups can be scheduled in as one unit only, so undo any
1747 * partial group before returning:
1748 * The events up to the failed event are scheduled out normally,
1749 * tstamp_stopped will be updated.
1750 *
1751 * The failed events and the remaining siblings need to have
1752	 * their timings updated as if they had gone through event_sched_in()
1753 * and event_sched_out(). This is required to get consistent timings
1754 * across the group. This also takes care of the case where the group
1755 * could never be scheduled by ensuring tstamp_stopped is set to mark
1756 * the time the event was actually stopped, such that time delta
1757 * calculation in update_event_times() is correct.
1758 */
1759 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1760 if (event == partial_group)
1761 simulate = true;
1762
1763 if (simulate) {
1764 event->tstamp_running += now - event->tstamp_stopped;
1765 event->tstamp_stopped = now;
1766 } else {
1767 event_sched_out(event, cpuctx, ctx);
1768 }
1769 }
1770 event_sched_out(group_event, cpuctx, ctx);
1771
1772 pmu->cancel_txn(pmu);
1773
1774 perf_cpu_hrtimer_restart(cpuctx);
1775
1776 return -EAGAIN;
1777 }
1778
1779 /*
1780 * Work out whether we can put this event group on the CPU now.
1781 */
1782 static int group_can_go_on(struct perf_event *event,
1783 struct perf_cpu_context *cpuctx,
1784 int can_add_hw)
1785 {
1786 /*
1787 * Groups consisting entirely of software events can always go on.
1788 */
1789 if (event->group_flags & PERF_GROUP_SOFTWARE)
1790 return 1;
1791 /*
1792 * If an exclusive group is already on, no other hardware
1793 * events can go on.
1794 */
1795 if (cpuctx->exclusive)
1796 return 0;
1797 /*
1798 * If this group is exclusive and there are already
1799 * events on the CPU, it can't go on.
1800 */
1801 if (event->attr.exclusive && cpuctx->active_oncpu)
1802 return 0;
1803 /*
1804 * Otherwise, try to add it if all previous groups were able
1805 * to go on.
1806 */
1807 return can_add_hw;
1808 }
1809
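/*
 * Add the event to its context's lists, attach it to its group and
 * initialize all of its timestamps to the current event time.
 */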
1810 static void add_event_to_ctx(struct perf_event *event,
1811 struct perf_event_context *ctx)
1812 {
1813 u64 tstamp = perf_event_time(event);
1814
1815 list_add_event(event, ctx);
1816 perf_group_attach(event);
1817 event->tstamp_enabled = tstamp;
1818 event->tstamp_running = tstamp;
1819 event->tstamp_stopped = tstamp;
1820 }
1821
1822 static void task_ctx_sched_out(struct perf_event_context *ctx);
1823 static void
1824 ctx_sched_in(struct perf_event_context *ctx,
1825 struct perf_cpu_context *cpuctx,
1826 enum event_type_t event_type,
1827 struct task_struct *task);
1828
1829 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1830 struct perf_event_context *ctx,
1831 struct task_struct *task)
1832 {
1833 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1834 if (ctx)
1835 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1836 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1837 if (ctx)
1838 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1839 }
1840
1841 /*
1842 * Cross CPU call to install and enable a performance event
1843 *
1844 * Must be called with ctx->mutex held
1845 */
1846 static int __perf_install_in_context(void *info)
1847 {
1848 struct perf_event *event = info;
1849 struct perf_event_context *ctx = event->ctx;
1850 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1851 struct perf_event_context *task_ctx = cpuctx->task_ctx;
1852 struct task_struct *task = current;
1853
1854 perf_ctx_lock(cpuctx, task_ctx);
1855 perf_pmu_disable(cpuctx->ctx.pmu);
1856
1857 /*
1858 * If there was an active task_ctx schedule it out.
1859 */
1860 if (task_ctx)
1861 task_ctx_sched_out(task_ctx);
1862
1863 /*
1864 * If the context we're installing events in is not the
1865 * active task_ctx, flip them.
1866 */
1867 if (ctx->task && task_ctx != ctx) {
1868 if (task_ctx)
1869 raw_spin_unlock(&task_ctx->lock);
1870 raw_spin_lock(&ctx->lock);
1871 task_ctx = ctx;
1872 }
1873
1874 if (task_ctx) {
1875 cpuctx->task_ctx = task_ctx;
1876 task = task_ctx->task;
1877 }
1878
1879 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
1880
1881 update_context_time(ctx);
1882 /*
1883 * update cgrp time only if current cgrp
1884 * matches event->cgrp. Must be done before
1885 * calling add_event_to_ctx()
1886 */
1887 update_cgrp_time_from_event(event);
1888
1889 add_event_to_ctx(event, ctx);
1890
1891 /*
1892 * Schedule everything back in
1893 */
1894 perf_event_sched_in(cpuctx, task_ctx, task);
1895
1896 perf_pmu_enable(cpuctx->ctx.pmu);
1897 perf_ctx_unlock(cpuctx, task_ctx);
1898
1899 return 0;
1900 }
1901
1902 /*
1903 * Attach a performance event to a context
1904 *
1905 * First we add the event to the list with the hardware enable bit
1906 * in event->hw_config cleared.
1907 *
1908	 * If the event is attached to a task which is on a CPU we use an smp
1909 * call to enable it in the task context. The task might have been
1910 * scheduled away, but we check this in the smp call again.
1911 */
1912 static void
1913 perf_install_in_context(struct perf_event_context *ctx,
1914 struct perf_event *event,
1915 int cpu)
1916 {
1917 struct task_struct *task = ctx->task;
1918
1919 lockdep_assert_held(&ctx->mutex);
1920
1921 event->ctx = ctx;
1922 if (event->cpu != -1)
1923 event->cpu = cpu;
1924
1925 if (!task) {
1926 /*
1927 * Per cpu events are installed via an smp call and
1928 * the install is always successful.
1929 */
1930 cpu_function_call(cpu, __perf_install_in_context, event);
1931 return;
1932 }
1933
1934 retry:
1935 if (!task_function_call(task, __perf_install_in_context, event))
1936 return;
1937
1938 raw_spin_lock_irq(&ctx->lock);
1939 /*
1940 * If we failed to find a running task, but find the context active now
1941 * that we've acquired the ctx->lock, retry.
1942 */
1943 if (ctx->is_active) {
1944 raw_spin_unlock_irq(&ctx->lock);
1945 goto retry;
1946 }
1947
1948 /*
1949	 * Since the task isn't running, it's safe to add the event; our holding
1950	 * the ctx->lock ensures the task won't get scheduled in.
1951 */
1952 add_event_to_ctx(event, ctx);
1953 raw_spin_unlock_irq(&ctx->lock);
1954 }
1955
1956 /*
1957	 * Put an event into inactive state and update time fields.
1958 * Enabling the leader of a group effectively enables all
1959 * the group members that aren't explicitly disabled, so we
1960 * have to update their ->tstamp_enabled also.
1961 * Note: this works for group members as well as group leaders
1962 * since the non-leader members' sibling_lists will be empty.
1963 */
1964 static void __perf_event_mark_enabled(struct perf_event *event)
1965 {
1966 struct perf_event *sub;
1967 u64 tstamp = perf_event_time(event);
1968
1969 event->state = PERF_EVENT_STATE_INACTIVE;
1970 event->tstamp_enabled = tstamp - event->total_time_enabled;
1971 list_for_each_entry(sub, &event->sibling_list, group_entry) {
1972 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1973 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
1974 }
1975 }
1976
1977 /*
1978 * Cross CPU call to enable a performance event
1979 */
1980 static int __perf_event_enable(void *info)
1981 {
1982 struct perf_event *event = info;
1983 struct perf_event_context *ctx = event->ctx;
1984 struct perf_event *leader = event->group_leader;
1985 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1986 int err;
1987
1988 /*
1989	 * There's a time window between the 'ctx->is_active' check
1990	 * in the perf_event_enable() function and this place having:
1991 * - IRQs on
1992 * - ctx->lock unlocked
1993 *
1994 * where the task could be killed and 'ctx' deactivated
1995 * by perf_event_exit_task.
1996 */
1997 if (!ctx->is_active)
1998 return -EINVAL;
1999
2000 raw_spin_lock(&ctx->lock);
2001 update_context_time(ctx);
2002
2003 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2004 goto unlock;
2005
2006 /*
2007 * set current task's cgroup time reference point
2008 */
2009 perf_cgroup_set_timestamp(current, ctx);
2010
2011 __perf_event_mark_enabled(event);
2012
2013 if (!event_filter_match(event)) {
2014 if (is_cgroup_event(event))
2015 perf_cgroup_defer_enabled(event);
2016 goto unlock;
2017 }
2018
2019 /*
2020 * If the event is in a group and isn't the group leader,
2021 * then don't put it on unless the group is on.
2022 */
2023 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
2024 goto unlock;
2025
2026 if (!group_can_go_on(event, cpuctx, 1)) {
2027 err = -EEXIST;
2028 } else {
2029 if (event == leader)
2030 err = group_sched_in(event, cpuctx, ctx);
2031 else
2032 err = event_sched_in(event, cpuctx, ctx);
2033 }
2034
2035 if (err) {
2036 /*
2037 * If this event can't go on and it's part of a
2038 * group, then the whole group has to come off.
2039 */
2040 if (leader != event) {
2041 group_sched_out(leader, cpuctx, ctx);
2042 perf_cpu_hrtimer_restart(cpuctx);
2043 }
2044 if (leader->attr.pinned) {
2045 update_group_times(leader);
2046 leader->state = PERF_EVENT_STATE_ERROR;
2047 }
2048 }
2049
2050 unlock:
2051 raw_spin_unlock(&ctx->lock);
2052
2053 return 0;
2054 }
2055
2056 /*
2057 * Enable an event.
2058 *
2059 * If event->ctx is a cloned context, callers must make sure that
2060 * every task struct that event->ctx->task could possibly point to
2061 * remains valid. This condition is satisfied when called through
2062 * perf_event_for_each_child or perf_event_for_each as described
2063 * for perf_event_disable.
2064 */
2065 void perf_event_enable(struct perf_event *event)
2066 {
2067 struct perf_event_context *ctx = event->ctx;
2068 struct task_struct *task = ctx->task;
2069
2070 if (!task) {
2071 /*
2072 * Enable the event on the cpu that it's on
2073 */
2074 cpu_function_call(event->cpu, __perf_event_enable, event);
2075 return;
2076 }
2077
2078 raw_spin_lock_irq(&ctx->lock);
2079 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2080 goto out;
2081
2082 /*
2083 * If the event is in error state, clear that first.
2084 * That way, if we see the event in error state below, we
2085 * know that it has gone back into error state, as distinct
2086 * from the task having been scheduled away before the
2087 * cross-call arrived.
2088 */
2089 if (event->state == PERF_EVENT_STATE_ERROR)
2090 event->state = PERF_EVENT_STATE_OFF;
2091
2092 retry:
2093 if (!ctx->is_active) {
2094 __perf_event_mark_enabled(event);
2095 goto out;
2096 }
2097
2098 raw_spin_unlock_irq(&ctx->lock);
2099
2100 if (!task_function_call(task, __perf_event_enable, event))
2101 return;
2102
2103 raw_spin_lock_irq(&ctx->lock);
2104
2105 /*
2106 * If the context is active and the event is still off,
2107 * we need to retry the cross-call.
2108 */
2109 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
2110 /*
2111 * task could have been flipped by a concurrent
2112 * perf_event_context_sched_out()
2113 */
2114 task = ctx->task;
2115 goto retry;
2116 }
2117
2118 out:
2119 raw_spin_unlock_irq(&ctx->lock);
2120 }
2121 EXPORT_SYMBOL_GPL(perf_event_enable);
2122
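/*
 * Illustrative usage sketch: userspace reaches perf_event_refresh()
 * through PERF_EVENT_IOC_REFRESH, e.g.
 *
 *	ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 1);
 *
 * which arms the sampling event for one more overflow; each overflow
 * decrements event_limit, and once it hits zero the event is disabled
 * again (and the owner is signalled if fasync has been set up).
 */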
2123 int perf_event_refresh(struct perf_event *event, int refresh)
2124 {
2125 /*
2126 * not supported on inherited events
2127 */
2128 if (event->attr.inherit || !is_sampling_event(event))
2129 return -EINVAL;
2130
2131 atomic_add(refresh, &event->event_limit);
2132 perf_event_enable(event);
2133
2134 return 0;
2135 }
2136 EXPORT_SYMBOL_GPL(perf_event_refresh);
2137
2138 static void ctx_sched_out(struct perf_event_context *ctx,
2139 struct perf_cpu_context *cpuctx,
2140 enum event_type_t event_type)
2141 {
2142 struct perf_event *event;
2143 int is_active = ctx->is_active;
2144
2145 ctx->is_active &= ~event_type;
2146 if (likely(!ctx->nr_events))
2147 return;
2148
2149 update_context_time(ctx);
2150 update_cgrp_time_from_cpuctx(cpuctx);
2151 if (!ctx->nr_active)
2152 return;
2153
2154 perf_pmu_disable(ctx->pmu);
2155 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
2156 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2157 group_sched_out(event, cpuctx, ctx);
2158 }
2159
2160 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
2161 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
2162 group_sched_out(event, cpuctx, ctx);
2163 }
2164 perf_pmu_enable(ctx->pmu);
2165 }
2166
2167 /*
2168 * Test whether two contexts are equivalent, i.e. whether they have both been
2169 * cloned from the same version of the same context.
2170 *
2171 * Equivalence is measured using a generation number in the context that is
2172 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2173 * and list_del_event().
2174 */
2175 static int context_equiv(struct perf_event_context *ctx1,
2176 struct perf_event_context *ctx2)
2177 {
2178 /* Pinning disables the swap optimization */
2179 if (ctx1->pin_count || ctx2->pin_count)
2180 return 0;
2181
2182 /* If ctx1 is the parent of ctx2 */
2183 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2184 return 1;
2185
2186 /* If ctx2 is the parent of ctx1 */
2187 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2188 return 1;
2189
2190 /*
2191 * If ctx1 and ctx2 have the same parent, we flatten the parent
2192 * hierarchy, see perf_event_init_context().
2193 */
2194 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2195 ctx1->parent_gen == ctx2->parent_gen)
2196 return 1;
2197
2198 /* Unmatched */
2199 return 0;
2200 }
2201
2202 static void __perf_event_sync_stat(struct perf_event *event,
2203 struct perf_event *next_event)
2204 {
2205 u64 value;
2206
2207 if (!event->attr.inherit_stat)
2208 return;
2209
2210 /*
2211 * Update the event value. We cannot use perf_event_read()
2212 * because we're in the middle of a context switch and have IRQs
2213 * disabled, which upsets smp_call_function_single(); however,
2214 * we know the event must be on the current CPU, so we don't
2215 * need to use it.
2216 */
2217 switch (event->state) {
2218 case PERF_EVENT_STATE_ACTIVE:
2219 event->pmu->read(event);
2220 /* fall-through */
2221
2222 case PERF_EVENT_STATE_INACTIVE:
2223 update_event_times(event);
2224 break;
2225
2226 default:
2227 break;
2228 }
2229
2230 /*
2231 * In order to keep per-task stats reliable we need to flip the event
2232 * values when we flip the contexts.
2233 */
2234 value = local64_read(&next_event->count);
2235 value = local64_xchg(&event->count, value);
2236 local64_set(&next_event->count, value);
2237
2238 swap(event->total_time_enabled, next_event->total_time_enabled);
2239 swap(event->total_time_running, next_event->total_time_running);
2240
2241 /*
2242 * Since we swizzled the values, update the user visible data too.
2243 */
2244 perf_event_update_userpage(event);
2245 perf_event_update_userpage(next_event);
2246 }
2247
2248 static void perf_event_sync_stat(struct perf_event_context *ctx,
2249 struct perf_event_context *next_ctx)
2250 {
2251 struct perf_event *event, *next_event;
2252
2253 if (!ctx->nr_stat)
2254 return;
2255
2256 update_context_time(ctx);
2257
2258 event = list_first_entry(&ctx->event_list,
2259 struct perf_event, event_entry);
2260
2261 next_event = list_first_entry(&next_ctx->event_list,
2262 struct perf_event, event_entry);
2263
2264 while (&event->event_entry != &ctx->event_list &&
2265 &next_event->event_entry != &next_ctx->event_list) {
2266
2267 __perf_event_sync_stat(event, next_event);
2268
2269 event = list_next_entry(event, event_entry);
2270 next_event = list_next_entry(next_event, event_entry);
2271 }
2272 }
2273
2274 static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2275 struct task_struct *next)
2276 {
2277 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
2278 struct perf_event_context *next_ctx;
2279 struct perf_event_context *parent, *next_parent;
2280 struct perf_cpu_context *cpuctx;
2281 int do_switch = 1;
2282
2283 if (likely(!ctx))
2284 return;
2285
2286 cpuctx = __get_cpu_context(ctx);
2287 if (!cpuctx->task_ctx)
2288 return;
2289
2290 rcu_read_lock();
2291 next_ctx = next->perf_event_ctxp[ctxn];
2292 if (!next_ctx)
2293 goto unlock;
2294
2295 parent = rcu_dereference(ctx->parent_ctx);
2296 next_parent = rcu_dereference(next_ctx->parent_ctx);
2297
2298 /* If neither context has a parent context, they cannot be clones. */
2299 if (!parent && !next_parent)
2300 goto unlock;
2301
2302 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
2303 /*
2304 * Looks like the two contexts are clones, so we might be
2305 * able to optimize the context switch. We lock both
2306 * contexts and check that they are clones under the
2307 * lock (including re-checking that neither has been
2308 * uncloned in the meantime). It doesn't matter which
2309 * order we take the locks because no other cpu could
2310 * be trying to lock both of these tasks.
2311 */
2312 raw_spin_lock(&ctx->lock);
2313 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2314 if (context_equiv(ctx, next_ctx)) {
2315 /*
2316 * XXX do we need a memory barrier of sorts
2317 * wrt rcu_dereference() of perf_event_ctxp
2318 */
2319 task->perf_event_ctxp[ctxn] = next_ctx;
2320 next->perf_event_ctxp[ctxn] = ctx;
2321 ctx->task = next;
2322 next_ctx->task = task;
2323 do_switch = 0;
2324
2325 perf_event_sync_stat(ctx, next_ctx);
2326 }
2327 raw_spin_unlock(&next_ctx->lock);
2328 raw_spin_unlock(&ctx->lock);
2329 }
2330 unlock:
2331 rcu_read_unlock();
2332
2333 if (do_switch) {
2334 raw_spin_lock(&ctx->lock);
2335 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2336 cpuctx->task_ctx = NULL;
2337 raw_spin_unlock(&ctx->lock);
2338 }
2339 }
2340
2341 #define for_each_task_context_nr(ctxn) \
2342 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2343
2344 /*
2345 * Called from scheduler to remove the events of the current task,
2346 * with interrupts disabled.
2347 *
2348 * We stop each event and update the event value in event->count.
2349 *
2350 * This does not protect us against NMI, but disable()
2351 * sets the disabled bit in the control field of event _before_
2352 * accessing the event control register. If an NMI hits, then it will
2353 * not restart the event.
2354 */
2355 void __perf_event_task_sched_out(struct task_struct *task,
2356 struct task_struct *next)
2357 {
2358 int ctxn;
2359
2360 for_each_task_context_nr(ctxn)
2361 perf_event_context_sched_out(task, ctxn, next);
2362
2363 /*
2364 * if cgroup events exist on this CPU, then we need
2365 * to check if we have to switch out PMU state.
2366 * cgroup events are system-wide only
2367 */
2368 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2369 perf_cgroup_sched_out(task, next);
2370 }
2371
2372 static void task_ctx_sched_out(struct perf_event_context *ctx)
2373 {
2374 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2375
2376 if (!cpuctx->task_ctx)
2377 return;
2378
2379 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2380 return;
2381
2382 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2383 cpuctx->task_ctx = NULL;
2384 }
2385
2386 /*
2387 * Called with IRQs disabled
2388 */
2389 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2390 enum event_type_t event_type)
2391 {
2392 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2393 }
2394
2395 static void
2396 ctx_pinned_sched_in(struct perf_event_context *ctx,
2397 struct perf_cpu_context *cpuctx)
2398 {
2399 struct perf_event *event;
2400
2401 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2402 if (event->state <= PERF_EVENT_STATE_OFF)
2403 continue;
2404 if (!event_filter_match(event))
2405 continue;
2406
2407 /* may need to reset tstamp_enabled */
2408 if (is_cgroup_event(event))
2409 perf_cgroup_mark_enabled(event, ctx);
2410
2411 if (group_can_go_on(event, cpuctx, 1))
2412 group_sched_in(event, cpuctx, ctx);
2413
2414 /*
2415 * If this pinned group hasn't been scheduled,
2416 * put it in error state.
2417 */
2418 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2419 update_group_times(event);
2420 event->state = PERF_EVENT_STATE_ERROR;
2421 }
2422 }
2423 }
2424
2425 static void
2426 ctx_flexible_sched_in(struct perf_event_context *ctx,
2427 struct perf_cpu_context *cpuctx)
2428 {
2429 struct perf_event *event;
2430 int can_add_hw = 1;
2431
2432 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2433 /* Ignore events in OFF or ERROR state */
2434 if (event->state <= PERF_EVENT_STATE_OFF)
2435 continue;
2436 /*
2437 * Listen to the 'cpu' scheduling filter constraint
2438 * of events:
2439 */
2440 if (!event_filter_match(event))
2441 continue;
2442
2443 /* may need to reset tstamp_enabled */
2444 if (is_cgroup_event(event))
2445 perf_cgroup_mark_enabled(event, ctx);
2446
2447 if (group_can_go_on(event, cpuctx, can_add_hw)) {
2448 if (group_sched_in(event, cpuctx, ctx))
2449 can_add_hw = 0;
2450 }
2451 }
2452 }
2453
2454 static void
2455 ctx_sched_in(struct perf_event_context *ctx,
2456 struct perf_cpu_context *cpuctx,
2457 enum event_type_t event_type,
2458 struct task_struct *task)
2459 {
2460 u64 now;
2461 int is_active = ctx->is_active;
2462
2463 ctx->is_active |= event_type;
2464 if (likely(!ctx->nr_events))
2465 return;
2466
2467 now = perf_clock();
2468 ctx->timestamp = now;
2469 perf_cgroup_set_timestamp(task, ctx);
2470 /*
2471 * First go through the list and put on any pinned groups
2472 * in order to give them the best chance of going on.
2473 */
2474 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
2475 ctx_pinned_sched_in(ctx, cpuctx);
2476
2477 /* Then walk through the lower prio flexible groups */
2478 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
2479 ctx_flexible_sched_in(ctx, cpuctx);
2480 }
2481
2482 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
2483 enum event_type_t event_type,
2484 struct task_struct *task)
2485 {
2486 struct perf_event_context *ctx = &cpuctx->ctx;
2487
2488 ctx_sched_in(ctx, cpuctx, event_type, task);
2489 }
2490
2491 static void perf_event_context_sched_in(struct perf_event_context *ctx,
2492 struct task_struct *task)
2493 {
2494 struct perf_cpu_context *cpuctx;
2495
2496 cpuctx = __get_cpu_context(ctx);
2497 if (cpuctx->task_ctx == ctx)
2498 return;
2499
2500 perf_ctx_lock(cpuctx, ctx);
2501 perf_pmu_disable(ctx->pmu);
2502 /*
2503 * We want to keep the following priority order:
2504 * cpu pinned (that don't need to move), task pinned,
2505 * cpu flexible, task flexible.
2506 */
2507 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2508
2509 if (ctx->nr_events)
2510 cpuctx->task_ctx = ctx;
2511
2512 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2513
2514 perf_pmu_enable(ctx->pmu);
2515 perf_ctx_unlock(cpuctx, ctx);
2516
2517 /*
2518 * Since these rotations are per-cpu, we need to ensure the
2519 * cpu-context we got scheduled on is actually rotating.
2520 */
2521 perf_pmu_rotate_start(ctx->pmu);
2522 }
2523
2524 /*
2525 * When sampling the branch stack in system-wide mode, it may be necessary
2526 * to flush the stack on context switch. This happens when the branch
2527 * stack does not tag its entries with the pid of the current task.
2528 * Otherwise it becomes impossible to associate a branch entry with a
2529 * task. This ambiguity is more likely to appear when the branch stack
2530 * supports priv level filtering and the user sets it to monitor only
2531 * at the user level (which could be a useful measurement in system-wide
2532 * mode). In that case, the risk is high of having a branch stack with
2533 * branches from multiple tasks. Flushing may mean dropping the existing
2534 * entries or stashing them somewhere in the PMU specific code layer.
2535 *
2536 * This function provides the context switch callback to the lower code
2537 * layer. It is invoked ONLY when there is at least one system-wide context
2538 * with at least one active event using taken branch sampling.
2539 */
2540 static void perf_branch_stack_sched_in(struct task_struct *prev,
2541 struct task_struct *task)
2542 {
2543 struct perf_cpu_context *cpuctx;
2544 struct pmu *pmu;
2545 unsigned long flags;
2546
2547 /* no need to flush branch stack if not changing task */
2548 if (prev == task)
2549 return;
2550
2551 local_irq_save(flags);
2552
2553 rcu_read_lock();
2554
2555 list_for_each_entry_rcu(pmu, &pmus, entry) {
2556 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2557
2558 /*
2559 * check if the context has at least one
2560 * event using PERF_SAMPLE_BRANCH_STACK
2561 */
2562 if (cpuctx->ctx.nr_branch_stack > 0
2563 && pmu->flush_branch_stack) {
2564
2565 pmu = cpuctx->ctx.pmu;
2566
2567 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2568
2569 perf_pmu_disable(pmu);
2570
2571 pmu->flush_branch_stack();
2572
2573 perf_pmu_enable(pmu);
2574
2575 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2576 }
2577 }
2578
2579 rcu_read_unlock();
2580
2581 local_irq_restore(flags);
2582 }
2583
2584 /*
2585 * Called from scheduler to add the events of the current task
2586 * with interrupts disabled.
2587 *
2588 * We restore the event value and then enable it.
2589 *
2590 * This does not protect us against NMI, but enable()
2591 * sets the enabled bit in the control field of event _before_
2592 * accessing the event control register. If an NMI hits, then it will
2593 * keep the event running.
2594 */
2595 void __perf_event_task_sched_in(struct task_struct *prev,
2596 struct task_struct *task)
2597 {
2598 struct perf_event_context *ctx;
2599 int ctxn;
2600
2601 for_each_task_context_nr(ctxn) {
2602 ctx = task->perf_event_ctxp[ctxn];
2603 if (likely(!ctx))
2604 continue;
2605
2606 perf_event_context_sched_in(ctx, task);
2607 }
2608 /*
2609 * if cgroup events exist on this CPU, then we need
2610 * to check if we have to switch in PMU state.
2611 * cgroup events are system-wide only
2612 */
2613 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2614 perf_cgroup_sched_in(prev, task);
2615
2616 /* check for system-wide branch_stack events */
2617 if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
2618 perf_branch_stack_sched_in(prev, task);
2619 }
2620
2621 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2622 {
2623 u64 frequency = event->attr.sample_freq;
2624 u64 sec = NSEC_PER_SEC;
2625 u64 divisor, dividend;
2626
2627 int count_fls, nsec_fls, frequency_fls, sec_fls;
2628
2629 count_fls = fls64(count);
2630 nsec_fls = fls64(nsec);
2631 frequency_fls = fls64(frequency);
2632 sec_fls = 30;
2633
2634 /*
2635 * We got @count in @nsec, with a target of sample_freq HZ
2636 * the target period becomes:
2637 *
2638 * @count * 10^9
2639 * period = -------------------
2640 * @nsec * sample_freq
2641 *
2642 */
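/*
 * For example: 1,000,000 events counted over 10ms (10^7 ns) at a
 * target of sample_freq = 1000 HZ gives
 *
 *	period = (10^6 * 10^9) / (10^7 * 10^3) = 100,000
 *
 * i.e. roughly one sample every 100,000 events.
 */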
2643
2644 /*
2645 * Reduce accuracy by one bit such that @a and @b converge
2646 * to a similar magnitude.
2647 */
2648 #define REDUCE_FLS(a, b) \
2649 do { \
2650 if (a##_fls > b##_fls) { \
2651 a >>= 1; \
2652 a##_fls--; \
2653 } else { \
2654 b >>= 1; \
2655 b##_fls--; \
2656 } \
2657 } while (0)
2658
2659 /*
2660 * Reduce accuracy until either term fits in a u64, then proceed with
2661 * the other, so that finally we can do a u64/u64 division.
2662 */
2663 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2664 REDUCE_FLS(nsec, frequency);
2665 REDUCE_FLS(sec, count);
2666 }
2667
2668 if (count_fls + sec_fls > 64) {
2669 divisor = nsec * frequency;
2670
2671 while (count_fls + sec_fls > 64) {
2672 REDUCE_FLS(count, sec);
2673 divisor >>= 1;
2674 }
2675
2676 dividend = count * sec;
2677 } else {
2678 dividend = count * sec;
2679
2680 while (nsec_fls + frequency_fls > 64) {
2681 REDUCE_FLS(nsec, frequency);
2682 dividend >>= 1;
2683 }
2684
2685 divisor = nsec * frequency;
2686 }
2687
2688 if (!divisor)
2689 return dividend;
2690
2691 return div64_u64(dividend, divisor);
2692 }
2693
2694 static DEFINE_PER_CPU(int, perf_throttled_count);
2695 static DEFINE_PER_CPU(u64, perf_throttled_seq);
2696
2697 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
2698 {
2699 struct hw_perf_event *hwc = &event->hw;
2700 s64 period, sample_period;
2701 s64 delta;
2702
2703 period = perf_calculate_period(event, nsec, count);
2704
2705 delta = (s64)(period - hwc->sample_period);
2706 delta = (delta + 7) / 8; /* low pass filter */
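/*
 * I.e. move an eighth of the way towards the freshly computed target
 * each time. For example, with hwc->sample_period == 80000 and a
 * computed period of 160000, delta becomes 10000 and the new
 * sample_period is 90000; this damps noise in the estimate.
 */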
2707
2708 sample_period = hwc->sample_period + delta;
2709
2710 if (!sample_period)
2711 sample_period = 1;
2712
2713 hwc->sample_period = sample_period;
2714
2715 if (local64_read(&hwc->period_left) > 8*sample_period) {
2716 if (disable)
2717 event->pmu->stop(event, PERF_EF_UPDATE);
2718
2719 local64_set(&hwc->period_left, 0);
2720
2721 if (disable)
2722 event->pmu->start(event, PERF_EF_RELOAD);
2723 }
2724 }
2725
2726 /*
2727 * combine freq adjustment with unthrottling to avoid two passes over the
2728 * events. At the same time, make sure that having freq events does not
2729 * change the rate of unthrottling, as that would introduce bias.
2730 */
2731 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2732 int needs_unthr)
2733 {
2734 struct perf_event *event;
2735 struct hw_perf_event *hwc;
2736 u64 now, period = TICK_NSEC;
2737 s64 delta;
2738
2739 /*
2740 * we only need to iterate over all events if either:
2741 * - the context has events in frequency mode (needs freq adjust), or
2742 * - there are events to unthrottle on this cpu
2743 */
2744 if (!(ctx->nr_freq || needs_unthr))
2745 return;
2746
2747 raw_spin_lock(&ctx->lock);
2748 perf_pmu_disable(ctx->pmu);
2749
2750 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2751 if (event->state != PERF_EVENT_STATE_ACTIVE)
2752 continue;
2753
2754 if (!event_filter_match(event))
2755 continue;
2756
2757 perf_pmu_disable(event->pmu);
2758
2759 hwc = &event->hw;
2760
2761 if (hwc->interrupts == MAX_INTERRUPTS) {
2762 hwc->interrupts = 0;
2763 perf_log_throttle(event, 1);
2764 event->pmu->start(event, 0);
2765 }
2766
2767 if (!event->attr.freq || !event->attr.sample_freq)
2768 goto next;
2769
2770 /*
2771 * stop the event and update event->count
2772 */
2773 event->pmu->stop(event, PERF_EF_UPDATE);
2774
2775 now = local64_read(&event->count);
2776 delta = now - hwc->freq_count_stamp;
2777 hwc->freq_count_stamp = now;
2778
2779 /*
2780 * restart the event; reload only if the
2781 * value has changed. We have stopped the
2782 * event, so tell perf_adjust_period() about
2783 * it so that it doesn't stop the event a
2784 * second time.
2785 */
2786 if (delta > 0)
2787 perf_adjust_period(event, period, delta, false);
2788
2789 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
2790 next:
2791 perf_pmu_enable(event->pmu);
2792 }
2793
2794 perf_pmu_enable(ctx->pmu);
2795 raw_spin_unlock(&ctx->lock);
2796 }
2797
2798 /*
2799 * Round-robin a context's events:
2800 */
2801 static void rotate_ctx(struct perf_event_context *ctx)
2802 {
2803 /*
2804 * Rotate the first entry of the non-pinned groups to the end. Rotation might be
2805 * disabled by the inheritance code.
2806 */
2807 if (!ctx->rotate_disable)
2808 list_rotate_left(&ctx->flexible_groups);
2809 }
2810
2811 /*
2812 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2813 * because they're strictly cpu affine and rotate_start is called with IRQs
2814 * disabled, while rotate_context is called from IRQ context.
2815 */
2816 static int perf_rotate_context(struct perf_cpu_context *cpuctx)
2817 {
2818 struct perf_event_context *ctx = NULL;
2819 int rotate = 0, remove = 1;
2820
2821 if (cpuctx->ctx.nr_events) {
2822 remove = 0;
2823 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2824 rotate = 1;
2825 }
2826
2827 ctx = cpuctx->task_ctx;
2828 if (ctx && ctx->nr_events) {
2829 remove = 0;
2830 if (ctx->nr_events != ctx->nr_active)
2831 rotate = 1;
2832 }
2833
2834 if (!rotate)
2835 goto done;
2836
2837 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2838 perf_pmu_disable(cpuctx->ctx.pmu);
2839
2840 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2841 if (ctx)
2842 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
2843
2844 rotate_ctx(&cpuctx->ctx);
2845 if (ctx)
2846 rotate_ctx(ctx);
2847
2848 perf_event_sched_in(cpuctx, ctx, current);
2849
2850 perf_pmu_enable(cpuctx->ctx.pmu);
2851 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2852 done:
2853 if (remove)
2854 list_del_init(&cpuctx->rotation_list);
2855
2856 return rotate;
2857 }
2858
2859 #ifdef CONFIG_NO_HZ_FULL
2860 bool perf_event_can_stop_tick(void)
2861 {
2862 if (atomic_read(&nr_freq_events) ||
2863 __this_cpu_read(perf_throttled_count))
2864 return false;
2865 else
2866 return true;
2867 }
2868 #endif
2869
2870 void perf_event_task_tick(void)
2871 {
2872 struct list_head *head = &__get_cpu_var(rotation_list);
2873 struct perf_cpu_context *cpuctx, *tmp;
2874 struct perf_event_context *ctx;
2875 int throttled;
2876
2877 WARN_ON(!irqs_disabled());
2878
2879 __this_cpu_inc(perf_throttled_seq);
2880 throttled = __this_cpu_xchg(perf_throttled_count, 0);
2881
2882 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
2883 ctx = &cpuctx->ctx;
2884 perf_adjust_freq_unthr_context(ctx, throttled);
2885
2886 ctx = cpuctx->task_ctx;
2887 if (ctx)
2888 perf_adjust_freq_unthr_context(ctx, throttled);
2889 }
2890 }
2891
2892 static int event_enable_on_exec(struct perf_event *event,
2893 struct perf_event_context *ctx)
2894 {
2895 if (!event->attr.enable_on_exec)
2896 return 0;
2897
2898 event->attr.enable_on_exec = 0;
2899 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2900 return 0;
2901
2902 __perf_event_mark_enabled(event);
2903
2904 return 1;
2905 }
2906
2907 /*
2908 * Enable all of a task's events that have been marked enable-on-exec.
2909 * This expects task == current.
2910 */
2911 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2912 {
2913 struct perf_event *event;
2914 unsigned long flags;
2915 int enabled = 0;
2916 int ret;
2917
2918 local_irq_save(flags);
2919 if (!ctx || !ctx->nr_events)
2920 goto out;
2921
2922 /*
2923 * We must context-switch out cgroup events to avoid conflicts
2924 * when invoking perf_event_context_sched_in() later on
2925 * in this function. Otherwise we end up trying to
2926 * context-switch in cgroup events which are already scheduled
2927 * in.
2928 */
2929 perf_cgroup_sched_out(current, NULL);
2930
2931 raw_spin_lock(&ctx->lock);
2932 task_ctx_sched_out(ctx);
2933
2934 list_for_each_entry(event, &ctx->event_list, event_entry) {
2935 ret = event_enable_on_exec(event, ctx);
2936 if (ret)
2937 enabled = 1;
2938 }
2939
2940 /*
2941 * Unclone this context if we enabled any event.
2942 */
2943 if (enabled)
2944 unclone_ctx(ctx);
2945
2946 raw_spin_unlock(&ctx->lock);
2947
2948 /*
2949 * Also context-switches in cgroup events, if any:
2950 */
2951 perf_event_context_sched_in(ctx, ctx->task);
2952 out:
2953 local_irq_restore(flags);
2954 }
2955
2956 /*
2957 * Cross CPU call to read the hardware event
2958 */
2959 static void __perf_event_read(void *info)
2960 {
2961 struct perf_event *event = info;
2962 struct perf_event_context *ctx = event->ctx;
2963 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2964
2965 /*
2966 * If this is a task context, we need to check whether it is
2967 * the current task context of this cpu. If not it has been
2968 * scheduled out before the smp call arrived. In that case
2969 * event->count would have been updated to a recent sample
2970 * when the event was scheduled out.
2971 */
2972 if (ctx->task && cpuctx->task_ctx != ctx)
2973 return;
2974
2975 raw_spin_lock(&ctx->lock);
2976 if (ctx->is_active) {
2977 update_context_time(ctx);
2978 update_cgrp_time_from_event(event);
2979 }
2980 update_event_times(event);
2981 if (event->state == PERF_EVENT_STATE_ACTIVE)
2982 event->pmu->read(event);
2983 raw_spin_unlock(&ctx->lock);
2984 }
2985
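/*
 * The count visible to userspace is this event's own value plus the
 * values already folded in from children that have exited; see
 * sync_child_event() for where child_count is accumulated.
 */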
2986 static inline u64 perf_event_count(struct perf_event *event)
2987 {
2988 return local64_read(&event->count) + atomic64_read(&event->child_count);
2989 }
2990
2991 static u64 perf_event_read(struct perf_event *event)
2992 {
2993 /*
2994 * If event is enabled and currently active on a CPU, update the
2995 * value in the event structure:
2996 */
2997 if (event->state == PERF_EVENT_STATE_ACTIVE) {
2998 smp_call_function_single(event->oncpu,
2999 __perf_event_read, event, 1);
3000 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
3001 struct perf_event_context *ctx = event->ctx;
3002 unsigned long flags;
3003
3004 raw_spin_lock_irqsave(&ctx->lock, flags);
3005 /*
3006 * may read while context is not active
3007 * (e.g., thread is blocked), in that case
3008 * we cannot update context time
3009 */
3010 if (ctx->is_active) {
3011 update_context_time(ctx);
3012 update_cgrp_time_from_event(event);
3013 }
3014 update_event_times(event);
3015 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3016 }
3017
3018 return perf_event_count(event);
3019 }
3020
3021 /*
3022 * Initialize the perf_event context in a task_struct:
3023 */
3024 static void __perf_event_init_context(struct perf_event_context *ctx)
3025 {
3026 raw_spin_lock_init(&ctx->lock);
3027 mutex_init(&ctx->mutex);
3028 INIT_LIST_HEAD(&ctx->pinned_groups);
3029 INIT_LIST_HEAD(&ctx->flexible_groups);
3030 INIT_LIST_HEAD(&ctx->event_list);
3031 atomic_set(&ctx->refcount, 1);
3032 }
3033
3034 static struct perf_event_context *
3035 alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3036 {
3037 struct perf_event_context *ctx;
3038
3039 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3040 if (!ctx)
3041 return NULL;
3042
3043 __perf_event_init_context(ctx);
3044 if (task) {
3045 ctx->task = task;
3046 get_task_struct(task);
3047 }
3048 ctx->pmu = pmu;
3049
3050 return ctx;
3051 }
3052
3053 static struct task_struct *
3054 find_lively_task_by_vpid(pid_t vpid)
3055 {
3056 struct task_struct *task;
3057 int err;
3058
3059 rcu_read_lock();
3060 if (!vpid)
3061 task = current;
3062 else
3063 task = find_task_by_vpid(vpid);
3064 if (task)
3065 get_task_struct(task);
3066 rcu_read_unlock();
3067
3068 if (!task)
3069 return ERR_PTR(-ESRCH);
3070
3071 /* Reuse ptrace permission checks for now. */
3072 err = -EACCES;
3073 if (!ptrace_may_access(task, PTRACE_MODE_READ))
3074 goto errout;
3075
3076 return task;
3077 errout:
3078 put_task_struct(task);
3079 return ERR_PTR(err);
3080
3081 }
3082
3083 /*
3084 * Returns a matching context with refcount and pincount.
3085 */
3086 static struct perf_event_context *
3087 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
3088 {
3089 struct perf_event_context *ctx;
3090 struct perf_cpu_context *cpuctx;
3091 unsigned long flags;
3092 int ctxn, err;
3093
3094 if (!task) {
3095 /* Must be root to operate on a CPU event: */
3096 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3097 return ERR_PTR(-EACCES);
3098
3099 /*
3100 * We could be clever and allow attaching an event to an
3101 * offline CPU and activate it when the CPU comes up, but
3102 * that's for later.
3103 */
3104 if (!cpu_online(cpu))
3105 return ERR_PTR(-ENODEV);
3106
3107 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
3108 ctx = &cpuctx->ctx;
3109 get_ctx(ctx);
3110 ++ctx->pin_count;
3111
3112 return ctx;
3113 }
3114
3115 err = -EINVAL;
3116 ctxn = pmu->task_ctx_nr;
3117 if (ctxn < 0)
3118 goto errout;
3119
3120 retry:
3121 ctx = perf_lock_task_context(task, ctxn, &flags);
3122 if (ctx) {
3123 unclone_ctx(ctx);
3124 ++ctx->pin_count;
3125 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3126 } else {
3127 ctx = alloc_perf_context(pmu, task);
3128 err = -ENOMEM;
3129 if (!ctx)
3130 goto errout;
3131
3132 err = 0;
3133 mutex_lock(&task->perf_event_mutex);
3134 /*
3135 * If it has already passed perf_event_exit_task(),
3136 * we must see PF_EXITING; it takes this mutex too.
3137 */
3138 if (task->flags & PF_EXITING)
3139 err = -ESRCH;
3140 else if (task->perf_event_ctxp[ctxn])
3141 err = -EAGAIN;
3142 else {
3143 get_ctx(ctx);
3144 ++ctx->pin_count;
3145 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
3146 }
3147 mutex_unlock(&task->perf_event_mutex);
3148
3149 if (unlikely(err)) {
3150 put_ctx(ctx);
3151
3152 if (err == -EAGAIN)
3153 goto retry;
3154 goto errout;
3155 }
3156 }
3157
3158 return ctx;
3159
3160 errout:
3161 return ERR_PTR(err);
3162 }
3163
3164 static void perf_event_free_filter(struct perf_event *event);
3165
3166 static void free_event_rcu(struct rcu_head *head)
3167 {
3168 struct perf_event *event;
3169
3170 event = container_of(head, struct perf_event, rcu_head);
3171 if (event->ns)
3172 put_pid_ns(event->ns);
3173 perf_event_free_filter(event);
3174 kfree(event);
3175 }
3176
3177 static void ring_buffer_put(struct ring_buffer *rb);
3178 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
3179
3180 static void unaccount_event_cpu(struct perf_event *event, int cpu)
3181 {
3182 if (event->parent)
3183 return;
3184
3185 if (has_branch_stack(event)) {
3186 if (!(event->attach_state & PERF_ATTACH_TASK))
3187 atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
3188 }
3189 if (is_cgroup_event(event))
3190 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3191 }
3192
3193 static void unaccount_event(struct perf_event *event)
3194 {
3195 if (event->parent)
3196 return;
3197
3198 if (event->attach_state & PERF_ATTACH_TASK)
3199 static_key_slow_dec_deferred(&perf_sched_events);
3200 if (event->attr.mmap || event->attr.mmap_data)
3201 atomic_dec(&nr_mmap_events);
3202 if (event->attr.comm)
3203 atomic_dec(&nr_comm_events);
3204 if (event->attr.task)
3205 atomic_dec(&nr_task_events);
3206 if (event->attr.freq)
3207 atomic_dec(&nr_freq_events);
3208 if (is_cgroup_event(event))
3209 static_key_slow_dec_deferred(&perf_sched_events);
3210 if (has_branch_stack(event))
3211 static_key_slow_dec_deferred(&perf_sched_events);
3212
3213 unaccount_event_cpu(event, event->cpu);
3214 }
3215
3216 static void __free_event(struct perf_event *event)
3217 {
3218 if (!event->parent) {
3219 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3220 put_callchain_buffers();
3221 }
3222
3223 if (event->destroy)
3224 event->destroy(event);
3225
3226 if (event->ctx)
3227 put_ctx(event->ctx);
3228
3229 call_rcu(&event->rcu_head, free_event_rcu);
3230 }
3231 static void free_event(struct perf_event *event)
3232 {
3233 irq_work_sync(&event->pending);
3234
3235 unaccount_event(event);
3236
3237 if (event->rb) {
3238 struct ring_buffer *rb;
3239
3240 /*
3241 * Can happen when we close an event with re-directed output.
3242 *
3243 * Since we have a 0 refcount, perf_mmap_close() will skip
3244 * over us; possibly making our ring_buffer_put() the last.
3245 */
3246 mutex_lock(&event->mmap_mutex);
3247 rb = event->rb;
3248 if (rb) {
3249 rcu_assign_pointer(event->rb, NULL);
3250 ring_buffer_detach(event, rb);
3251 ring_buffer_put(rb); /* could be last */
3252 }
3253 mutex_unlock(&event->mmap_mutex);
3254 }
3255
3256 if (is_cgroup_event(event))
3257 perf_detach_cgroup(event);
3258
3259
3260 __free_event(event);
3261 }
3262
3263 int perf_event_release_kernel(struct perf_event *event)
3264 {
3265 struct perf_event_context *ctx = event->ctx;
3266
3267 WARN_ON_ONCE(ctx->parent_ctx);
3268 /*
3269 * There are two ways this annotation is useful:
3270 *
3271 * 1) there is a lock recursion from perf_event_exit_task();
3272 * see the comment there.
3273 *
3274 * 2) there is a lock-inversion with mmap_sem through
3275 * perf_event_read_group(), which takes faults while
3276 * holding ctx->mutex, however this is called after
3277 * the last filedesc died, so there is no possibility
3278 * to trigger the AB-BA case.
3279 */
3280 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
3281 raw_spin_lock_irq(&ctx->lock);
3282 perf_group_detach(event);
3283 raw_spin_unlock_irq(&ctx->lock);
3284 perf_remove_from_context(event);
3285 mutex_unlock(&ctx->mutex);
3286
3287 free_event(event);
3288
3289 return 0;
3290 }
3291 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
3292
3293 /*
3294 * Called when the last reference to the file is gone.
3295 */
3296 static void put_event(struct perf_event *event)
3297 {
3298 struct task_struct *owner;
3299
3300 if (!atomic_long_dec_and_test(&event->refcount))
3301 return;
3302
3303 rcu_read_lock();
3304 owner = ACCESS_ONCE(event->owner);
3305 /*
3306 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
3307 * !owner it means the list deletion is complete and we can indeed
3308 * free this event, otherwise we need to serialize on
3309 * owner->perf_event_mutex.
3310 */
3311 smp_read_barrier_depends();
3312 if (owner) {
3313 /*
3314 * Since delayed_put_task_struct() also drops the last
3315 * task reference we can safely take a new reference
3316 * while holding the rcu_read_lock().
3317 */
3318 get_task_struct(owner);
3319 }
3320 rcu_read_unlock();
3321
3322 if (owner) {
3323 mutex_lock(&owner->perf_event_mutex);
3324 /*
3325 * We have to re-check the event->owner field; if it is cleared
3326 * we raced with perf_event_exit_task(). Acquiring the mutex
3327 * ensures they're done, and we can proceed with freeing the
3328 * event.
3329 */
3330 if (event->owner)
3331 list_del_init(&event->owner_entry);
3332 mutex_unlock(&owner->perf_event_mutex);
3333 put_task_struct(owner);
3334 }
3335
3336 perf_event_release_kernel(event);
3337 }
3338
3339 static int perf_release(struct inode *inode, struct file *file)
3340 {
3341 put_event(file->private_data);
3342 return 0;
3343 }
3344
3345 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
3346 {
3347 struct perf_event *child;
3348 u64 total = 0;
3349
3350 *enabled = 0;
3351 *running = 0;
3352
3353 mutex_lock(&event->child_mutex);
3354 total += perf_event_read(event);
3355 *enabled += event->total_time_enabled +
3356 atomic64_read(&event->child_total_time_enabled);
3357 *running += event->total_time_running +
3358 atomic64_read(&event->child_total_time_running);
3359
3360 list_for_each_entry(child, &event->child_list, child_list) {
3361 total += perf_event_read(child);
3362 *enabled += child->total_time_enabled;
3363 *running += child->total_time_running;
3364 }
3365 mutex_unlock(&event->child_mutex);
3366
3367 return total;
3368 }
3369 EXPORT_SYMBOL_GPL(perf_event_read_value);
3370
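/*
 * Roughly, the buffer returned for a PERF_FORMAT_GROUP read() is laid
 * out as described for read_format in include/uapi/linux/perf_event.h:
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value;
 *	    { u64 id; } && PERF_FORMAT_ID
 *	  } cntr[nr];
 *	}
 *
 * with the group leader's value first, followed by its siblings.
 */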
3371 static int perf_event_read_group(struct perf_event *event,
3372 u64 read_format, char __user *buf)
3373 {
3374 struct perf_event *leader = event->group_leader, *sub;
3375 int n = 0, size = 0, ret = -EFAULT;
3376 struct perf_event_context *ctx = leader->ctx;
3377 u64 values[5];
3378 u64 count, enabled, running;
3379
3380 mutex_lock(&ctx->mutex);
3381 count = perf_event_read_value(leader, &enabled, &running);
3382
3383 values[n++] = 1 + leader->nr_siblings;
3384 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3385 values[n++] = enabled;
3386 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3387 values[n++] = running;
3388 values[n++] = count;
3389 if (read_format & PERF_FORMAT_ID)
3390 values[n++] = primary_event_id(leader);
3391
3392 size = n * sizeof(u64);
3393
3394 if (copy_to_user(buf, values, size))
3395 goto unlock;
3396
3397 ret = size;
3398
3399 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3400 n = 0;
3401
3402 values[n++] = perf_event_read_value(sub, &enabled, &running);
3403 if (read_format & PERF_FORMAT_ID)
3404 values[n++] = primary_event_id(sub);
3405
3406 size = n * sizeof(u64);
3407
3408 if (copy_to_user(buf + ret, values, size)) {
3409 ret = -EFAULT;
3410 goto unlock;
3411 }
3412
3413 ret += size;
3414 }
3415 unlock:
3416 mutex_unlock(&ctx->mutex);
3417
3418 return ret;
3419 }
3420
3421 static int perf_event_read_one(struct perf_event *event,
3422 u64 read_format, char __user *buf)
3423 {
3424 u64 enabled, running;
3425 u64 values[4];
3426 int n = 0;
3427
3428 values[n++] = perf_event_read_value(event, &enabled, &running);
3429 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3430 values[n++] = enabled;
3431 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3432 values[n++] = running;
3433 if (read_format & PERF_FORMAT_ID)
3434 values[n++] = primary_event_id(event);
3435
3436 if (copy_to_user(buf, values, n * sizeof(u64)))
3437 return -EFAULT;
3438
3439 return n * sizeof(u64);
3440 }
3441
3442 /*
3443 * Read the performance event - simple non-blocking version for now
3444 */
3445 static ssize_t
3446 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
3447 {
3448 u64 read_format = event->attr.read_format;
3449 int ret;
3450
3451 /*
3452 * Return end-of-file for a read on an event that is in
3453 * error state (i.e. because it was pinned but it couldn't be
3454 * scheduled on to the CPU at some point).
3455 */
3456 if (event->state == PERF_EVENT_STATE_ERROR)
3457 return 0;
3458
3459 if (count < event->read_size)
3460 return -ENOSPC;
3461
3462 WARN_ON_ONCE(event->ctx->parent_ctx);
3463 if (read_format & PERF_FORMAT_GROUP)
3464 ret = perf_event_read_group(event, read_format, buf);
3465 else
3466 ret = perf_event_read_one(event, read_format, buf);
3467
3468 return ret;
3469 }
3470
3471 static ssize_t
3472 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3473 {
3474 struct perf_event *event = file->private_data;
3475
3476 return perf_read_hw(event, buf, count);
3477 }
3478
3479 static unsigned int perf_poll(struct file *file, poll_table *wait)
3480 {
3481 struct perf_event *event = file->private_data;
3482 struct ring_buffer *rb;
3483 unsigned int events = POLL_HUP;
3484
3485 /*
3486 * Pin the event->rb by taking event->mmap_mutex; otherwise
3487 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
3488 */
3489 mutex_lock(&event->mmap_mutex);
3490 rb = event->rb;
3491 if (rb)
3492 events = atomic_xchg(&rb->poll, 0);
3493 mutex_unlock(&event->mmap_mutex);
3494
3495 poll_wait(file, &event->waitq, wait);
3496
3497 return events;
3498 }
3499
3500 static void perf_event_reset(struct perf_event *event)
3501 {
3502 (void)perf_event_read(event);
3503 local64_set(&event->count, 0);
3504 perf_event_update_userpage(event);
3505 }
3506
3507 /*
3508 * Holding the top-level event's child_mutex means that any
3509 * descendant process that has inherited this event will block
3510 * in sync_child_event if it goes to exit, thus satisfying the
3511 * task existence requirements of perf_event_enable/disable.
3512 */
3513 static void perf_event_for_each_child(struct perf_event *event,
3514 void (*func)(struct perf_event *))
3515 {
3516 struct perf_event *child;
3517
3518 WARN_ON_ONCE(event->ctx->parent_ctx);
3519 mutex_lock(&event->child_mutex);
3520 func(event);
3521 list_for_each_entry(child, &event->child_list, child_list)
3522 func(child);
3523 mutex_unlock(&event->child_mutex);
3524 }
3525
3526 static void perf_event_for_each(struct perf_event *event,
3527 void (*func)(struct perf_event *))
3528 {
3529 struct perf_event_context *ctx = event->ctx;
3530 struct perf_event *sibling;
3531
3532 WARN_ON_ONCE(ctx->parent_ctx);
3533 mutex_lock(&ctx->mutex);
3534 event = event->group_leader;
3535
3536 perf_event_for_each_child(event, func);
3537 list_for_each_entry(sibling, &event->sibling_list, group_entry)
3538 perf_event_for_each_child(sibling, func);
3539 mutex_unlock(&ctx->mutex);
3540 }
3541
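/*
 * Illustrative usage sketch: userspace reaches perf_event_period()
 * through PERF_EVENT_IOC_PERIOD, e.g.
 *
 *	u64 period = 200000;
 *	ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period);
 *
 * which updates attr.sample_period and hw.sample_period (or
 * attr.sample_freq for a frequency-mode event) under ctx->lock.
 */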
3542 static int perf_event_period(struct perf_event *event, u64 __user *arg)
3543 {
3544 struct perf_event_context *ctx = event->ctx;
3545 int ret = 0;
3546 u64 value;
3547
3548 if (!is_sampling_event(event))
3549 return -EINVAL;
3550
3551 if (copy_from_user(&value, arg, sizeof(value)))
3552 return -EFAULT;
3553
3554 if (!value)
3555 return -EINVAL;
3556
3557 raw_spin_lock_irq(&ctx->lock);
3558 if (event->attr.freq) {
3559 if (value > sysctl_perf_event_sample_rate) {
3560 ret = -EINVAL;
3561 goto unlock;
3562 }
3563
3564 event->attr.sample_freq = value;
3565 } else {
3566 event->attr.sample_period = value;
3567 event->hw.sample_period = value;
3568 }
3569 unlock:
3570 raw_spin_unlock_irq(&ctx->lock);
3571
3572 return ret;
3573 }
3574
3575 static const struct file_operations perf_fops;
3576
3577 static inline int perf_fget_light(int fd, struct fd *p)
3578 {
3579 struct fd f = fdget(fd);
3580 if (!f.file)
3581 return -EBADF;
3582
3583 if (f.file->f_op != &perf_fops) {
3584 fdput(f);
3585 return -EBADF;
3586 }
3587 *p = f;
3588 return 0;
3589 }
3590
3591 static int perf_event_set_output(struct perf_event *event,
3592 struct perf_event *output_event);
3593 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3594
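/*
 * Illustrative usage sketch: for example
 *
 *	ioctl(group_leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *
 * lands here with func == perf_event_enable and, because of the group
 * flag, perf_event_for_each() walks the leader, all of its siblings
 * and their inherited children.
 */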
3595 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3596 {
3597 struct perf_event *event = file->private_data;
3598 void (*func)(struct perf_event *);
3599 u32 flags = arg;
3600
3601 switch (cmd) {
3602 case PERF_EVENT_IOC_ENABLE:
3603 func = perf_event_enable;
3604 break;
3605 case PERF_EVENT_IOC_DISABLE:
3606 func = perf_event_disable;
3607 break;
3608 case PERF_EVENT_IOC_RESET:
3609 func = perf_event_reset;
3610 break;
3611
3612 case PERF_EVENT_IOC_REFRESH:
3613 return perf_event_refresh(event, arg);
3614
3615 case PERF_EVENT_IOC_PERIOD:
3616 return perf_event_period(event, (u64 __user *)arg);
3617
3618 case PERF_EVENT_IOC_ID:
3619 {
3620 u64 id = primary_event_id(event);
3621
3622 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
3623 return -EFAULT;
3624 return 0;
3625 }
3626
3627 case PERF_EVENT_IOC_SET_OUTPUT:
3628 {
3629 int ret;
3630 if (arg != -1) {
3631 struct perf_event *output_event;
3632 struct fd output;
3633 ret = perf_fget_light(arg, &output);
3634 if (ret)
3635 return ret;
3636 output_event = output.file->private_data;
3637 ret = perf_event_set_output(event, output_event);
3638 fdput(output);
3639 } else {
3640 ret = perf_event_set_output(event, NULL);
3641 }
3642 return ret;
3643 }
3644
3645 case PERF_EVENT_IOC_SET_FILTER:
3646 return perf_event_set_filter(event, (void __user *)arg);
3647
3648 default:
3649 return -ENOTTY;
3650 }
3651
3652 if (flags & PERF_IOC_FLAG_GROUP)
3653 perf_event_for_each(event, func);
3654 else
3655 perf_event_for_each_child(event, func);
3656
3657 return 0;
3658 }
3659
3660 int perf_event_task_enable(void)
3661 {
3662 struct perf_event *event;
3663
3664 mutex_lock(&current->perf_event_mutex);
3665 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3666 perf_event_for_each_child(event, perf_event_enable);
3667 mutex_unlock(&current->perf_event_mutex);
3668
3669 return 0;
3670 }
3671
3672 int perf_event_task_disable(void)
3673 {
3674 struct perf_event *event;
3675
3676 mutex_lock(&current->perf_event_mutex);
3677 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3678 perf_event_for_each_child(event, perf_event_disable);
3679 mutex_unlock(&current->perf_event_mutex);
3680
3681 return 0;
3682 }
3683
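/*
 * The value computed here ends up in perf_event_mmap_page::index;
 * roughly, a non-zero value tells userspace which hardware counter
 * (1-based) it may read directly (e.g. via RDPMC on x86), while 0
 * means it must fall back to the read() syscall.
 */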
3684 static int perf_event_index(struct perf_event *event)
3685 {
3686 if (event->hw.state & PERF_HES_STOPPED)
3687 return 0;
3688
3689 if (event->state != PERF_EVENT_STATE_ACTIVE)
3690 return 0;
3691
3692 return event->pmu->event_idx(event);
3693 }
3694
3695 static void calc_timer_values(struct perf_event *event,
3696 u64 *now,
3697 u64 *enabled,
3698 u64 *running)
3699 {
3700 u64 ctx_time;
3701
3702 *now = perf_clock();
3703 ctx_time = event->shadow_ctx_time + *now;
3704 *enabled = ctx_time - event->tstamp_enabled;
3705 *running = ctx_time - event->tstamp_running;
3706 }
3707
3708 static void perf_event_init_userpage(struct perf_event *event)
3709 {
3710 struct perf_event_mmap_page *userpg;
3711 struct ring_buffer *rb;
3712
3713 rcu_read_lock();
3714 rb = rcu_dereference(event->rb);
3715 if (!rb)
3716 goto unlock;
3717
3718 userpg = rb->user_page;
3719
3720 /* Allow new userspace to detect that bit 0 is deprecated */
3721 userpg->cap_bit0_is_deprecated = 1;
3722 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
3723
3724 unlock:
3725 rcu_read_unlock();
3726 }
3727
3728 void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3729 {
3730 }
3731
3732 /*
3733 * Callers need to ensure there can be no nesting of this function, otherwise
3734 * the seqlock logic goes bad. We cannot serialize this because the arch
3735 * code calls this from NMI context.
3736 */
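/*
 * Userspace is expected to pair with this using a seqlock-style retry
 * loop, roughly:
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		... read index, offset, time_enabled, time_running ...
 *		barrier();
 *	} while (pc->lock != seq);
 */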
3737 void perf_event_update_userpage(struct perf_event *event)
3738 {
3739 struct perf_event_mmap_page *userpg;
3740 struct ring_buffer *rb;
3741 u64 enabled, running, now;
3742
3743 rcu_read_lock();
3744 rb = rcu_dereference(event->rb);
3745 if (!rb)
3746 goto unlock;
3747
3748 /*
3749 * compute total_time_enabled, total_time_running
3750 * based on snapshot values taken when the event
3751 * was last scheduled in.
3752 *
3753 * we cannot simply call update_context_time()
3754 * because of locking issues, as we can be called in
3755 * NMI context
3756 */
3757 calc_timer_values(event, &now, &enabled, &running);
3758
3759 userpg = rb->user_page;
3760 /*
3761 * Disable preemption so as not to let the corresponding user-space
3762 * spin too long if we get preempted.
3763 */
3764 preempt_disable();
3765 ++userpg->lock;
3766 barrier();
3767 userpg->index = perf_event_index(event);
3768 userpg->offset = perf_event_count(event);
3769 if (userpg->index)
3770 userpg->offset -= local64_read(&event->hw.prev_count);
3771
3772 userpg->time_enabled = enabled +
3773 atomic64_read(&event->child_total_time_enabled);
3774
3775 userpg->time_running = running +
3776 atomic64_read(&event->child_total_time_running);
3777
3778 arch_perf_update_userpage(userpg, now);
3779
3780 barrier();
3781 ++userpg->lock;
3782 preempt_enable();
3783 unlock:
3784 rcu_read_unlock();
3785 }
3786
3787 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3788 {
3789 struct perf_event *event = vma->vm_file->private_data;
3790 struct ring_buffer *rb;
3791 int ret = VM_FAULT_SIGBUS;
3792
3793 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3794 if (vmf->pgoff == 0)
3795 ret = 0;
3796 return ret;
3797 }
3798
3799 rcu_read_lock();
3800 rb = rcu_dereference(event->rb);
3801 if (!rb)
3802 goto unlock;
3803
3804 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3805 goto unlock;
3806
3807 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
3808 if (!vmf->page)
3809 goto unlock;
3810
3811 get_page(vmf->page);
3812 vmf->page->mapping = vma->vm_file->f_mapping;
3813 vmf->page->index = vmf->pgoff;
3814
3815 ret = 0;
3816 unlock:
3817 rcu_read_unlock();
3818
3819 return ret;
3820 }
3821
3822 static void ring_buffer_attach(struct perf_event *event,
3823 struct ring_buffer *rb)
3824 {
3825 unsigned long flags;
3826
3827 if (!list_empty(&event->rb_entry))
3828 return;
3829
3830 spin_lock_irqsave(&rb->event_lock, flags);
3831 if (list_empty(&event->rb_entry))
3832 list_add(&event->rb_entry, &rb->event_list);
3833 spin_unlock_irqrestore(&rb->event_lock, flags);
3834 }
3835
3836 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
3837 {
3838 unsigned long flags;
3839
3840 if (list_empty(&event->rb_entry))
3841 return;
3842
3843 spin_lock_irqsave(&rb->event_lock, flags);
3844 list_del_init(&event->rb_entry);
3845 wake_up_all(&event->waitq);
3846 spin_unlock_irqrestore(&rb->event_lock, flags);
3847 }
3848
3849 static void ring_buffer_wakeup(struct perf_event *event)
3850 {
3851 struct ring_buffer *rb;
3852
3853 rcu_read_lock();
3854 rb = rcu_dereference(event->rb);
3855 if (rb) {
3856 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3857 wake_up_all(&event->waitq);
3858 }
3859 rcu_read_unlock();
3860 }
3861
3862 static void rb_free_rcu(struct rcu_head *rcu_head)
3863 {
3864 struct ring_buffer *rb;
3865
3866 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3867 rb_free(rb);
3868 }
3869
3870 static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3871 {
3872 struct ring_buffer *rb;
3873
3874 rcu_read_lock();
3875 rb = rcu_dereference(event->rb);
3876 if (rb) {
3877 if (!atomic_inc_not_zero(&rb->refcount))
3878 rb = NULL;
3879 }
3880 rcu_read_unlock();
3881
3882 return rb;
3883 }
3884
3885 static void ring_buffer_put(struct ring_buffer *rb)
3886 {
3887 if (!atomic_dec_and_test(&rb->refcount))
3888 return;
3889
3890 WARN_ON_ONCE(!list_empty(&rb->event_list));
3891
3892 call_rcu(&rb->rcu_head, rb_free_rcu);
3893 }
3894
3895 static void perf_mmap_open(struct vm_area_struct *vma)
3896 {
3897 struct perf_event *event = vma->vm_file->private_data;
3898
3899 atomic_inc(&event->mmap_count);
3900 atomic_inc(&event->rb->mmap_count);
3901 }
3902
3903 /*
3904 * A buffer can be mmap()ed multiple times; either directly through the same
3905 * event, or through other events by use of perf_event_set_output().
3906 *
3907 * In order to undo the VM accounting done by perf_mmap() we need to destroy
3908 * the buffer here, where we still have a VM context. This means we need
3909 * to detach all events redirecting to us.
3910 */
3911 static void perf_mmap_close(struct vm_area_struct *vma)
3912 {
3913 struct perf_event *event = vma->vm_file->private_data;
3914
3915 struct ring_buffer *rb = event->rb;
3916 struct user_struct *mmap_user = rb->mmap_user;
3917 int mmap_locked = rb->mmap_locked;
3918 unsigned long size = perf_data_size(rb);
3919
3920 atomic_dec(&rb->mmap_count);
3921
3922 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
3923 return;
3924
3925 /* Detach current event from the buffer. */
3926 rcu_assign_pointer(event->rb, NULL);
3927 ring_buffer_detach(event, rb);
3928 mutex_unlock(&event->mmap_mutex);
3929
3930 /* If there are still other mmap()s of this buffer, we're done. */
3931 if (atomic_read(&rb->mmap_count)) {
3932 ring_buffer_put(rb); /* can't be last */
3933 return;
3934 }
3935
3936 /*
3937 * No other mmap()s, detach from all other events that might redirect
3938 * into the now unreachable buffer. Somewhat complicated by the
3939 * fact that rb::event_lock otherwise nests inside mmap_mutex.
3940 */
3941 again:
3942 rcu_read_lock();
3943 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
3944 if (!atomic_long_inc_not_zero(&event->refcount)) {
3945 /*
3946 * This event is en-route to free_event() which will
3947 * detach it and remove it from the list.
3948 */
3949 continue;
3950 }
3951 rcu_read_unlock();
3952
3953 mutex_lock(&event->mmap_mutex);
3954 /*
3955 * Check we didn't race with perf_event_set_output() which can
3956 * swizzle the rb from under us while we were waiting to
3957 * acquire mmap_mutex.
3958 *
3959 * If we find a different rb, ignore this event; a later
3960 * iteration will no longer find it on the list. We still
3961 * have to restart the iteration to make sure we're not now
3962 * iterating the wrong list.
3963 */
3964 if (event->rb == rb) {
3965 rcu_assign_pointer(event->rb, NULL);
3966 ring_buffer_detach(event, rb);
3967 ring_buffer_put(rb); /* can't be last, we still have one */
3968 }
3969 mutex_unlock(&event->mmap_mutex);
3970 put_event(event);
3971
3972 /*
3973 * Restart the iteration; either we're on the wrong list or
3974 * we destroyed its integrity by doing a deletion.
3975 */
3976 goto again;
3977 }
3978 rcu_read_unlock();
3979
3980 /*
3981 * It could be that there are still a few 0-ref events on the list; they'll
3982 * get cleaned up by free_event() -- they'll also still have their
3983 * ref on the rb and will free it whenever they are done with it.
3984 *
3985 * Aside from that, this buffer is 'fully' detached and unmapped;
3986 * undo the VM accounting.
3987 */
3988
3989 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
3990 vma->vm_mm->pinned_vm -= mmap_locked;
3991 free_uid(mmap_user);
3992
3993 ring_buffer_put(rb); /* could be last */
3994 }
3995
3996 static const struct vm_operations_struct perf_mmap_vmops = {
3997 .open = perf_mmap_open,
3998 .close = perf_mmap_close,
3999 .fault = perf_mmap_fault,
4000 .page_mkwrite = perf_mmap_fault,
4001 };
4002
4003 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
4004 {
4005 struct perf_event *event = file->private_data;
4006 unsigned long user_locked, user_lock_limit;
4007 struct user_struct *user = current_user();
4008 unsigned long locked, lock_limit;
4009 struct ring_buffer *rb;
4010 unsigned long vma_size;
4011 unsigned long nr_pages;
4012 long user_extra, extra;
4013 int ret = 0, flags = 0;
4014
4015 /*
4016 * Don't allow mmap() of inherited per-task counters. This would
4017 * create a performance issue due to all children writing to the
4018 * same rb.
4019 */
4020 if (event->cpu == -1 && event->attr.inherit)
4021 return -EINVAL;
4022
4023 if (!(vma->vm_flags & VM_SHARED))
4024 return -EINVAL;
4025
4026 vma_size = vma->vm_end - vma->vm_start;
4027 nr_pages = (vma_size / PAGE_SIZE) - 1;
4028
4029 /*
4030 * If we have rb pages, ensure their count is a power of two, so we
4031 * can use bitmasks instead of modulo.
4032 */
4033 if (nr_pages != 0 && !is_power_of_2(nr_pages))
4034 return -EINVAL;
4035
4036 if (vma_size != PAGE_SIZE * (1 + nr_pages))
4037 return -EINVAL;
4038
4039 if (vma->vm_pgoff != 0)
4040 return -EINVAL;
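/*
 * So a typical userspace mapping looks something like
 *
 *	mmap(NULL, (1 + 8) * page_size, PROT_READ | PROT_WRITE,
 *	     MAP_SHARED, perf_fd, 0);
 *
 * i.e. the perf_event_mmap_page followed by a power-of-two number of
 * data pages (eight here); the power-of-two rule above is what lets
 * the ring buffer mask instead of taking a modulo on every write.
 */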
4041
4042 WARN_ON_ONCE(event->ctx->parent_ctx);
4043 again:
4044 mutex_lock(&event->mmap_mutex);
4045 if (event->rb) {
4046 if (event->rb->nr_pages != nr_pages) {
4047 ret = -EINVAL;
4048 goto unlock;
4049 }
4050
4051 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
4052 /*
4053 * Raced against perf_mmap_close() through
4054 * perf_event_set_output(). Try again, hope for better
4055 * luck.
4056 */
4057 mutex_unlock(&event->mmap_mutex);
4058 goto again;
4059 }
4060
4061 goto unlock;
4062 }
4063
4064 user_extra = nr_pages + 1;
4065 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
4066
4067 /*
4068 * Increase the limit linearly with more CPUs:
4069 */
4070 user_lock_limit *= num_online_cpus();
4071
4072 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
4073
4074 extra = 0;
4075 if (user_locked > user_lock_limit)
4076 extra = user_locked - user_lock_limit;
4077
4078 lock_limit = rlimit(RLIMIT_MEMLOCK);
4079 lock_limit >>= PAGE_SHIFT;
4080 locked = vma->vm_mm->pinned_vm + extra;
4081
4082 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
4083 !capable(CAP_IPC_LOCK)) {
4084 ret = -EPERM;
4085 goto unlock;
4086 }
4087
4088 WARN_ON(event->rb);
4089
4090 if (vma->vm_flags & VM_WRITE)
4091 flags |= RING_BUFFER_WRITABLE;
4092
4093 rb = rb_alloc(nr_pages,
4094 event->attr.watermark ? event->attr.wakeup_watermark : 0,
4095 event->cpu, flags);
4096
4097 if (!rb) {
4098 ret = -ENOMEM;
4099 goto unlock;
4100 }
4101
4102 atomic_set(&rb->mmap_count, 1);
4103 rb->mmap_locked = extra;
4104 rb->mmap_user = get_current_user();
4105
4106 atomic_long_add(user_extra, &user->locked_vm);
4107 vma->vm_mm->pinned_vm += extra;
4108
4109 ring_buffer_attach(event, rb);
4110 rcu_assign_pointer(event->rb, rb);
4111
4112 perf_event_init_userpage(event);
4113 perf_event_update_userpage(event);
4114
4115 unlock:
4116 if (!ret)
4117 atomic_inc(&event->mmap_count);
4118 mutex_unlock(&event->mmap_mutex);
4119
4120 /*
4121 * Since pinned accounting is per vm we cannot allow fork() to copy our
4122 * vma.
4123 */
4124 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
4125 vma->vm_ops = &perf_mmap_vmops;
4126
4127 return ret;
4128 }
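/*
 * A rough userspace sketch of the constraints enforced above; the attr
 * setup is assumed to exist elsewhere and the data-page count is
 * illustrative.  A mapping must be MAP_SHARED, start at file offset 0
 * and span 1 + 2^n pages: one metadata page plus a power-of-two data
 * area.
 *
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	size_t len = (1 + 8) * sysconf(_SC_PAGESIZE);	// meta + 8 data pages
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct perf_event_mmap_page *meta = base;	// control page
 *
 * A private mapping, a non-power-of-two number of data pages or a
 * non-zero pgoff gets -EINVAL from perf_mmap().
 */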
4129
4130 static int perf_fasync(int fd, struct file *filp, int on)
4131 {
4132 struct inode *inode = file_inode(filp);
4133 struct perf_event *event = filp->private_data;
4134 int retval;
4135
4136 mutex_lock(&inode->i_mutex);
4137 retval = fasync_helper(fd, filp, on, &event->fasync);
4138 mutex_unlock(&inode->i_mutex);
4139
4140 if (retval < 0)
4141 return retval;
4142
4143 return 0;
4144 }
4145
4146 static const struct file_operations perf_fops = {
4147 .llseek = no_llseek,
4148 .release = perf_release,
4149 .read = perf_read,
4150 .poll = perf_poll,
4151 .unlocked_ioctl = perf_ioctl,
4152 .compat_ioctl = perf_ioctl,
4153 .mmap = perf_mmap,
4154 .fasync = perf_fasync,
4155 };
4156
4157 /*
4158 * Perf event wakeup
4159 *
4160 * If there's data, ensure we set the poll() state and publish everything
4161 * to user-space before waking everybody up.
4162 */
4163
4164 void perf_event_wakeup(struct perf_event *event)
4165 {
4166 ring_buffer_wakeup(event);
4167
4168 if (event->pending_kill) {
4169 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
4170 event->pending_kill = 0;
4171 }
4172 }
4173
4174 static void perf_pending_event(struct irq_work *entry)
4175 {
4176 struct perf_event *event = container_of(entry,
4177 struct perf_event, pending);
4178
4179 if (event->pending_disable) {
4180 event->pending_disable = 0;
4181 __perf_event_disable(event);
4182 }
4183
4184 if (event->pending_wakeup) {
4185 event->pending_wakeup = 0;
4186 perf_event_wakeup(event);
4187 }
4188 }
4189
4190 /*
4191 * We assume KVM is the only implementation supporting the callbacks.
4192 * Later on, we might change it to a list if there is
4193 * another virtualization implementation supporting the callbacks.
4194 */
4195 struct perf_guest_info_callbacks *perf_guest_cbs;
4196
4197 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4198 {
4199 perf_guest_cbs = cbs;
4200 return 0;
4201 }
4202 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
4203
4204 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4205 {
4206 perf_guest_cbs = NULL;
4207 return 0;
4208 }
4209 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
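/*
 * A minimal registration sketch, assuming a hypervisor-side implementation;
 * the my_* names are hypothetical, only the struct type and the
 * register/unregister calls come from this file:
 *
 *	static struct perf_guest_info_callbacks my_guest_cbs = {
 *		.is_in_guest	= my_is_in_guest,
 *		.is_user_mode	= my_is_guest_user_mode,
 *		.get_guest_ip	= my_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&my_guest_cbs);
 *	...
 *	perf_unregister_guest_info_callbacks(&my_guest_cbs);
 */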
4210
4211 static void
4212 perf_output_sample_regs(struct perf_output_handle *handle,
4213 struct pt_regs *regs, u64 mask)
4214 {
4215 int bit;
4216
4217 for_each_set_bit(bit, (const unsigned long *) &mask,
4218 sizeof(mask) * BITS_PER_BYTE) {
4219 u64 val;
4220
4221 val = perf_reg_value(regs, bit);
4222 perf_output_put(handle, val);
4223 }
4224 }
4225
4226 static void perf_sample_regs_user(struct perf_regs_user *regs_user,
4227 struct pt_regs *regs)
4228 {
4229 if (!user_mode(regs)) {
4230 if (current->mm)
4231 regs = task_pt_regs(current);
4232 else
4233 regs = NULL;
4234 }
4235
4236 if (regs) {
4237 regs_user->regs = regs;
4238 regs_user->abi = perf_reg_abi(current);
4239 }
4240 }
4241
4242 /*
4243 * Get remaining task size from user stack pointer.
4244 *
4245 * It'd be better to take the stack vma map and limit this more
4246 * precisely, but there's no way to get it safely in interrupt
4247 * context, so use TASK_SIZE as the limit.
4248 */
4249 static u64 perf_ustack_task_size(struct pt_regs *regs)
4250 {
4251 unsigned long addr = perf_user_stack_pointer(regs);
4252
4253 if (!addr || addr >= TASK_SIZE)
4254 return 0;
4255
4256 return TASK_SIZE - addr;
4257 }
4258
4259 static u16
4260 perf_sample_ustack_size(u16 stack_size, u16 header_size,
4261 struct pt_regs *regs)
4262 {
4263 u64 task_size;
4264
4265 /* No regs, no stack pointer, no dump. */
4266 if (!regs)
4267 return 0;
4268
4269 /*
4270 * Check whether the requested stack size fits into:
4271 * - TASK_SIZE
4272 * If it doesn't, limit the size to TASK_SIZE.
4273 *
4274 * - the remaining sample size
4275 * If it doesn't, shrink the stack size to fit
4276 * into the remaining sample size.
4277 */
4278
4279 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
4280 stack_size = min(stack_size, (u16) task_size);
4281
4282 /* Current header size plus static size and dynamic size. */
4283 header_size += 2 * sizeof(u64);
4284
4285 /* Do we fit in with the current stack dump size? */
4286 if ((u16) (header_size + stack_size) < header_size) {
4287 /*
4288 * If we overflow the maximum size for the sample,
4289 * we customize the stack dump size to fit in.
4290 */
4291 stack_size = USHRT_MAX - header_size - sizeof(u64);
4292 stack_size = round_up(stack_size, sizeof(u64));
4293 }
4294
4295 return stack_size;
4296 }
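/*
 * A worked example of the clamping above, with illustrative numbers:
 * a requested stack_size of 16384 bytes with only 8192 bytes left
 * between the user stack pointer and TASK_SIZE is first clamped to
 * 8192.  If header_size + stack_size would then overflow the 16-bit
 * header size, it is shrunk again to
 *
 *	stack_size = round_up(USHRT_MAX - header_size - sizeof(u64),
 *			      sizeof(u64));
 *
 * so the final record always fits the u16 size field.
 */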
4297
4298 static void
4299 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
4300 struct pt_regs *regs)
4301 {
4302 /* Case of a kernel thread, nothing to dump */
4303 if (!regs) {
4304 u64 size = 0;
4305 perf_output_put(handle, size);
4306 } else {
4307 unsigned long sp;
4308 unsigned int rem;
4309 u64 dyn_size;
4310
4311 /*
4312 * We dump:
4313 * static size
4314 * - the size requested by the user, or the largest one that fits
4315 * into the sample max size
4316 * data
4317 * - user stack dump data
4318 * dynamic size
4319 * - the size actually dumped
4320 */
4321
4322 /* Static size. */
4323 perf_output_put(handle, dump_size);
4324
4325 /* Data. */
4326 sp = perf_user_stack_pointer(regs);
4327 rem = __output_copy_user(handle, (void *) sp, dump_size);
4328 dyn_size = dump_size - rem;
4329
4330 perf_output_skip(handle, rem);
4331
4332 /* Dynamic size. */
4333 perf_output_put(handle, dyn_size);
4334 }
4335 }
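/*
 * The record layout produced above, sketched for an illustrative
 * dump_size of 512 bytes:
 *
 *	u64  size = 512;		// static size, written first
 *	char data[512];			// user stack; the trailing 'rem' bytes
 *					// that __output_copy_user() could not
 *					// read are skipped, not copied
 *	u64  dyn_size = 512 - rem;	// bytes actually dumped
 *
 * A kernel thread (regs == NULL) emits only a single zero u64.
 */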
4336
4337 static void __perf_event_header__init_id(struct perf_event_header *header,
4338 struct perf_sample_data *data,
4339 struct perf_event *event)
4340 {
4341 u64 sample_type = event->attr.sample_type;
4342
4343 data->type = sample_type;
4344 header->size += event->id_header_size;
4345
4346 if (sample_type & PERF_SAMPLE_TID) {
4347 /* namespace issues */
4348 data->tid_entry.pid = perf_event_pid(event, current);
4349 data->tid_entry.tid = perf_event_tid(event, current);
4350 }
4351
4352 if (sample_type & PERF_SAMPLE_TIME)
4353 data->time = perf_clock();
4354
4355 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
4356 data->id = primary_event_id(event);
4357
4358 if (sample_type & PERF_SAMPLE_STREAM_ID)
4359 data->stream_id = event->id;
4360
4361 if (sample_type & PERF_SAMPLE_CPU) {
4362 data->cpu_entry.cpu = raw_smp_processor_id();
4363 data->cpu_entry.reserved = 0;
4364 }
4365 }
4366
4367 void perf_event_header__init_id(struct perf_event_header *header,
4368 struct perf_sample_data *data,
4369 struct perf_event *event)
4370 {
4371 if (event->attr.sample_id_all)
4372 __perf_event_header__init_id(header, data, event);
4373 }
4374
4375 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
4376 struct perf_sample_data *data)
4377 {
4378 u64 sample_type = data->type;
4379
4380 if (sample_type & PERF_SAMPLE_TID)
4381 perf_output_put(handle, data->tid_entry);
4382
4383 if (sample_type & PERF_SAMPLE_TIME)
4384 perf_output_put(handle, data->time);
4385
4386 if (sample_type & PERF_SAMPLE_ID)
4387 perf_output_put(handle, data->id);
4388
4389 if (sample_type & PERF_SAMPLE_STREAM_ID)
4390 perf_output_put(handle, data->stream_id);
4391
4392 if (sample_type & PERF_SAMPLE_CPU)
4393 perf_output_put(handle, data->cpu_entry);
4394
4395 if (sample_type & PERF_SAMPLE_IDENTIFIER)
4396 perf_output_put(handle, data->id);
4397 }
4398
4399 void perf_event__output_id_sample(struct perf_event *event,
4400 struct perf_output_handle *handle,
4401 struct perf_sample_data *sample)
4402 {
4403 if (event->attr.sample_id_all)
4404 __perf_event__output_id_sample(handle, sample);
4405 }
4406
4407 static void perf_output_read_one(struct perf_output_handle *handle,
4408 struct perf_event *event,
4409 u64 enabled, u64 running)
4410 {
4411 u64 read_format = event->attr.read_format;
4412 u64 values[4];
4413 int n = 0;
4414
4415 values[n++] = perf_event_count(event);
4416 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4417 values[n++] = enabled +
4418 atomic64_read(&event->child_total_time_enabled);
4419 }
4420 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4421 values[n++] = running +
4422 atomic64_read(&event->child_total_time_running);
4423 }
4424 if (read_format & PERF_FORMAT_ID)
4425 values[n++] = primary_event_id(event);
4426
4427 __output_copy(handle, values, n * sizeof(u64));
4428 }
4429
4430 /*
4431 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
4432 */
4433 static void perf_output_read_group(struct perf_output_handle *handle,
4434 struct perf_event *event,
4435 u64 enabled, u64 running)
4436 {
4437 struct perf_event *leader = event->group_leader, *sub;
4438 u64 read_format = event->attr.read_format;
4439 u64 values[5];
4440 int n = 0;
4441
4442 values[n++] = 1 + leader->nr_siblings;
4443
4444 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4445 values[n++] = enabled;
4446
4447 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4448 values[n++] = running;
4449
4450 if (leader != event)
4451 leader->pmu->read(leader);
4452
4453 values[n++] = perf_event_count(leader);
4454 if (read_format & PERF_FORMAT_ID)
4455 values[n++] = primary_event_id(leader);
4456
4457 __output_copy(handle, values, n * sizeof(u64));
4458
4459 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4460 n = 0;
4461
4462 if ((sub != event) &&
4463 (sub->state == PERF_EVENT_STATE_ACTIVE))
4464 sub->pmu->read(sub);
4465
4466 values[n++] = perf_event_count(sub);
4467 if (read_format & PERF_FORMAT_ID)
4468 values[n++] = primary_event_id(sub);
4469
4470 __output_copy(handle, values, n * sizeof(u64));
4471 }
4472 }
4473
4474 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
4475 PERF_FORMAT_TOTAL_TIME_RUNNING)
4476
4477 static void perf_output_read(struct perf_output_handle *handle,
4478 struct perf_event *event)
4479 {
4480 u64 enabled = 0, running = 0, now;
4481 u64 read_format = event->attr.read_format;
4482
4483 /*
4484 * compute total_time_enabled, total_time_running
4485 * based on snapshot values taken when the event
4486 * was last scheduled in.
4487 *
4488 * we cannot simply call update_context_time()
4489 * because of locking issues, as we are called in
4490 * NMI context
4491 */
4492 if (read_format & PERF_FORMAT_TOTAL_TIMES)
4493 calc_timer_values(event, &now, &enabled, &running);
4494
4495 if (event->attr.read_format & PERF_FORMAT_GROUP)
4496 perf_output_read_group(handle, event, enabled, running);
4497 else
4498 perf_output_read_one(handle, event, enabled, running);
4499 }
4500
4501 void perf_output_sample(struct perf_output_handle *handle,
4502 struct perf_event_header *header,
4503 struct perf_sample_data *data,
4504 struct perf_event *event)
4505 {
4506 u64 sample_type = data->type;
4507
4508 perf_output_put(handle, *header);
4509
4510 if (sample_type & PERF_SAMPLE_IDENTIFIER)
4511 perf_output_put(handle, data->id);
4512
4513 if (sample_type & PERF_SAMPLE_IP)
4514 perf_output_put(handle, data->ip);
4515
4516 if (sample_type & PERF_SAMPLE_TID)
4517 perf_output_put(handle, data->tid_entry);
4518
4519 if (sample_type & PERF_SAMPLE_TIME)
4520 perf_output_put(handle, data->time);
4521
4522 if (sample_type & PERF_SAMPLE_ADDR)
4523 perf_output_put(handle, data->addr);
4524
4525 if (sample_type & PERF_SAMPLE_ID)
4526 perf_output_put(handle, data->id);
4527
4528 if (sample_type & PERF_SAMPLE_STREAM_ID)
4529 perf_output_put(handle, data->stream_id);
4530
4531 if (sample_type & PERF_SAMPLE_CPU)
4532 perf_output_put(handle, data->cpu_entry);
4533
4534 if (sample_type & PERF_SAMPLE_PERIOD)
4535 perf_output_put(handle, data->period);
4536
4537 if (sample_type & PERF_SAMPLE_READ)
4538 perf_output_read(handle, event);
4539
4540 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4541 if (data->callchain) {
4542 int size = 1;
4543
4544 if (data->callchain)
4545 size += data->callchain->nr;
4546
4547 size *= sizeof(u64);
4548
4549 __output_copy(handle, data->callchain, size);
4550 } else {
4551 u64 nr = 0;
4552 perf_output_put(handle, nr);
4553 }
4554 }
4555
4556 if (sample_type & PERF_SAMPLE_RAW) {
4557 if (data->raw) {
4558 perf_output_put(handle, data->raw->size);
4559 __output_copy(handle, data->raw->data,
4560 data->raw->size);
4561 } else {
4562 struct {
4563 u32 size;
4564 u32 data;
4565 } raw = {
4566 .size = sizeof(u32),
4567 .data = 0,
4568 };
4569 perf_output_put(handle, raw);
4570 }
4571 }
4572
4573 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4574 if (data->br_stack) {
4575 size_t size;
4576
4577 size = data->br_stack->nr
4578 * sizeof(struct perf_branch_entry);
4579
4580 perf_output_put(handle, data->br_stack->nr);
4581 perf_output_copy(handle, data->br_stack->entries, size);
4582 } else {
4583 /*
4584 * we always store at least the value of nr
4585 */
4586 u64 nr = 0;
4587 perf_output_put(handle, nr);
4588 }
4589 }
4590
4591 if (sample_type & PERF_SAMPLE_REGS_USER) {
4592 u64 abi = data->regs_user.abi;
4593
4594 /*
4595 * If there are no regs to dump, signal it by making the
4596 * first u64 zero (PERF_SAMPLE_REGS_ABI_NONE).
4597 */
4598 perf_output_put(handle, abi);
4599
4600 if (abi) {
4601 u64 mask = event->attr.sample_regs_user;
4602 perf_output_sample_regs(handle,
4603 data->regs_user.regs,
4604 mask);
4605 }
4606 }
4607
4608 if (sample_type & PERF_SAMPLE_STACK_USER) {
4609 perf_output_sample_ustack(handle,
4610 data->stack_user_size,
4611 data->regs_user.regs);
4612 }
4613
4614 if (sample_type & PERF_SAMPLE_WEIGHT)
4615 perf_output_put(handle, data->weight);
4616
4617 if (sample_type & PERF_SAMPLE_DATA_SRC)
4618 perf_output_put(handle, data->data_src.val);
4619
4620 if (sample_type & PERF_SAMPLE_TRANSACTION)
4621 perf_output_put(handle, data->txn);
4622
4623 if (!event->attr.watermark) {
4624 int wakeup_events = event->attr.wakeup_events;
4625
4626 if (wakeup_events) {
4627 struct ring_buffer *rb = handle->rb;
4628 int events = local_inc_return(&rb->events);
4629
4630 if (events >= wakeup_events) {
4631 local_sub(wakeup_events, &rb->events);
4632 local_inc(&rb->wakeup);
4633 }
4634 }
4635 }
4636 }
4637
4638 void perf_prepare_sample(struct perf_event_header *header,
4639 struct perf_sample_data *data,
4640 struct perf_event *event,
4641 struct pt_regs *regs)
4642 {
4643 u64 sample_type = event->attr.sample_type;
4644
4645 header->type = PERF_RECORD_SAMPLE;
4646 header->size = sizeof(*header) + event->header_size;
4647
4648 header->misc = 0;
4649 header->misc |= perf_misc_flags(regs);
4650
4651 __perf_event_header__init_id(header, data, event);
4652
4653 if (sample_type & PERF_SAMPLE_IP)
4654 data->ip = perf_instruction_pointer(regs);
4655
4656 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4657 int size = 1;
4658
4659 data->callchain = perf_callchain(event, regs);
4660
4661 if (data->callchain)
4662 size += data->callchain->nr;
4663
4664 header->size += size * sizeof(u64);
4665 }
4666
4667 if (sample_type & PERF_SAMPLE_RAW) {
4668 int size = sizeof(u32);
4669
4670 if (data->raw)
4671 size += data->raw->size;
4672 else
4673 size += sizeof(u32);
4674
4675 WARN_ON_ONCE(size & (sizeof(u64)-1));
4676 header->size += size;
4677 }
4678
4679 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4680 int size = sizeof(u64); /* nr */
4681 if (data->br_stack) {
4682 size += data->br_stack->nr
4683 * sizeof(struct perf_branch_entry);
4684 }
4685 header->size += size;
4686 }
4687
4688 if (sample_type & PERF_SAMPLE_REGS_USER) {
4689 /* regs dump ABI info */
4690 int size = sizeof(u64);
4691
4692 perf_sample_regs_user(&data->regs_user, regs);
4693
4694 if (data->regs_user.regs) {
4695 u64 mask = event->attr.sample_regs_user;
4696 size += hweight64(mask) * sizeof(u64);
4697 }
4698
4699 header->size += size;
4700 }
4701
4702 if (sample_type & PERF_SAMPLE_STACK_USER) {
4703 /*
4704 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
4705 * processed as the last one, or an additional check must be
4706 * added in case a new sample type is added, because we could
4707 * eat up the rest of the sample size.
4708 */
4709 struct perf_regs_user *uregs = &data->regs_user;
4710 u16 stack_size = event->attr.sample_stack_user;
4711 u16 size = sizeof(u64);
4712
4713 if (!uregs->abi)
4714 perf_sample_regs_user(uregs, regs);
4715
4716 stack_size = perf_sample_ustack_size(stack_size, header->size,
4717 uregs->regs);
4718
4719 /*
4720 * If there is something to dump, add space for the dump
4721 * itself and for the field that tells the dynamic size,
4722 * which is how many bytes have actually been dumped.
4723 */
4724 if (stack_size)
4725 size += sizeof(u64) + stack_size;
4726
4727 data->stack_user_size = stack_size;
4728 header->size += size;
4729 }
4730 }
4731
4732 static void perf_event_output(struct perf_event *event,
4733 struct perf_sample_data *data,
4734 struct pt_regs *regs)
4735 {
4736 struct perf_output_handle handle;
4737 struct perf_event_header header;
4738
4739 /* protect the callchain buffers */
4740 rcu_read_lock();
4741
4742 perf_prepare_sample(&header, data, event, regs);
4743
4744 if (perf_output_begin(&handle, event, header.size))
4745 goto exit;
4746
4747 perf_output_sample(&handle, &header, data, event);
4748
4749 perf_output_end(&handle);
4750
4751 exit:
4752 rcu_read_unlock();
4753 }
4754
4755 /*
4756 * read event_id
4757 */
4758
4759 struct perf_read_event {
4760 struct perf_event_header header;
4761
4762 u32 pid;
4763 u32 tid;
4764 };
4765
4766 static void
4767 perf_event_read_event(struct perf_event *event,
4768 struct task_struct *task)
4769 {
4770 struct perf_output_handle handle;
4771 struct perf_sample_data sample;
4772 struct perf_read_event read_event = {
4773 .header = {
4774 .type = PERF_RECORD_READ,
4775 .misc = 0,
4776 .size = sizeof(read_event) + event->read_size,
4777 },
4778 .pid = perf_event_pid(event, task),
4779 .tid = perf_event_tid(event, task),
4780 };
4781 int ret;
4782
4783 perf_event_header__init_id(&read_event.header, &sample, event);
4784 ret = perf_output_begin(&handle, event, read_event.header.size);
4785 if (ret)
4786 return;
4787
4788 perf_output_put(&handle, read_event);
4789 perf_output_read(&handle, event);
4790 perf_event__output_id_sample(event, &handle, &sample);
4791
4792 perf_output_end(&handle);
4793 }
4794
4795 typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
4796
4797 static void
4798 perf_event_aux_ctx(struct perf_event_context *ctx,
4799 perf_event_aux_output_cb output,
4800 void *data)
4801 {
4802 struct perf_event *event;
4803
4804 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4805 if (event->state < PERF_EVENT_STATE_INACTIVE)
4806 continue;
4807 if (!event_filter_match(event))
4808 continue;
4809 output(event, data);
4810 }
4811 }
4812
4813 static void
4814 perf_event_aux(perf_event_aux_output_cb output, void *data,
4815 struct perf_event_context *task_ctx)
4816 {
4817 struct perf_cpu_context *cpuctx;
4818 struct perf_event_context *ctx;
4819 struct pmu *pmu;
4820 int ctxn;
4821
4822 rcu_read_lock();
4823 list_for_each_entry_rcu(pmu, &pmus, entry) {
4824 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4825 if (cpuctx->unique_pmu != pmu)
4826 goto next;
4827 perf_event_aux_ctx(&cpuctx->ctx, output, data);
4828 if (task_ctx)
4829 goto next;
4830 ctxn = pmu->task_ctx_nr;
4831 if (ctxn < 0)
4832 goto next;
4833 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4834 if (ctx)
4835 perf_event_aux_ctx(ctx, output, data);
4836 next:
4837 put_cpu_ptr(pmu->pmu_cpu_context);
4838 }
4839
4840 if (task_ctx) {
4841 preempt_disable();
4842 perf_event_aux_ctx(task_ctx, output, data);
4843 preempt_enable();
4844 }
4845 rcu_read_unlock();
4846 }
4847
4848 /*
4849 * task tracking -- fork/exit
4850 *
4851 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
4852 */
4853
4854 struct perf_task_event {
4855 struct task_struct *task;
4856 struct perf_event_context *task_ctx;
4857
4858 struct {
4859 struct perf_event_header header;
4860
4861 u32 pid;
4862 u32 ppid;
4863 u32 tid;
4864 u32 ptid;
4865 u64 time;
4866 } event_id;
4867 };
4868
4869 static int perf_event_task_match(struct perf_event *event)
4870 {
4871 return event->attr.comm || event->attr.mmap ||
4872 event->attr.mmap2 || event->attr.mmap_data ||
4873 event->attr.task;
4874 }
4875
4876 static void perf_event_task_output(struct perf_event *event,
4877 void *data)
4878 {
4879 struct perf_task_event *task_event = data;
4880 struct perf_output_handle handle;
4881 struct perf_sample_data sample;
4882 struct task_struct *task = task_event->task;
4883 int ret, size = task_event->event_id.header.size;
4884
4885 if (!perf_event_task_match(event))
4886 return;
4887
4888 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
4889
4890 ret = perf_output_begin(&handle, event,
4891 task_event->event_id.header.size);
4892 if (ret)
4893 goto out;
4894
4895 task_event->event_id.pid = perf_event_pid(event, task);
4896 task_event->event_id.ppid = perf_event_pid(event, current);
4897
4898 task_event->event_id.tid = perf_event_tid(event, task);
4899 task_event->event_id.ptid = perf_event_tid(event, current);
4900
4901 perf_output_put(&handle, task_event->event_id);
4902
4903 perf_event__output_id_sample(event, &handle, &sample);
4904
4905 perf_output_end(&handle);
4906 out:
4907 task_event->event_id.header.size = size;
4908 }
4909
4910 static void perf_event_task(struct task_struct *task,
4911 struct perf_event_context *task_ctx,
4912 int new)
4913 {
4914 struct perf_task_event task_event;
4915
4916 if (!atomic_read(&nr_comm_events) &&
4917 !atomic_read(&nr_mmap_events) &&
4918 !atomic_read(&nr_task_events))
4919 return;
4920
4921 task_event = (struct perf_task_event){
4922 .task = task,
4923 .task_ctx = task_ctx,
4924 .event_id = {
4925 .header = {
4926 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
4927 .misc = 0,
4928 .size = sizeof(task_event.event_id),
4929 },
4930 /* .pid */
4931 /* .ppid */
4932 /* .tid */
4933 /* .ptid */
4934 .time = perf_clock(),
4935 },
4936 };
4937
4938 perf_event_aux(perf_event_task_output,
4939 &task_event,
4940 task_ctx);
4941 }
4942
4943 void perf_event_fork(struct task_struct *task)
4944 {
4945 perf_event_task(task, NULL, 1);
4946 }
4947
4948 /*
4949 * comm tracking
4950 */
4951
4952 struct perf_comm_event {
4953 struct task_struct *task;
4954 char *comm;
4955 int comm_size;
4956
4957 struct {
4958 struct perf_event_header header;
4959
4960 u32 pid;
4961 u32 tid;
4962 } event_id;
4963 };
4964
4965 static int perf_event_comm_match(struct perf_event *event)
4966 {
4967 return event->attr.comm;
4968 }
4969
4970 static void perf_event_comm_output(struct perf_event *event,
4971 void *data)
4972 {
4973 struct perf_comm_event *comm_event = data;
4974 struct perf_output_handle handle;
4975 struct perf_sample_data sample;
4976 int size = comm_event->event_id.header.size;
4977 int ret;
4978
4979 if (!perf_event_comm_match(event))
4980 return;
4981
4982 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4983 ret = perf_output_begin(&handle, event,
4984 comm_event->event_id.header.size);
4985
4986 if (ret)
4987 goto out;
4988
4989 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4990 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4991
4992 perf_output_put(&handle, comm_event->event_id);
4993 __output_copy(&handle, comm_event->comm,
4994 comm_event->comm_size);
4995
4996 perf_event__output_id_sample(event, &handle, &sample);
4997
4998 perf_output_end(&handle);
4999 out:
5000 comm_event->event_id.header.size = size;
5001 }
5002
5003 static void perf_event_comm_event(struct perf_comm_event *comm_event)
5004 {
5005 char comm[TASK_COMM_LEN];
5006 unsigned int size;
5007
5008 memset(comm, 0, sizeof(comm));
5009 strlcpy(comm, comm_event->task->comm, sizeof(comm));
5010 size = ALIGN(strlen(comm)+1, sizeof(u64));
5011
5012 comm_event->comm = comm;
5013 comm_event->comm_size = size;
5014
5015 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
5016
5017 perf_event_aux(perf_event_comm_output,
5018 comm_event,
5019 NULL);
5020 }
5021
5022 void perf_event_comm(struct task_struct *task)
5023 {
5024 struct perf_comm_event comm_event;
5025 struct perf_event_context *ctx;
5026 int ctxn;
5027
5028 rcu_read_lock();
5029 for_each_task_context_nr(ctxn) {
5030 ctx = task->perf_event_ctxp[ctxn];
5031 if (!ctx)
5032 continue;
5033
5034 perf_event_enable_on_exec(ctx);
5035 }
5036 rcu_read_unlock();
5037
5038 if (!atomic_read(&nr_comm_events))
5039 return;
5040
5041 comm_event = (struct perf_comm_event){
5042 .task = task,
5043 /* .comm */
5044 /* .comm_size */
5045 .event_id = {
5046 .header = {
5047 .type = PERF_RECORD_COMM,
5048 .misc = 0,
5049 /* .size */
5050 },
5051 /* .pid */
5052 /* .tid */
5053 },
5054 };
5055
5056 perf_event_comm_event(&comm_event);
5057 }
5058
5059 /*
5060 * mmap tracking
5061 */
5062
5063 struct perf_mmap_event {
5064 struct vm_area_struct *vma;
5065
5066 const char *file_name;
5067 int file_size;
5068 int maj, min;
5069 u64 ino;
5070 u64 ino_generation;
5071
5072 struct {
5073 struct perf_event_header header;
5074
5075 u32 pid;
5076 u32 tid;
5077 u64 start;
5078 u64 len;
5079 u64 pgoff;
5080 } event_id;
5081 };
5082
5083 static int perf_event_mmap_match(struct perf_event *event,
5084 void *data)
5085 {
5086 struct perf_mmap_event *mmap_event = data;
5087 struct vm_area_struct *vma = mmap_event->vma;
5088 int executable = vma->vm_flags & VM_EXEC;
5089
5090 return (!executable && event->attr.mmap_data) ||
5091 (executable && (event->attr.mmap || event->attr.mmap2));
5092 }
5093
5094 static void perf_event_mmap_output(struct perf_event *event,
5095 void *data)
5096 {
5097 struct perf_mmap_event *mmap_event = data;
5098 struct perf_output_handle handle;
5099 struct perf_sample_data sample;
5100 int size = mmap_event->event_id.header.size;
5101 int ret;
5102
5103 if (!perf_event_mmap_match(event, data))
5104 return;
5105
5106 if (event->attr.mmap2) {
5107 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
5108 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
5109 mmap_event->event_id.header.size += sizeof(mmap_event->min);
5110 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
5111 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
5112 }
5113
5114 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
5115 ret = perf_output_begin(&handle, event,
5116 mmap_event->event_id.header.size);
5117 if (ret)
5118 goto out;
5119
5120 mmap_event->event_id.pid = perf_event_pid(event, current);
5121 mmap_event->event_id.tid = perf_event_tid(event, current);
5122
5123 perf_output_put(&handle, mmap_event->event_id);
5124
5125 if (event->attr.mmap2) {
5126 perf_output_put(&handle, mmap_event->maj);
5127 perf_output_put(&handle, mmap_event->min);
5128 perf_output_put(&handle, mmap_event->ino);
5129 perf_output_put(&handle, mmap_event->ino_generation);
5130 }
5131
5132 __output_copy(&handle, mmap_event->file_name,
5133 mmap_event->file_size);
5134
5135 perf_event__output_id_sample(event, &handle, &sample);
5136
5137 perf_output_end(&handle);
5138 out:
5139 mmap_event->event_id.header.size = size;
5140 }
5141
5142 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
5143 {
5144 struct vm_area_struct *vma = mmap_event->vma;
5145 struct file *file = vma->vm_file;
5146 int maj = 0, min = 0;
5147 u64 ino = 0, gen = 0;
5148 unsigned int size;
5149 char tmp[16];
5150 char *buf = NULL;
5151 char *name;
5152
5153 if (file) {
5154 struct inode *inode;
5155 dev_t dev;
5156
5157 buf = kmalloc(PATH_MAX, GFP_KERNEL);
5158 if (!buf) {
5159 name = "//enomem";
5160 goto cpy_name;
5161 }
5162 /*
5163 * d_path() works from the end of the buffer backwards, so we
5164 * need to add enough zero bytes after the string to handle
5165 * the 64bit alignment we do later.
5166 */
5167 name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
5168 if (IS_ERR(name)) {
5169 name = "//toolong";
5170 goto cpy_name;
5171 }
5172 inode = file_inode(vma->vm_file);
5173 dev = inode->i_sb->s_dev;
5174 ino = inode->i_ino;
5175 gen = inode->i_generation;
5176 maj = MAJOR(dev);
5177 min = MINOR(dev);
5178 goto got_name;
5179 } else {
5180 name = (char *)arch_vma_name(vma);
5181 if (name)
5182 goto cpy_name;
5183
5184 if (vma->vm_start <= vma->vm_mm->start_brk &&
5185 vma->vm_end >= vma->vm_mm->brk) {
5186 name = "[heap]";
5187 goto cpy_name;
5188 }
5189 if (vma->vm_start <= vma->vm_mm->start_stack &&
5190 vma->vm_end >= vma->vm_mm->start_stack) {
5191 name = "[stack]";
5192 goto cpy_name;
5193 }
5194
5195 name = "//anon";
5196 goto cpy_name;
5197 }
5198
5199 cpy_name:
5200 strlcpy(tmp, name, sizeof(tmp));
5201 name = tmp;
5202 got_name:
5203 /*
5204 * Since our buffer works in 8 byte units we need to align our string
5205 * size to a multiple of 8. However, we must guarantee the tail end is
5206 * zero'd out to avoid leaking random bits to userspace.
5207 */
5208 size = strlen(name)+1;
5209 while (!IS_ALIGNED(size, sizeof(u64)))
5210 name[size++] = '\0';
5211
5212 mmap_event->file_name = name;
5213 mmap_event->file_size = size;
5214 mmap_event->maj = maj;
5215 mmap_event->min = min;
5216 mmap_event->ino = ino;
5217 mmap_event->ino_generation = gen;
5218
5219 if (!(vma->vm_flags & VM_EXEC))
5220 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
5221
5222 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
5223
5224 perf_event_aux(perf_event_mmap_output,
5225 mmap_event,
5226 NULL);
5227
5228 kfree(buf);
5229 }
5230
5231 void perf_event_mmap(struct vm_area_struct *vma)
5232 {
5233 struct perf_mmap_event mmap_event;
5234
5235 if (!atomic_read(&nr_mmap_events))
5236 return;
5237
5238 mmap_event = (struct perf_mmap_event){
5239 .vma = vma,
5240 /* .file_name */
5241 /* .file_size */
5242 .event_id = {
5243 .header = {
5244 .type = PERF_RECORD_MMAP,
5245 .misc = PERF_RECORD_MISC_USER,
5246 /* .size */
5247 },
5248 /* .pid */
5249 /* .tid */
5250 .start = vma->vm_start,
5251 .len = vma->vm_end - vma->vm_start,
5252 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
5253 },
5254 /* .maj (attr_mmap2 only) */
5255 /* .min (attr_mmap2 only) */
5256 /* .ino (attr_mmap2 only) */
5257 /* .ino_generation (attr_mmap2 only) */
5258 };
5259
5260 perf_event_mmap_event(&mmap_event);
5261 }
5262
5263 /*
5264 * IRQ throttle logging
5265 */
5266
5267 static void perf_log_throttle(struct perf_event *event, int enable)
5268 {
5269 struct perf_output_handle handle;
5270 struct perf_sample_data sample;
5271 int ret;
5272
5273 struct {
5274 struct perf_event_header header;
5275 u64 time;
5276 u64 id;
5277 u64 stream_id;
5278 } throttle_event = {
5279 .header = {
5280 .type = PERF_RECORD_THROTTLE,
5281 .misc = 0,
5282 .size = sizeof(throttle_event),
5283 },
5284 .time = perf_clock(),
5285 .id = primary_event_id(event),
5286 .stream_id = event->id,
5287 };
5288
5289 if (enable)
5290 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
5291
5292 perf_event_header__init_id(&throttle_event.header, &sample, event);
5293
5294 ret = perf_output_begin(&handle, event,
5295 throttle_event.header.size);
5296 if (ret)
5297 return;
5298
5299 perf_output_put(&handle, throttle_event);
5300 perf_event__output_id_sample(event, &handle, &sample);
5301 perf_output_end(&handle);
5302 }
5303
5304 /*
5305 * Generic event overflow handling, sampling.
5306 */
5307
5308 static int __perf_event_overflow(struct perf_event *event,
5309 int throttle, struct perf_sample_data *data,
5310 struct pt_regs *regs)
5311 {
5312 int events = atomic_read(&event->event_limit);
5313 struct hw_perf_event *hwc = &event->hw;
5314 u64 seq;
5315 int ret = 0;
5316
5317 /*
5318 * Non-sampling counters might still use the PMI to fold short
5319 * hardware counters, ignore those.
5320 */
5321 if (unlikely(!is_sampling_event(event)))
5322 return 0;
5323
5324 seq = __this_cpu_read(perf_throttled_seq);
5325 if (seq != hwc->interrupts_seq) {
5326 hwc->interrupts_seq = seq;
5327 hwc->interrupts = 1;
5328 } else {
5329 hwc->interrupts++;
5330 if (unlikely(throttle
5331 && hwc->interrupts >= max_samples_per_tick)) {
5332 __this_cpu_inc(perf_throttled_count);
5333 hwc->interrupts = MAX_INTERRUPTS;
5334 perf_log_throttle(event, 0);
5335 tick_nohz_full_kick();
5336 ret = 1;
5337 }
5338 }
5339
5340 if (event->attr.freq) {
5341 u64 now = perf_clock();
5342 s64 delta = now - hwc->freq_time_stamp;
5343
5344 hwc->freq_time_stamp = now;
5345
5346 if (delta > 0 && delta < 2*TICK_NSEC)
5347 perf_adjust_period(event, delta, hwc->last_period, true);
5348 }
5349
5350 /*
5351 * XXX event_limit might not quite work as expected on inherited
5352 * events
5353 */
5354
5355 event->pending_kill = POLL_IN;
5356 if (events && atomic_dec_and_test(&event->event_limit)) {
5357 ret = 1;
5358 event->pending_kill = POLL_HUP;
5359 event->pending_disable = 1;
5360 irq_work_queue(&event->pending);
5361 }
5362
5363 if (event->overflow_handler)
5364 event->overflow_handler(event, data, regs);
5365 else
5366 perf_event_output(event, data, regs);
5367
5368 if (event->fasync && event->pending_kill) {
5369 event->pending_wakeup = 1;
5370 irq_work_queue(&event->pending);
5371 }
5372
5373 return ret;
5374 }
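/*
 * A hedged usage sketch of the event_limit mechanism consumed above:
 * userspace arms the limit with PERF_EVENT_IOC_REFRESH, each overflow
 * decrements it, and when it reaches zero the event is disabled and
 * POLL_HUP/SIGIO is delivered.  Illustrative consumer loop:
 *
 *	ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);	// allow one overflow
 *	// ... wait for SIGIO or poll(fd), handle the sample ...
 *	ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);	// re-arm
 */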
5375
5376 int perf_event_overflow(struct perf_event *event,
5377 struct perf_sample_data *data,
5378 struct pt_regs *regs)
5379 {
5380 return __perf_event_overflow(event, 1, data, regs);
5381 }
5382
5383 /*
5384 * Generic software event infrastructure
5385 */
5386
5387 struct swevent_htable {
5388 struct swevent_hlist *swevent_hlist;
5389 struct mutex hlist_mutex;
5390 int hlist_refcount;
5391
5392 /* Recursion avoidance in each context */
5393 int recursion[PERF_NR_CONTEXTS];
5394 };
5395
5396 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
5397
5398 /*
5399 * We directly increment event->count and keep a second value in
5400 * event->hw.period_left to count intervals. This period value
5401 * is kept in the range [-sample_period, 0] so that we can use the
5402 * sign as the trigger.
5403 */
5404
5405 u64 perf_swevent_set_period(struct perf_event *event)
5406 {
5407 struct hw_perf_event *hwc = &event->hw;
5408 u64 period = hwc->last_period;
5409 u64 nr, offset;
5410 s64 old, val;
5411
5412 hwc->last_period = hwc->sample_period;
5413
5414 again:
5415 old = val = local64_read(&hwc->period_left);
5416 if (val < 0)
5417 return 0;
5418
5419 nr = div64_u64(period + val, period);
5420 offset = nr * period;
5421 val -= offset;
5422 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
5423 goto again;
5424
5425 return nr;
5426 }
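/*
 * A worked example of the arithmetic above, with illustrative values:
 * sample_period = 100 and period_left = 250 at overflow time gives
 *
 *	nr     = (100 + 250) / 100 = 3;
 *	offset = 3 * 100           = 300;
 *	val    = 250 - 300         = -50;
 *
 * i.e. three overflows are reported and period_left is rewound to -50,
 * back inside the [-sample_period, 0] range described above.
 */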
5427
5428 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
5429 struct perf_sample_data *data,
5430 struct pt_regs *regs)
5431 {
5432 struct hw_perf_event *hwc = &event->hw;
5433 int throttle = 0;
5434
5435 if (!overflow)
5436 overflow = perf_swevent_set_period(event);
5437
5438 if (hwc->interrupts == MAX_INTERRUPTS)
5439 return;
5440
5441 for (; overflow; overflow--) {
5442 if (__perf_event_overflow(event, throttle,
5443 data, regs)) {
5444 /*
5445 * We inhibit the overflow from happening when
5446 * hwc->interrupts == MAX_INTERRUPTS.
5447 */
5448 break;
5449 }
5450 throttle = 1;
5451 }
5452 }
5453
5454 static void perf_swevent_event(struct perf_event *event, u64 nr,
5455 struct perf_sample_data *data,
5456 struct pt_regs *regs)
5457 {
5458 struct hw_perf_event *hwc = &event->hw;
5459
5460 local64_add(nr, &event->count);
5461
5462 if (!regs)
5463 return;
5464
5465 if (!is_sampling_event(event))
5466 return;
5467
5468 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
5469 data->period = nr;
5470 return perf_swevent_overflow(event, 1, data, regs);
5471 } else
5472 data->period = event->hw.last_period;
5473
5474 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
5475 return perf_swevent_overflow(event, 1, data, regs);
5476
5477 if (local64_add_negative(nr, &hwc->period_left))
5478 return;
5479
5480 perf_swevent_overflow(event, 0, data, regs);
5481 }
5482
5483 static int perf_exclude_event(struct perf_event *event,
5484 struct pt_regs *regs)
5485 {
5486 if (event->hw.state & PERF_HES_STOPPED)
5487 return 1;
5488
5489 if (regs) {
5490 if (event->attr.exclude_user && user_mode(regs))
5491 return 1;
5492
5493 if (event->attr.exclude_kernel && !user_mode(regs))
5494 return 1;
5495 }
5496
5497 return 0;
5498 }
5499
5500 static int perf_swevent_match(struct perf_event *event,
5501 enum perf_type_id type,
5502 u32 event_id,
5503 struct perf_sample_data *data,
5504 struct pt_regs *regs)
5505 {
5506 if (event->attr.type != type)
5507 return 0;
5508
5509 if (event->attr.config != event_id)
5510 return 0;
5511
5512 if (perf_exclude_event(event, regs))
5513 return 0;
5514
5515 return 1;
5516 }
5517
5518 static inline u64 swevent_hash(u64 type, u32 event_id)
5519 {
5520 u64 val = event_id | (type << 32);
5521
5522 return hash_64(val, SWEVENT_HLIST_BITS);
5523 }
5524
5525 static inline struct hlist_head *
5526 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
5527 {
5528 u64 hash = swevent_hash(type, event_id);
5529
5530 return &hlist->heads[hash];
5531 }
5532
5533 /* For the read side: events when they trigger */
5534 static inline struct hlist_head *
5535 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
5536 {
5537 struct swevent_hlist *hlist;
5538
5539 hlist = rcu_dereference(swhash->swevent_hlist);
5540 if (!hlist)
5541 return NULL;
5542
5543 return __find_swevent_head(hlist, type, event_id);
5544 }
5545
5546 /* For the event head insertion and removal in the hlist */
5547 static inline struct hlist_head *
5548 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
5549 {
5550 struct swevent_hlist *hlist;
5551 u32 event_id = event->attr.config;
5552 u64 type = event->attr.type;
5553
5554 /*
5555 * Event scheduling is always serialized against hlist allocation
5556 * and release, which makes the protected version suitable here.
5557 * The context lock guarantees that.
5558 */
5559 hlist = rcu_dereference_protected(swhash->swevent_hlist,
5560 lockdep_is_held(&event->ctx->lock));
5561 if (!hlist)
5562 return NULL;
5563
5564 return __find_swevent_head(hlist, type, event_id);
5565 }
5566
5567 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
5568 u64 nr,
5569 struct perf_sample_data *data,
5570 struct pt_regs *regs)
5571 {
5572 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5573 struct perf_event *event;
5574 struct hlist_head *head;
5575
5576 rcu_read_lock();
5577 head = find_swevent_head_rcu(swhash, type, event_id);
5578 if (!head)
5579 goto end;
5580
5581 hlist_for_each_entry_rcu(event, head, hlist_entry) {
5582 if (perf_swevent_match(event, type, event_id, data, regs))
5583 perf_swevent_event(event, nr, data, regs);
5584 }
5585 end:
5586 rcu_read_unlock();
5587 }
5588
5589 int perf_swevent_get_recursion_context(void)
5590 {
5591 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5592
5593 return get_recursion_context(swhash->recursion);
5594 }
5595 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
5596
5597 inline void perf_swevent_put_recursion_context(int rctx)
5598 {
5599 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5600
5601 put_recursion_context(swhash->recursion, rctx);
5602 }
5603
5604 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
5605 {
5606 struct perf_sample_data data;
5607 int rctx;
5608
5609 preempt_disable_notrace();
5610 rctx = perf_swevent_get_recursion_context();
5611 if (rctx < 0)
5612 return;
5613
5614 perf_sample_data_init(&data, addr, 0);
5615
5616 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
5617
5618 perf_swevent_put_recursion_context(rctx);
5619 preempt_enable_notrace();
5620 }
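/*
 * Kernel callers normally reach the path above through the perf_sw_event()
 * wrapper in <linux/perf_event.h>; a representative (simplified) call from
 * a page fault handler looks roughly like:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * which only drops into __perf_sw_event() when the matching
 * perf_swevent_enabled static key says such an event is in use.
 */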
5621
5622 static void perf_swevent_read(struct perf_event *event)
5623 {
5624 }
5625
5626 static int perf_swevent_add(struct perf_event *event, int flags)
5627 {
5628 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5629 struct hw_perf_event *hwc = &event->hw;
5630 struct hlist_head *head;
5631
5632 if (is_sampling_event(event)) {
5633 hwc->last_period = hwc->sample_period;
5634 perf_swevent_set_period(event);
5635 }
5636
5637 hwc->state = !(flags & PERF_EF_START);
5638
5639 head = find_swevent_head(swhash, event);
5640 if (WARN_ON_ONCE(!head))
5641 return -EINVAL;
5642
5643 hlist_add_head_rcu(&event->hlist_entry, head);
5644
5645 return 0;
5646 }
5647
5648 static void perf_swevent_del(struct perf_event *event, int flags)
5649 {
5650 hlist_del_rcu(&event->hlist_entry);
5651 }
5652
5653 static void perf_swevent_start(struct perf_event *event, int flags)
5654 {
5655 event->hw.state = 0;
5656 }
5657
5658 static void perf_swevent_stop(struct perf_event *event, int flags)
5659 {
5660 event->hw.state = PERF_HES_STOPPED;
5661 }
5662
5663 /* Deref the hlist from the update side */
5664 static inline struct swevent_hlist *
5665 swevent_hlist_deref(struct swevent_htable *swhash)
5666 {
5667 return rcu_dereference_protected(swhash->swevent_hlist,
5668 lockdep_is_held(&swhash->hlist_mutex));
5669 }
5670
5671 static void swevent_hlist_release(struct swevent_htable *swhash)
5672 {
5673 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
5674
5675 if (!hlist)
5676 return;
5677
5678 rcu_assign_pointer(swhash->swevent_hlist, NULL);
5679 kfree_rcu(hlist, rcu_head);
5680 }
5681
5682 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
5683 {
5684 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5685
5686 mutex_lock(&swhash->hlist_mutex);
5687
5688 if (!--swhash->hlist_refcount)
5689 swevent_hlist_release(swhash);
5690
5691 mutex_unlock(&swhash->hlist_mutex);
5692 }
5693
5694 static void swevent_hlist_put(struct perf_event *event)
5695 {
5696 int cpu;
5697
5698 for_each_possible_cpu(cpu)
5699 swevent_hlist_put_cpu(event, cpu);
5700 }
5701
5702 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
5703 {
5704 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5705 int err = 0;
5706
5707 mutex_lock(&swhash->hlist_mutex);
5708
5709 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
5710 struct swevent_hlist *hlist;
5711
5712 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5713 if (!hlist) {
5714 err = -ENOMEM;
5715 goto exit;
5716 }
5717 rcu_assign_pointer(swhash->swevent_hlist, hlist);
5718 }
5719 swhash->hlist_refcount++;
5720 exit:
5721 mutex_unlock(&swhash->hlist_mutex);
5722
5723 return err;
5724 }
5725
5726 static int swevent_hlist_get(struct perf_event *event)
5727 {
5728 int err;
5729 int cpu, failed_cpu;
5730
5731 get_online_cpus();
5732 for_each_possible_cpu(cpu) {
5733 err = swevent_hlist_get_cpu(event, cpu);
5734 if (err) {
5735 failed_cpu = cpu;
5736 goto fail;
5737 }
5738 }
5739 put_online_cpus();
5740
5741 return 0;
5742 fail:
5743 for_each_possible_cpu(cpu) {
5744 if (cpu == failed_cpu)
5745 break;
5746 swevent_hlist_put_cpu(event, cpu);
5747 }
5748
5749 put_online_cpus();
5750 return err;
5751 }
5752
5753 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
5754
5755 static void sw_perf_event_destroy(struct perf_event *event)
5756 {
5757 u64 event_id = event->attr.config;
5758
5759 WARN_ON(event->parent);
5760
5761 static_key_slow_dec(&perf_swevent_enabled[event_id]);
5762 swevent_hlist_put(event);
5763 }
5764
5765 static int perf_swevent_init(struct perf_event *event)
5766 {
5767 u64 event_id = event->attr.config;
5768
5769 if (event->attr.type != PERF_TYPE_SOFTWARE)
5770 return -ENOENT;
5771
5772 /*
5773 * no branch sampling for software events
5774 */
5775 if (has_branch_stack(event))
5776 return -EOPNOTSUPP;
5777
5778 switch (event_id) {
5779 case PERF_COUNT_SW_CPU_CLOCK:
5780 case PERF_COUNT_SW_TASK_CLOCK:
5781 return -ENOENT;
5782
5783 default:
5784 break;
5785 }
5786
5787 if (event_id >= PERF_COUNT_SW_MAX)
5788 return -ENOENT;
5789
5790 if (!event->parent) {
5791 int err;
5792
5793 err = swevent_hlist_get(event);
5794 if (err)
5795 return err;
5796
5797 static_key_slow_inc(&perf_swevent_enabled[event_id]);
5798 event->destroy = sw_perf_event_destroy;
5799 }
5800
5801 return 0;
5802 }
5803
5804 static int perf_swevent_event_idx(struct perf_event *event)
5805 {
5806 return 0;
5807 }
5808
5809 static struct pmu perf_swevent = {
5810 .task_ctx_nr = perf_sw_context,
5811
5812 .event_init = perf_swevent_init,
5813 .add = perf_swevent_add,
5814 .del = perf_swevent_del,
5815 .start = perf_swevent_start,
5816 .stop = perf_swevent_stop,
5817 .read = perf_swevent_read,
5818
5819 .event_idx = perf_swevent_event_idx,
5820 };
5821
5822 #ifdef CONFIG_EVENT_TRACING
5823
5824 static int perf_tp_filter_match(struct perf_event *event,
5825 struct perf_sample_data *data)
5826 {
5827 void *record = data->raw->data;
5828
5829 if (likely(!event->filter) || filter_match_preds(event->filter, record))
5830 return 1;
5831 return 0;
5832 }
5833
5834 static int perf_tp_event_match(struct perf_event *event,
5835 struct perf_sample_data *data,
5836 struct pt_regs *regs)
5837 {
5838 if (event->hw.state & PERF_HES_STOPPED)
5839 return 0;
5840 /*
5841 * All tracepoints are from kernel-space.
5842 */
5843 if (event->attr.exclude_kernel)
5844 return 0;
5845
5846 if (!perf_tp_filter_match(event, data))
5847 return 0;
5848
5849 return 1;
5850 }
5851
5852 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5853 struct pt_regs *regs, struct hlist_head *head, int rctx,
5854 struct task_struct *task)
5855 {
5856 struct perf_sample_data data;
5857 struct perf_event *event;
5858
5859 struct perf_raw_record raw = {
5860 .size = entry_size,
5861 .data = record,
5862 };
5863
5864 perf_sample_data_init(&data, addr, 0);
5865 data.raw = &raw;
5866
5867 hlist_for_each_entry_rcu(event, head, hlist_entry) {
5868 if (perf_tp_event_match(event, &data, regs))
5869 perf_swevent_event(event, count, &data, regs);
5870 }
5871
5872 /*
5873 * If we were given a target task, also iterate its context and
5874 * deliver this event there too.
5875 */
5876 if (task && task != current) {
5877 struct perf_event_context *ctx;
5878 struct trace_entry *entry = record;
5879
5880 rcu_read_lock();
5881 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
5882 if (!ctx)
5883 goto unlock;
5884
5885 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5886 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5887 continue;
5888 if (event->attr.config != entry->type)
5889 continue;
5890 if (perf_tp_event_match(event, &data, regs))
5891 perf_swevent_event(event, count, &data, regs);
5892 }
5893 unlock:
5894 rcu_read_unlock();
5895 }
5896
5897 perf_swevent_put_recursion_context(rctx);
5898 }
5899 EXPORT_SYMBOL_GPL(perf_tp_event);
5900
5901 static void tp_perf_event_destroy(struct perf_event *event)
5902 {
5903 perf_trace_destroy(event);
5904 }
5905
5906 static int perf_tp_event_init(struct perf_event *event)
5907 {
5908 int err;
5909
5910 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5911 return -ENOENT;
5912
5913 /*
5914 * no branch sampling for tracepoint events
5915 */
5916 if (has_branch_stack(event))
5917 return -EOPNOTSUPP;
5918
5919 err = perf_trace_init(event);
5920 if (err)
5921 return err;
5922
5923 event->destroy = tp_perf_event_destroy;
5924
5925 return 0;
5926 }
5927
5928 static struct pmu perf_tracepoint = {
5929 .task_ctx_nr = perf_sw_context,
5930
5931 .event_init = perf_tp_event_init,
5932 .add = perf_trace_add,
5933 .del = perf_trace_del,
5934 .start = perf_swevent_start,
5935 .stop = perf_swevent_stop,
5936 .read = perf_swevent_read,
5937
5938 .event_idx = perf_swevent_event_idx,
5939 };
5940
5941 static inline void perf_tp_register(void)
5942 {
5943 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
5944 }
5945
5946 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5947 {
5948 char *filter_str;
5949 int ret;
5950
5951 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5952 return -EINVAL;
5953
5954 filter_str = strndup_user(arg, PAGE_SIZE);
5955 if (IS_ERR(filter_str))
5956 return PTR_ERR(filter_str);
5957
5958 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5959
5960 kfree(filter_str);
5961 return ret;
5962 }
5963
5964 static void perf_event_free_filter(struct perf_event *event)
5965 {
5966 ftrace_profile_free_filter(event);
5967 }
5968
5969 #else
5970
5971 static inline void perf_tp_register(void)
5972 {
5973 }
5974
5975 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5976 {
5977 return -ENOENT;
5978 }
5979
5980 static void perf_event_free_filter(struct perf_event *event)
5981 {
5982 }
5983
5984 #endif /* CONFIG_EVENT_TRACING */
5985
5986 #ifdef CONFIG_HAVE_HW_BREAKPOINT
5987 void perf_bp_event(struct perf_event *bp, void *data)
5988 {
5989 struct perf_sample_data sample;
5990 struct pt_regs *regs = data;
5991
5992 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
5993
5994 if (!bp->hw.state && !perf_exclude_event(bp, regs))
5995 perf_swevent_event(bp, 1, &sample, regs);
5996 }
5997 #endif
5998
5999 /*
6000 * hrtimer based swevent callback
6001 */
6002
6003 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
6004 {
6005 enum hrtimer_restart ret = HRTIMER_RESTART;
6006 struct perf_sample_data data;
6007 struct pt_regs *regs;
6008 struct perf_event *event;
6009 u64 period;
6010
6011 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
6012
6013 if (event->state != PERF_EVENT_STATE_ACTIVE)
6014 return HRTIMER_NORESTART;
6015
6016 event->pmu->read(event);
6017
6018 perf_sample_data_init(&data, 0, event->hw.last_period);
6019 regs = get_irq_regs();
6020
6021 if (regs && !perf_exclude_event(event, regs)) {
6022 if (!(event->attr.exclude_idle && is_idle_task(current)))
6023 if (__perf_event_overflow(event, 1, &data, regs))
6024 ret = HRTIMER_NORESTART;
6025 }
6026
6027 period = max_t(u64, 10000, event->hw.sample_period);
6028 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
6029
6030 return ret;
6031 }
6032
6033 static void perf_swevent_start_hrtimer(struct perf_event *event)
6034 {
6035 struct hw_perf_event *hwc = &event->hw;
6036 s64 period;
6037
6038 if (!is_sampling_event(event))
6039 return;
6040
6041 period = local64_read(&hwc->period_left);
6042 if (period) {
6043 if (period < 0)
6044 period = 10000;
6045
6046 local64_set(&hwc->period_left, 0);
6047 } else {
6048 period = max_t(u64, 10000, hwc->sample_period);
6049 }
6050 __hrtimer_start_range_ns(&hwc->hrtimer,
6051 ns_to_ktime(period), 0,
6052 HRTIMER_MODE_REL_PINNED, 0);
6053 }
6054
6055 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
6056 {
6057 struct hw_perf_event *hwc = &event->hw;
6058
6059 if (is_sampling_event(event)) {
6060 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
6061 local64_set(&hwc->period_left, ktime_to_ns(remaining));
6062
6063 hrtimer_cancel(&hwc->hrtimer);
6064 }
6065 }
6066
6067 static void perf_swevent_init_hrtimer(struct perf_event *event)
6068 {
6069 struct hw_perf_event *hwc = &event->hw;
6070
6071 if (!is_sampling_event(event))
6072 return;
6073
6074 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6075 hwc->hrtimer.function = perf_swevent_hrtimer;
6076
6077 /*
6078 * Since hrtimers have a fixed rate, we can do a static freq->period
6079 * mapping and avoid the whole period adjust feedback stuff.
6080 */
6081 if (event->attr.freq) {
6082 long freq = event->attr.sample_freq;
6083
6084 event->attr.sample_period = NSEC_PER_SEC / freq;
6085 hwc->sample_period = event->attr.sample_period;
6086 local64_set(&hwc->period_left, hwc->sample_period);
6087 hwc->last_period = hwc->sample_period;
6088 event->attr.freq = 0;
6089 }
6090 }
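/*
 * A worked example of the static freq->period mapping above, with an
 * illustrative frequency: attr.freq = 1, attr.sample_freq = 4000 becomes
 *
 *	sample_period = NSEC_PER_SEC / 4000 = 250000;	// 250 us per sample
 *
 * after which attr.freq is cleared, so the hrtimer simply fires every
 * 250 us with no further period adjustment.
 */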
6091
6092 /*
6093 * Software event: cpu wall time clock
6094 */
6095
6096 static void cpu_clock_event_update(struct perf_event *event)
6097 {
6098 s64 prev;
6099 u64 now;
6100
6101 now = local_clock();
6102 prev = local64_xchg(&event->hw.prev_count, now);
6103 local64_add(now - prev, &event->count);
6104 }
6105
6106 static void cpu_clock_event_start(struct perf_event *event, int flags)
6107 {
6108 local64_set(&event->hw.prev_count, local_clock());
6109 perf_swevent_start_hrtimer(event);
6110 }
6111
6112 static void cpu_clock_event_stop(struct perf_event *event, int flags)
6113 {
6114 perf_swevent_cancel_hrtimer(event);
6115 cpu_clock_event_update(event);
6116 }
6117
6118 static int cpu_clock_event_add(struct perf_event *event, int flags)
6119 {
6120 if (flags & PERF_EF_START)
6121 cpu_clock_event_start(event, flags);
6122
6123 return 0;
6124 }
6125
6126 static void cpu_clock_event_del(struct perf_event *event, int flags)
6127 {
6128 cpu_clock_event_stop(event, flags);
6129 }
6130
6131 static void cpu_clock_event_read(struct perf_event *event)
6132 {
6133 cpu_clock_event_update(event);
6134 }
6135
6136 static int cpu_clock_event_init(struct perf_event *event)
6137 {
6138 if (event->attr.type != PERF_TYPE_SOFTWARE)
6139 return -ENOENT;
6140
6141 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
6142 return -ENOENT;
6143
6144 /*
6145 * no branch sampling for software events
6146 */
6147 if (has_branch_stack(event))
6148 return -EOPNOTSUPP;
6149
6150 perf_swevent_init_hrtimer(event);
6151
6152 return 0;
6153 }
6154
6155 static struct pmu perf_cpu_clock = {
6156 .task_ctx_nr = perf_sw_context,
6157
6158 .event_init = cpu_clock_event_init,
6159 .add = cpu_clock_event_add,
6160 .del = cpu_clock_event_del,
6161 .start = cpu_clock_event_start,
6162 .stop = cpu_clock_event_stop,
6163 .read = cpu_clock_event_read,
6164
6165 .event_idx = perf_swevent_event_idx,
6166 };
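/*
 * A hedged userspace sketch of consuming the cpu-clock software event
 * above as a plain counter; error handling is omitted and the workload
 * is assumed to exist:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	__u64 ns;
 *	// ... run the workload ...
 *	read(fd, &ns, sizeof(ns));	// ns accumulated while scheduled in
 *
 * With attr.sample_period set instead, the same event samples via the
 * hrtimer path set up by perf_swevent_init_hrtimer().
 */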
6167
6168 /*
6169 * Software event: task time clock
6170 */
6171
6172 static void task_clock_event_update(struct perf_event *event, u64 now)
6173 {
6174 u64 prev;
6175 s64 delta;
6176
6177 prev = local64_xchg(&event->hw.prev_count, now);
6178 delta = now - prev;
6179 local64_add(delta, &event->count);
6180 }
6181
6182 static void task_clock_event_start(struct perf_event *event, int flags)
6183 {
6184 local64_set(&event->hw.prev_count, event->ctx->time);
6185 perf_swevent_start_hrtimer(event);
6186 }
6187
6188 static void task_clock_event_stop(struct perf_event *event, int flags)
6189 {
6190 perf_swevent_cancel_hrtimer(event);
6191 task_clock_event_update(event, event->ctx->time);
6192 }
6193
6194 static int task_clock_event_add(struct perf_event *event, int flags)
6195 {
6196 if (flags & PERF_EF_START)
6197 task_clock_event_start(event, flags);
6198
6199 return 0;
6200 }
6201
6202 static void task_clock_event_del(struct perf_event *event, int flags)
6203 {
6204 task_clock_event_stop(event, PERF_EF_UPDATE);
6205 }
6206
6207 static void task_clock_event_read(struct perf_event *event)
6208 {
6209 u64 now = perf_clock();
6210 u64 delta = now - event->ctx->timestamp;
6211 u64 time = event->ctx->time + delta;
6212
6213 task_clock_event_update(event, time);
6214 }
6215
6216 static int task_clock_event_init(struct perf_event *event)
6217 {
6218 if (event->attr.type != PERF_TYPE_SOFTWARE)
6219 return -ENOENT;
6220
6221 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
6222 return -ENOENT;
6223
6224 /*
6225 * no branch sampling for software events
6226 */
6227 if (has_branch_stack(event))
6228 return -EOPNOTSUPP;
6229
6230 perf_swevent_init_hrtimer(event);
6231
6232 return 0;
6233 }
6234
6235 static struct pmu perf_task_clock = {
6236 .task_ctx_nr = perf_sw_context,
6237
6238 .event_init = task_clock_event_init,
6239 .add = task_clock_event_add,
6240 .del = task_clock_event_del,
6241 .start = task_clock_event_start,
6242 .stop = task_clock_event_stop,
6243 .read = task_clock_event_read,
6244
6245 .event_idx = perf_swevent_event_idx,
6246 };
6247
6248 static void perf_pmu_nop_void(struct pmu *pmu)
6249 {
6250 }
6251
6252 static int perf_pmu_nop_int(struct pmu *pmu)
6253 {
6254 return 0;
6255 }
6256
6257 static void perf_pmu_start_txn(struct pmu *pmu)
6258 {
6259 perf_pmu_disable(pmu);
6260 }
6261
6262 static int perf_pmu_commit_txn(struct pmu *pmu)
6263 {
6264 perf_pmu_enable(pmu);
6265 return 0;
6266 }
6267
6268 static void perf_pmu_cancel_txn(struct pmu *pmu)
6269 {
6270 perf_pmu_enable(pmu);
6271 }
6272
6273 static int perf_event_idx_default(struct perf_event *event)
6274 {
6275 return event->hw.idx + 1;
6276 }
6277
6278 /*
6279 * Ensures all contexts with the same task_ctx_nr have the same
6280 * pmu_cpu_context too.
6281 */
6282 static void *find_pmu_context(int ctxn)
6283 {
6284 struct pmu *pmu;
6285
6286 if (ctxn < 0)
6287 return NULL;
6288
6289 list_for_each_entry(pmu, &pmus, entry) {
6290 if (pmu->task_ctx_nr == ctxn)
6291 return pmu->pmu_cpu_context;
6292 }
6293
6294 return NULL;
6295 }
6296
6297 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
6298 {
6299 int cpu;
6300
6301 for_each_possible_cpu(cpu) {
6302 struct perf_cpu_context *cpuctx;
6303
6304 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6305
6306 if (cpuctx->unique_pmu == old_pmu)
6307 cpuctx->unique_pmu = pmu;
6308 }
6309 }
6310
6311 static void free_pmu_context(struct pmu *pmu)
6312 {
6313 struct pmu *i;
6314
6315 mutex_lock(&pmus_lock);
6316 /*
6317 * Like a real lame refcount.
6318 */
6319 list_for_each_entry(i, &pmus, entry) {
6320 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
6321 update_pmu_context(i, pmu);
6322 goto out;
6323 }
6324 }
6325
6326 free_percpu(pmu->pmu_cpu_context);
6327 out:
6328 mutex_unlock(&pmus_lock);
6329 }
6330 static struct idr pmu_idr;
6331
6332 static ssize_t
6333 type_show(struct device *dev, struct device_attribute *attr, char *page)
6334 {
6335 struct pmu *pmu = dev_get_drvdata(dev);
6336
6337 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
6338 }
6339 static DEVICE_ATTR_RO(type);
6340
6341 static ssize_t
6342 perf_event_mux_interval_ms_show(struct device *dev,
6343 struct device_attribute *attr,
6344 char *page)
6345 {
6346 struct pmu *pmu = dev_get_drvdata(dev);
6347
6348 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
6349 }
6350
6351 static ssize_t
6352 perf_event_mux_interval_ms_store(struct device *dev,
6353 struct device_attribute *attr,
6354 const char *buf, size_t count)
6355 {
6356 struct pmu *pmu = dev_get_drvdata(dev);
6357 int timer, cpu, ret;
6358
6359 ret = kstrtoint(buf, 0, &timer);
6360 if (ret)
6361 return ret;
6362
6363 if (timer < 1)
6364 return -EINVAL;
6365
6366 /* same value, nothing to do */
6367 if (timer == pmu->hrtimer_interval_ms)
6368 return count;
6369
6370 pmu->hrtimer_interval_ms = timer;
6371
6372 /* update all cpuctx for this PMU */
6373 for_each_possible_cpu(cpu) {
6374 struct perf_cpu_context *cpuctx;
6375 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6376 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
6377
6378 if (hrtimer_active(&cpuctx->hrtimer))
6379 hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
6380 }
6381
6382 return count;
6383 }
6384 static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
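
/*
 * Illustrative sketch: the store handler above runs when a millisecond
 * value is written to
 * /sys/bus/event_source/devices/<pmu>/perf_event_mux_interval_ms.
 * The userspace example below assumes a PMU whose sysfs directory is
 * named "cpu"; the actual name depends on which PMUs are registered.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/event_source/devices/cpu/"
			"perf_event_mux_interval_ms", "w");

	if (!f)
		return 1;
	/* ask for a 4ms multiplexing interval; values below 1 are rejected */
	fprintf(f, "4\n");
	return fclose(f) ? 1 : 0;
}
#endif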
6385
6386 static struct attribute *pmu_dev_attrs[] = {
6387 &dev_attr_type.attr,
6388 &dev_attr_perf_event_mux_interval_ms.attr,
6389 NULL,
6390 };
6391 ATTRIBUTE_GROUPS(pmu_dev);
6392
6393 static int pmu_bus_running;
6394 static struct bus_type pmu_bus = {
6395 .name = "event_source",
6396 .dev_groups = pmu_dev_groups,
6397 };
6398
6399 static void pmu_dev_release(struct device *dev)
6400 {
6401 kfree(dev);
6402 }
6403
6404 static int pmu_dev_alloc(struct pmu *pmu)
6405 {
6406 int ret = -ENOMEM;
6407
6408 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
6409 if (!pmu->dev)
6410 goto out;
6411
6412 pmu->dev->groups = pmu->attr_groups;
6413 device_initialize(pmu->dev);
6414 ret = dev_set_name(pmu->dev, "%s", pmu->name);
6415 if (ret)
6416 goto free_dev;
6417
6418 dev_set_drvdata(pmu->dev, pmu);
6419 pmu->dev->bus = &pmu_bus;
6420 pmu->dev->release = pmu_dev_release;
6421 ret = device_add(pmu->dev);
6422 if (ret)
6423 goto free_dev;
6424
6425 out:
6426 return ret;
6427
6428 free_dev:
6429 put_device(pmu->dev);
6430 goto out;
6431 }
6432
6433 static struct lock_class_key cpuctx_mutex;
6434 static struct lock_class_key cpuctx_lock;
6435
6436 int perf_pmu_register(struct pmu *pmu, const char *name, int type)
6437 {
6438 int cpu, ret;
6439
6440 mutex_lock(&pmus_lock);
6441 ret = -ENOMEM;
6442 pmu->pmu_disable_count = alloc_percpu(int);
6443 if (!pmu->pmu_disable_count)
6444 goto unlock;
6445
6446 pmu->type = -1;
6447 if (!name)
6448 goto skip_type;
6449 pmu->name = name;
6450
6451 if (type < 0) {
6452 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
6453 if (type < 0) {
6454 ret = type;
6455 goto free_pdc;
6456 }
6457 }
6458 pmu->type = type;
6459
6460 if (pmu_bus_running) {
6461 ret = pmu_dev_alloc(pmu);
6462 if (ret)
6463 goto free_idr;
6464 }
6465
6466 skip_type:
6467 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
6468 if (pmu->pmu_cpu_context)
6469 goto got_cpu_context;
6470
6471 ret = -ENOMEM;
6472 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
6473 if (!pmu->pmu_cpu_context)
6474 goto free_dev;
6475
6476 for_each_possible_cpu(cpu) {
6477 struct perf_cpu_context *cpuctx;
6478
6479 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6480 __perf_event_init_context(&cpuctx->ctx);
6481 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
6482 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
6483 cpuctx->ctx.type = cpu_context;
6484 cpuctx->ctx.pmu = pmu;
6485
6486 __perf_cpu_hrtimer_init(cpuctx, cpu);
6487
6488 INIT_LIST_HEAD(&cpuctx->rotation_list);
6489 cpuctx->unique_pmu = pmu;
6490 }
6491
6492 got_cpu_context:
6493 if (!pmu->start_txn) {
6494 if (pmu->pmu_enable) {
6495 /*
6496 * If we have pmu_enable/pmu_disable calls, install
6497 * transaction stubs that use them to try to batch
6498 * hardware accesses.
6499 */
6500 pmu->start_txn = perf_pmu_start_txn;
6501 pmu->commit_txn = perf_pmu_commit_txn;
6502 pmu->cancel_txn = perf_pmu_cancel_txn;
6503 } else {
6504 pmu->start_txn = perf_pmu_nop_void;
6505 pmu->commit_txn = perf_pmu_nop_int;
6506 pmu->cancel_txn = perf_pmu_nop_void;
6507 }
6508 }
6509
6510 if (!pmu->pmu_enable) {
6511 pmu->pmu_enable = perf_pmu_nop_void;
6512 pmu->pmu_disable = perf_pmu_nop_void;
6513 }
6514
6515 if (!pmu->event_idx)
6516 pmu->event_idx = perf_event_idx_default;
6517
6518 list_add_rcu(&pmu->entry, &pmus);
6519 ret = 0;
6520 unlock:
6521 mutex_unlock(&pmus_lock);
6522
6523 return ret;
6524
6525 free_dev:
6526 device_del(pmu->dev);
6527 put_device(pmu->dev);
6528
6529 free_idr:
6530 if (pmu->type >= PERF_TYPE_MAX)
6531 idr_remove(&pmu_idr, pmu->type);
6532
6533 free_pdc:
6534 free_percpu(pmu->pmu_disable_count);
6535 goto unlock;
6536 }
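
/*
 * Illustrative sketch of a perf_pmu_register() caller: a minimal
 * software-style PMU modeled on perf_cpu_clock above.  All "my_*" names
 * are hypothetical; a real PMU fills in the callbacks with
 * hardware-specific logic.
 */
#if 0
static int my_event_init(struct perf_event *event)
{
	/* claim only events whose type matches our dynamically allocated id */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;
	return 0;
}

static int  my_add(struct perf_event *event, int flags)	{ return 0; }
static void my_del(struct perf_event *event, int flags)	{ }
static void my_start(struct perf_event *event, int flags)	{ }
static void my_stop(struct perf_event *event, int flags)	{ }
static void my_read(struct perf_event *event)			{ }

static struct pmu my_pmu = {
	.task_ctx_nr	= perf_sw_context,
	.event_init	= my_event_init,
	.add		= my_add,
	.del		= my_del,
	.start		= my_start,
	.stop		= my_stop,
	.read		= my_read,
};

static int __init my_pmu_init(void)
{
	/* type == -1 asks perf_pmu_register() to allocate a dynamic type id */
	return perf_pmu_register(&my_pmu, "my_pmu", -1);
}
#endif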
6537
6538 void perf_pmu_unregister(struct pmu *pmu)
6539 {
6540 mutex_lock(&pmus_lock);
6541 list_del_rcu(&pmu->entry);
6542 mutex_unlock(&pmus_lock);
6543
6544 /*
6545 * We dereference the pmu list under both SRCU and regular RCU, so
6546 * synchronize against both of those.
6547 */
6548 synchronize_srcu(&pmus_srcu);
6549 synchronize_rcu();
6550
6551 free_percpu(pmu->pmu_disable_count);
6552 if (pmu->type >= PERF_TYPE_MAX)
6553 idr_remove(&pmu_idr, pmu->type);
6554 device_del(pmu->dev);
6555 put_device(pmu->dev);
6556 free_pmu_context(pmu);
6557 }
6558
6559 struct pmu *perf_init_event(struct perf_event *event)
6560 {
6561 struct pmu *pmu = NULL;
6562 int idx;
6563 int ret;
6564
6565 idx = srcu_read_lock(&pmus_srcu);
6566
6567 rcu_read_lock();
6568 pmu = idr_find(&pmu_idr, event->attr.type);
6569 rcu_read_unlock();
6570 if (pmu) {
6571 event->pmu = pmu;
6572 ret = pmu->event_init(event);
6573 if (ret)
6574 pmu = ERR_PTR(ret);
6575 goto unlock;
6576 }
6577
6578 list_for_each_entry_rcu(pmu, &pmus, entry) {
6579 event->pmu = pmu;
6580 ret = pmu->event_init(event);
6581 if (!ret)
6582 goto unlock;
6583
6584 if (ret != -ENOENT) {
6585 pmu = ERR_PTR(ret);
6586 goto unlock;
6587 }
6588 }
6589 pmu = ERR_PTR(-ENOENT);
6590 unlock:
6591 srcu_read_unlock(&pmus_srcu, idx);
6592
6593 return pmu;
6594 }
6595
6596 static void account_event_cpu(struct perf_event *event, int cpu)
6597 {
6598 if (event->parent)
6599 return;
6600
6601 if (has_branch_stack(event)) {
6602 if (!(event->attach_state & PERF_ATTACH_TASK))
6603 atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
6604 }
6605 if (is_cgroup_event(event))
6606 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
6607 }
6608
6609 static void account_event(struct perf_event *event)
6610 {
6611 if (event->parent)
6612 return;
6613
6614 if (event->attach_state & PERF_ATTACH_TASK)
6615 static_key_slow_inc(&perf_sched_events.key);
6616 if (event->attr.mmap || event->attr.mmap_data)
6617 atomic_inc(&nr_mmap_events);
6618 if (event->attr.comm)
6619 atomic_inc(&nr_comm_events);
6620 if (event->attr.task)
6621 atomic_inc(&nr_task_events);
6622 if (event->attr.freq) {
6623 if (atomic_inc_return(&nr_freq_events) == 1)
6624 tick_nohz_full_kick_all();
6625 }
6626 if (has_branch_stack(event))
6627 static_key_slow_inc(&perf_sched_events.key);
6628 if (is_cgroup_event(event))
6629 static_key_slow_inc(&perf_sched_events.key);
6630
6631 account_event_cpu(event, event->cpu);
6632 }
6633
6634 /*
6635 * Allocate and initialize an event structure
6636 */
6637 static struct perf_event *
6638 perf_event_alloc(struct perf_event_attr *attr, int cpu,
6639 struct task_struct *task,
6640 struct perf_event *group_leader,
6641 struct perf_event *parent_event,
6642 perf_overflow_handler_t overflow_handler,
6643 void *context)
6644 {
6645 struct pmu *pmu;
6646 struct perf_event *event;
6647 struct hw_perf_event *hwc;
6648 long err = -EINVAL;
6649
6650 if ((unsigned)cpu >= nr_cpu_ids) {
6651 if (!task || cpu != -1)
6652 return ERR_PTR(-EINVAL);
6653 }
6654
6655 event = kzalloc(sizeof(*event), GFP_KERNEL);
6656 if (!event)
6657 return ERR_PTR(-ENOMEM);
6658
6659 /*
6660 * Single events are their own group leaders, with an
6661 * empty sibling list:
6662 */
6663 if (!group_leader)
6664 group_leader = event;
6665
6666 mutex_init(&event->child_mutex);
6667 INIT_LIST_HEAD(&event->child_list);
6668
6669 INIT_LIST_HEAD(&event->group_entry);
6670 INIT_LIST_HEAD(&event->event_entry);
6671 INIT_LIST_HEAD(&event->sibling_list);
6672 INIT_LIST_HEAD(&event->rb_entry);
6673
6674 init_waitqueue_head(&event->waitq);
6675 init_irq_work(&event->pending, perf_pending_event);
6676
6677 mutex_init(&event->mmap_mutex);
6678
6679 atomic_long_set(&event->refcount, 1);
6680 event->cpu = cpu;
6681 event->attr = *attr;
6682 event->group_leader = group_leader;
6683 event->pmu = NULL;
6684 event->oncpu = -1;
6685
6686 event->parent = parent_event;
6687
6688 event->ns = get_pid_ns(task_active_pid_ns(current));
6689 event->id = atomic64_inc_return(&perf_event_id);
6690
6691 event->state = PERF_EVENT_STATE_INACTIVE;
6692
6693 if (task) {
6694 event->attach_state = PERF_ATTACH_TASK;
6695
6696 if (attr->type == PERF_TYPE_TRACEPOINT)
6697 event->hw.tp_target = task;
6698 #ifdef CONFIG_HAVE_HW_BREAKPOINT
6699 /*
6700 * hw_breakpoint is a bit difficult here..
6701 */
6702 else if (attr->type == PERF_TYPE_BREAKPOINT)
6703 event->hw.bp_target = task;
6704 #endif
6705 }
6706
6707 if (!overflow_handler && parent_event) {
6708 overflow_handler = parent_event->overflow_handler;
6709 context = parent_event->overflow_handler_context;
6710 }
6711
6712 event->overflow_handler = overflow_handler;
6713 event->overflow_handler_context = context;
6714
6715 perf_event__state_init(event);
6716
6717 pmu = NULL;
6718
6719 hwc = &event->hw;
6720 hwc->sample_period = attr->sample_period;
6721 if (attr->freq && attr->sample_freq)
6722 hwc->sample_period = 1;
6723 hwc->last_period = hwc->sample_period;
6724
6725 local64_set(&hwc->period_left, hwc->sample_period);
6726
6727 /*
6728 * we currently do not support PERF_FORMAT_GROUP on inherited events
6729 */
6730 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
6731 goto err_ns;
6732
6733 pmu = perf_init_event(event);
6734 if (!pmu)
6735 goto err_ns;
6736 else if (IS_ERR(pmu)) {
6737 err = PTR_ERR(pmu);
6738 goto err_ns;
6739 }
6740
6741 if (!event->parent) {
6742 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6743 err = get_callchain_buffers();
6744 if (err)
6745 goto err_pmu;
6746 }
6747 }
6748
6749 return event;
6750
6751 err_pmu:
6752 if (event->destroy)
6753 event->destroy(event);
6754 err_ns:
6755 if (event->ns)
6756 put_pid_ns(event->ns);
6757 kfree(event);
6758
6759 return ERR_PTR(err);
6760 }
6761
6762 static int perf_copy_attr(struct perf_event_attr __user *uattr,
6763 struct perf_event_attr *attr)
6764 {
6765 u32 size;
6766 int ret;
6767
6768 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
6769 return -EFAULT;
6770
6771 /*
6772 * zero the full structure, so that a short copy will be nice.
6773 */
6774 memset(attr, 0, sizeof(*attr));
6775
6776 ret = get_user(size, &uattr->size);
6777 if (ret)
6778 return ret;
6779
6780 if (size > PAGE_SIZE) /* silly large */
6781 goto err_size;
6782
6783 if (!size) /* abi compat */
6784 size = PERF_ATTR_SIZE_VER0;
6785
6786 if (size < PERF_ATTR_SIZE_VER0)
6787 goto err_size;
6788
6789 /*
6790 * If we're handed a bigger struct than we know of,
6791 * ensure all the unknown bits are 0 - i.e. new
6792 * user-space does not rely on any kernel feature
6793 * extensions we don't know about yet.
6794 */
6795 if (size > sizeof(*attr)) {
6796 unsigned char __user *addr;
6797 unsigned char __user *end;
6798 unsigned char val;
6799
6800 addr = (void __user *)uattr + sizeof(*attr);
6801 end = (void __user *)uattr + size;
6802
6803 for (; addr < end; addr++) {
6804 ret = get_user(val, addr);
6805 if (ret)
6806 return ret;
6807 if (val)
6808 goto err_size;
6809 }
6810 size = sizeof(*attr);
6811 }
6812
6813 ret = copy_from_user(attr, uattr, size);
6814 if (ret)
6815 return -EFAULT;
6816
6817 /* disabled for now */
6818 if (attr->mmap2)
6819 return -EINVAL;
6820
6821 if (attr->__reserved_1)
6822 return -EINVAL;
6823
6824 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
6825 return -EINVAL;
6826
6827 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
6828 return -EINVAL;
6829
6830 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
6831 u64 mask = attr->branch_sample_type;
6832
6833 /* only using defined bits */
6834 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
6835 return -EINVAL;
6836
6837 /* at least one branch bit must be set */
6838 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
6839 return -EINVAL;
6840
6841 /* propagate priv level, when not set for branch */
6842 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
6843
6844 /* exclude_kernel checked on syscall entry */
6845 if (!attr->exclude_kernel)
6846 mask |= PERF_SAMPLE_BRANCH_KERNEL;
6847
6848 if (!attr->exclude_user)
6849 mask |= PERF_SAMPLE_BRANCH_USER;
6850
6851 if (!attr->exclude_hv)
6852 mask |= PERF_SAMPLE_BRANCH_HV;
6853 /*
6854 * adjust user setting (for HW filter setup)
6855 */
6856 attr->branch_sample_type = mask;
6857 }
6858 /* privileged levels capture (kernel, hv): check permissions */
6859 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
6860 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6861 return -EACCES;
6862 }
6863
6864 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
6865 ret = perf_reg_validate(attr->sample_regs_user);
6866 if (ret)
6867 return ret;
6868 }
6869
6870 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
6871 if (!arch_perf_have_user_stack_dump())
6872 return -ENOSYS;
6873
6874 /*
6875 * We have __u32 type for the size, but so far
6876 * we can only use __u16 as maximum due to the
6877 * __u16 sample size limit.
6878 */
6879 if (attr->sample_stack_user >= USHRT_MAX)
6880 ret = -EINVAL;
6881 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
6882 ret = -EINVAL;
6883 }
6884
6885 out:
6886 return ret;
6887
6888 err_size:
6889 put_user(sizeof(*attr), &uattr->size);
6890 ret = -E2BIG;
6891 goto out;
6892 }
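
/*
 * Illustrative userspace sketch of the attr ABI that perf_copy_attr()
 * enforces: the caller zeroes the structure and reports the size it was
 * compiled against.  A newer userspace running on an older kernel is
 * rejected (-E2BIG) unless the extra trailing fields are zero, while an
 * older userspace's missing fields simply read as zero here.
 */
#if 0
#include <string.h>
#include <linux/perf_event.h>

static void prepare_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);	/* at least PERF_ATTR_SIZE_VER0 */
	attr->type = PERF_TYPE_SOFTWARE;
	attr->config = PERF_COUNT_SW_TASK_CLOCK;
	attr->disabled = 1;		/* enable later with PERF_EVENT_IOC_ENABLE */
}
#endif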
6893
6894 static int
6895 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
6896 {
6897 struct ring_buffer *rb = NULL, *old_rb = NULL;
6898 int ret = -EINVAL;
6899
6900 if (!output_event)
6901 goto set;
6902
6903 /* don't allow circular references */
6904 if (event == output_event)
6905 goto out;
6906
6907 /*
6908 * Don't allow cross-cpu buffers
6909 */
6910 if (output_event->cpu != event->cpu)
6911 goto out;
6912
6913 /*
6914 * If it's not a per-cpu rb, it must be the same task.
6915 */
6916 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
6917 goto out;
6918
6919 set:
6920 mutex_lock(&event->mmap_mutex);
6921 /* Can't redirect output if we've got an active mmap() */
6922 if (atomic_read(&event->mmap_count))
6923 goto unlock;
6924
6925 old_rb = event->rb;
6926
6927 if (output_event) {
6928 /* get the rb we want to redirect to */
6929 rb = ring_buffer_get(output_event);
6930 if (!rb)
6931 goto unlock;
6932 }
6933
6934 if (old_rb)
6935 ring_buffer_detach(event, old_rb);
6936
6937 if (rb)
6938 ring_buffer_attach(event, rb);
6939
6940 rcu_assign_pointer(event->rb, rb);
6941
6942 if (old_rb) {
6943 ring_buffer_put(old_rb);
6944 /*
6945 * Since we had to detach the old rb before we could attach the
6946 * new one, we may have missed a wakeup in between.
6947 * Provide it now.
6948 */
6949 wake_up_all(&event->waitq);
6950 }
6951
6952 ret = 0;
6953 unlock:
6954 mutex_unlock(&event->mmap_mutex);
6955
6956 out:
6957 return ret;
6958 }
6959
6960 /**
6961 * sys_perf_event_open - open a performance event, associate it to a task/cpu
6962 *
6963 * @attr_uptr: event_id type attributes for monitoring/sampling
6964 * @pid: target pid
6965 * @cpu: target cpu
6966 * @group_fd: group leader event fd
 * @flags: perf event open flags (PERF_FLAG_*)
6967 */
6968 SYSCALL_DEFINE5(perf_event_open,
6969 struct perf_event_attr __user *, attr_uptr,
6970 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
6971 {
6972 struct perf_event *group_leader = NULL, *output_event = NULL;
6973 struct perf_event *event, *sibling;
6974 struct perf_event_attr attr;
6975 struct perf_event_context *ctx;
6976 struct file *event_file = NULL;
6977 struct fd group = {NULL, 0};
6978 struct task_struct *task = NULL;
6979 struct pmu *pmu;
6980 int event_fd;
6981 int move_group = 0;
6982 int err;
6983
6984 /* for future expandability... */
6985 if (flags & ~PERF_FLAG_ALL)
6986 return -EINVAL;
6987
6988 err = perf_copy_attr(attr_uptr, &attr);
6989 if (err)
6990 return err;
6991
6992 if (!attr.exclude_kernel) {
6993 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6994 return -EACCES;
6995 }
6996
6997 if (attr.freq) {
6998 if (attr.sample_freq > sysctl_perf_event_sample_rate)
6999 return -EINVAL;
7000 }
7001
7002 /*
7003 * In cgroup mode, the pid argument is used to pass the fd
7004 * opened to the cgroup directory in cgroupfs. The cpu argument
7005 * designates the cpu on which to monitor threads from that
7006 * cgroup.
7007 */
7008 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
7009 return -EINVAL;
7010
7011 event_fd = get_unused_fd();
7012 if (event_fd < 0)
7013 return event_fd;
7014
7015 if (group_fd != -1) {
7016 err = perf_fget_light(group_fd, &group);
7017 if (err)
7018 goto err_fd;
7019 group_leader = group.file->private_data;
7020 if (flags & PERF_FLAG_FD_OUTPUT)
7021 output_event = group_leader;
7022 if (flags & PERF_FLAG_FD_NO_GROUP)
7023 group_leader = NULL;
7024 }
7025
7026 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
7027 task = find_lively_task_by_vpid(pid);
7028 if (IS_ERR(task)) {
7029 err = PTR_ERR(task);
7030 goto err_group_fd;
7031 }
7032 }
7033
7034 get_online_cpus();
7035
7036 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
7037 NULL, NULL);
7038 if (IS_ERR(event)) {
7039 err = PTR_ERR(event);
7040 goto err_task;
7041 }
7042
7043 if (flags & PERF_FLAG_PID_CGROUP) {
7044 err = perf_cgroup_connect(pid, event, &attr, group_leader);
7045 if (err) {
7046 __free_event(event);
7047 goto err_task;
7048 }
7049 }
7050
7051 account_event(event);
7052
7053 /*
7054 * Special case software events and allow them to be part of
7055 * any hardware group.
7056 */
7057 pmu = event->pmu;
7058
7059 if (group_leader &&
7060 (is_software_event(event) != is_software_event(group_leader))) {
7061 if (is_software_event(event)) {
7062 /*
7063 * If event and group_leader are not both a software
7064 * event, and event is, then group leader is not.
7065 *
7066 * Allow the addition of software events to !software
7067 * groups, this is safe because software events never
7068 * fail to schedule.
7069 */
7070 pmu = group_leader->pmu;
7071 } else if (is_software_event(group_leader) &&
7072 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
7073 /*
7074 * In case the group is a pure software group, and we
7075 * try to add a hardware event, move the whole group to
7076 * the hardware context.
7077 */
7078 move_group = 1;
7079 }
7080 }
7081
7082 /*
7083 * Get the target context (task or percpu):
7084 */
7085 ctx = find_get_context(pmu, task, event->cpu);
7086 if (IS_ERR(ctx)) {
7087 err = PTR_ERR(ctx);
7088 goto err_alloc;
7089 }
7090
7091 if (task) {
7092 put_task_struct(task);
7093 task = NULL;
7094 }
7095
7096 /*
7097 * Look up the group leader (we will attach this event to it):
7098 */
7099 if (group_leader) {
7100 err = -EINVAL;
7101
7102 /*
7103 * Do not allow a recursive hierarchy (this new sibling
7104 * becoming part of another group-sibling):
7105 */
7106 if (group_leader->group_leader != group_leader)
7107 goto err_context;
7108 /*
7109 * Do not allow attaching to a group in a different
7110 * task or CPU context:
7111 */
7112 if (move_group) {
7113 if (group_leader->ctx->type != ctx->type)
7114 goto err_context;
7115 } else {
7116 if (group_leader->ctx != ctx)
7117 goto err_context;
7118 }
7119
7120 /*
7121 * Only a group leader can be exclusive or pinned
7122 */
7123 if (attr.exclusive || attr.pinned)
7124 goto err_context;
7125 }
7126
7127 if (output_event) {
7128 err = perf_event_set_output(event, output_event);
7129 if (err)
7130 goto err_context;
7131 }
7132
7133 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
7134 if (IS_ERR(event_file)) {
7135 err = PTR_ERR(event_file);
7136 goto err_context;
7137 }
7138
7139 if (move_group) {
7140 struct perf_event_context *gctx = group_leader->ctx;
7141
7142 mutex_lock(&gctx->mutex);
7143 perf_remove_from_context(group_leader);
7144
7145 /*
7146 * Removing from the context ends up with a disabled
7147 * event. What we want here is an event in the initial
7148 * startup state, ready to be added into the new context.
7149 */
7150 perf_event__state_init(group_leader);
7151 list_for_each_entry(sibling, &group_leader->sibling_list,
7152 group_entry) {
7153 perf_remove_from_context(sibling);
7154 perf_event__state_init(sibling);
7155 put_ctx(gctx);
7156 }
7157 mutex_unlock(&gctx->mutex);
7158 put_ctx(gctx);
7159 }
7160
7161 WARN_ON_ONCE(ctx->parent_ctx);
7162 mutex_lock(&ctx->mutex);
7163
7164 if (move_group) {
7165 synchronize_rcu();
7166 perf_install_in_context(ctx, group_leader, event->cpu);
7167 get_ctx(ctx);
7168 list_for_each_entry(sibling, &group_leader->sibling_list,
7169 group_entry) {
7170 perf_install_in_context(ctx, sibling, event->cpu);
7171 get_ctx(ctx);
7172 }
7173 }
7174
7175 perf_install_in_context(ctx, event, event->cpu);
7176 perf_unpin_context(ctx);
7177 mutex_unlock(&ctx->mutex);
7178
7179 put_online_cpus();
7180
7181 event->owner = current;
7182
7183 mutex_lock(&current->perf_event_mutex);
7184 list_add_tail(&event->owner_entry, &current->perf_event_list);
7185 mutex_unlock(&current->perf_event_mutex);
7186
7187 /*
7188 * Precalculate sample_data sizes
7189 */
7190 perf_event__header_size(event);
7191 perf_event__id_header_size(event);
7192
7193 /*
7194 * Drop the reference on the group_event after placing the
7195 * new event on the sibling_list. This ensures destruction
7196 * of the group leader will find the pointer to itself in
7197 * perf_group_detach().
7198 */
7199 fdput(group);
7200 fd_install(event_fd, event_file);
7201 return event_fd;
7202
7203 err_context:
7204 perf_unpin_context(ctx);
7205 put_ctx(ctx);
7206 err_alloc:
7207 free_event(event);
7208 err_task:
7209 put_online_cpus();
7210 if (task)
7211 put_task_struct(task);
7212 err_group_fd:
7213 fdput(group);
7214 err_fd:
7215 put_unused_fd(event_fd);
7216 return err;
7217 }
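
/*
 * Illustrative userspace sketch of the syscall above: count the calling
 * thread's task-clock software event and read back the accumulated value.
 * There is no glibc wrapper, so syscall(2) is used directly.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	/* pid == 0: this task, cpu == -1: any cpu, no group leader, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* ... run the workload to be measured ... */

	if (read(fd, &count, sizeof(count)) != sizeof(count))
		return 1;
	printf("task clock: %lld ns\n", count);
	close(fd);
	return 0;
}
#endif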
7218
7219 /**
7220 * perf_event_create_kernel_counter
7221 *
7222 * @attr: attributes of the counter to create
7223 * @cpu: cpu to which the counter is bound
7224 * @task: task to profile (NULL for percpu)
7225 */
7226 struct perf_event *
7227 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
7228 struct task_struct *task,
7229 perf_overflow_handler_t overflow_handler,
7230 void *context)
7231 {
7232 struct perf_event_context *ctx;
7233 struct perf_event *event;
7234 int err;
7235
7236 /*
7237 * Get the target context (task or percpu):
7238 */
7239
7240 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
7241 overflow_handler, context);
7242 if (IS_ERR(event)) {
7243 err = PTR_ERR(event);
7244 goto err;
7245 }
7246
7247 account_event(event);
7248
7249 ctx = find_get_context(event->pmu, task, cpu);
7250 if (IS_ERR(ctx)) {
7251 err = PTR_ERR(ctx);
7252 goto err_free;
7253 }
7254
7255 WARN_ON_ONCE(ctx->parent_ctx);
7256 mutex_lock(&ctx->mutex);
7257 perf_install_in_context(ctx, event, cpu);
7258 perf_unpin_context(ctx);
7259 mutex_unlock(&ctx->mutex);
7260
7261 return event;
7262
7263 err_free:
7264 free_event(event);
7265 err:
7266 return ERR_PTR(err);
7267 }
7268 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
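
/*
 * Illustrative sketch of an in-kernel user of the export above: create a
 * per-cpu counter with an overflow callback, roughly the pattern used by
 * in-kernel consumers such as the hard-lockup watchdog.  The "my_*" names
 * are hypothetical.
 */
#if 0
static void my_overflow(struct perf_event *event,
			struct perf_sample_data *data,
			struct pt_regs *regs)
{
	/* runs from the PMU interrupt each time the sample period elapses */
}

static struct perf_event *my_create_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.config		= PERF_COUNT_SW_CPU_CLOCK,
		.size		= sizeof(attr),
		.sample_period	= NSEC_PER_SEC,	/* fire roughly once a second */
	};

	return perf_event_create_kernel_counter(&attr, cpu, NULL,
						 my_overflow, NULL);
}
#endif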
7269
7270 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7271 {
7272 struct perf_event_context *src_ctx;
7273 struct perf_event_context *dst_ctx;
7274 struct perf_event *event, *tmp;
7275 LIST_HEAD(events);
7276
7277 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
7278 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
7279
7280 mutex_lock(&src_ctx->mutex);
7281 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
7282 event_entry) {
7283 perf_remove_from_context(event);
7284 unaccount_event_cpu(event, src_cpu);
7285 put_ctx(src_ctx);
7286 list_add(&event->migrate_entry, &events);
7287 }
7288 mutex_unlock(&src_ctx->mutex);
7289
7290 synchronize_rcu();
7291
7292 mutex_lock(&dst_ctx->mutex);
7293 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
7294 list_del(&event->migrate_entry);
7295 if (event->state >= PERF_EVENT_STATE_OFF)
7296 event->state = PERF_EVENT_STATE_INACTIVE;
7297 account_event_cpu(event, dst_cpu);
7298 perf_install_in_context(dst_ctx, event, dst_cpu);
7299 get_ctx(dst_ctx);
7300 }
7301 mutex_unlock(&dst_ctx->mutex);
7302 }
7303 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
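
/*
 * Illustrative sketch of a perf_pmu_migrate_context() caller: an
 * uncore-style PMU whose events are bound to a single CPU can move them
 * to a surviving CPU when that CPU goes offline.  The "my_*" name is
 * hypothetical.
 */
#if 0
static void my_pmu_cpu_offline(struct pmu *pmu, int dying_cpu)
{
	unsigned int target = cpumask_any_but(cpu_online_mask, dying_cpu);

	if (target < nr_cpu_ids)
		perf_pmu_migrate_context(pmu, dying_cpu, target);
}
#endif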
7304
7305 static void sync_child_event(struct perf_event *child_event,
7306 struct task_struct *child)
7307 {
7308 struct perf_event *parent_event = child_event->parent;
7309 u64 child_val;
7310
7311 if (child_event->attr.inherit_stat)
7312 perf_event_read_event(child_event, child);
7313
7314 child_val = perf_event_count(child_event);
7315
7316 /*
7317 * Add back the child's count to the parent's count:
7318 */
7319 atomic64_add(child_val, &parent_event->child_count);
7320 atomic64_add(child_event->total_time_enabled,
7321 &parent_event->child_total_time_enabled);
7322 atomic64_add(child_event->total_time_running,
7323 &parent_event->child_total_time_running);
7324
7325 /*
7326 * Remove this event from the parent's list
7327 */
7328 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7329 mutex_lock(&parent_event->child_mutex);
7330 list_del_init(&child_event->child_list);
7331 mutex_unlock(&parent_event->child_mutex);
7332
7333 /*
7334 * Release the parent event, if this was the last
7335 * reference to it.
7336 */
7337 put_event(parent_event);
7338 }
7339
7340 static void
7341 __perf_event_exit_task(struct perf_event *child_event,
7342 struct perf_event_context *child_ctx,
7343 struct task_struct *child)
7344 {
7345 if (child_event->parent) {
7346 raw_spin_lock_irq(&child_ctx->lock);
7347 perf_group_detach(child_event);
7348 raw_spin_unlock_irq(&child_ctx->lock);
7349 }
7350
7351 perf_remove_from_context(child_event);
7352
7353 /*
7354 * It can happen that the parent exits first, and has events
7355 * that are still around due to the child reference. These
7356 * events need to be zapped.
7357 */
7358 if (child_event->parent) {
7359 sync_child_event(child_event, child);
7360 free_event(child_event);
7361 }
7362 }
7363
7364 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7365 {
7366 struct perf_event *child_event, *tmp;
7367 struct perf_event_context *child_ctx;
7368 unsigned long flags;
7369
7370 if (likely(!child->perf_event_ctxp[ctxn])) {
7371 perf_event_task(child, NULL, 0);
7372 return;
7373 }
7374
7375 local_irq_save(flags);
7376 /*
7377 * We can't reschedule here because interrupts are disabled,
7378 * and either child is current or it is a task that can't be
7379 * scheduled, so we are now safe from rescheduling changing
7380 * our context.
7381 */
7382 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
7383
7384 /*
7385 * Take the context lock here so that if find_get_context is
7386 * reading child->perf_event_ctxp, we wait until it has
7387 * incremented the context's refcount before we do put_ctx below.
7388 */
7389 raw_spin_lock(&child_ctx->lock);
7390 task_ctx_sched_out(child_ctx);
7391 child->perf_event_ctxp[ctxn] = NULL;
7392 /*
7393 * If this context is a clone; unclone it so it can't get
7394 * swapped to another process while we're removing all
7395 * the events from it.
7396 */
7397 unclone_ctx(child_ctx);
7398 update_context_time(child_ctx);
7399 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7400
7401 /*
7402 * Report the task dead after unscheduling the events so that we
7403 * won't get any samples after PERF_RECORD_EXIT. We can however still
7404 * get a few PERF_RECORD_READ events.
7405 */
7406 perf_event_task(child, child_ctx, 0);
7407
7408 /*
7409 * We can recurse on the same lock type through:
7410 *
7411 * __perf_event_exit_task()
7412 * sync_child_event()
7413 * put_event()
7414 * mutex_lock(&ctx->mutex)
7415 *
7416 * But since it's the parent context it won't be the same instance.
7417 */
7418 mutex_lock(&child_ctx->mutex);
7419
7420 again:
7421 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
7422 group_entry)
7423 __perf_event_exit_task(child_event, child_ctx, child);
7424
7425 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
7426 group_entry)
7427 __perf_event_exit_task(child_event, child_ctx, child);
7428
7429 /*
7430 * If the last event was a group event, it will have appended all
7431 * its siblings to the list, but we obtained 'tmp' before that which
7432 * will still point to the list head terminating the iteration.
7433 */
7434 if (!list_empty(&child_ctx->pinned_groups) ||
7435 !list_empty(&child_ctx->flexible_groups))
7436 goto again;
7437
7438 mutex_unlock(&child_ctx->mutex);
7439
7440 put_ctx(child_ctx);
7441 }
7442
7443 /*
7444 * When a child task exits, feed back event values to parent events.
7445 */
7446 void perf_event_exit_task(struct task_struct *child)
7447 {
7448 struct perf_event *event, *tmp;
7449 int ctxn;
7450
7451 mutex_lock(&child->perf_event_mutex);
7452 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
7453 owner_entry) {
7454 list_del_init(&event->owner_entry);
7455
7456 /*
7457 * Ensure the list deletion is visible before we clear
7458 * the owner; this closes a race against perf_release() where
7459 * we need to serialize on the owner->perf_event_mutex.
7460 */
7461 smp_wmb();
7462 event->owner = NULL;
7463 }
7464 mutex_unlock(&child->perf_event_mutex);
7465
7466 for_each_task_context_nr(ctxn)
7467 perf_event_exit_task_context(child, ctxn);
7468 }
7469
7470 static void perf_free_event(struct perf_event *event,
7471 struct perf_event_context *ctx)
7472 {
7473 struct perf_event *parent = event->parent;
7474
7475 if (WARN_ON_ONCE(!parent))
7476 return;
7477
7478 mutex_lock(&parent->child_mutex);
7479 list_del_init(&event->child_list);
7480 mutex_unlock(&parent->child_mutex);
7481
7482 put_event(parent);
7483
7484 perf_group_detach(event);
7485 list_del_event(event, ctx);
7486 free_event(event);
7487 }
7488
7489 /*
7490 * Free an unexposed, unused context, as created by inheritance in
7491 * perf_event_init_task() below; used by fork() in its failure path.
7492 */
7493 void perf_event_free_task(struct task_struct *task)
7494 {
7495 struct perf_event_context *ctx;
7496 struct perf_event *event, *tmp;
7497 int ctxn;
7498
7499 for_each_task_context_nr(ctxn) {
7500 ctx = task->perf_event_ctxp[ctxn];
7501 if (!ctx)
7502 continue;
7503
7504 mutex_lock(&ctx->mutex);
7505 again:
7506 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
7507 group_entry)
7508 perf_free_event(event, ctx);
7509
7510 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
7511 group_entry)
7512 perf_free_event(event, ctx);
7513
7514 if (!list_empty(&ctx->pinned_groups) ||
7515 !list_empty(&ctx->flexible_groups))
7516 goto again;
7517
7518 mutex_unlock(&ctx->mutex);
7519
7520 put_ctx(ctx);
7521 }
7522 }
7523
7524 void perf_event_delayed_put(struct task_struct *task)
7525 {
7526 int ctxn;
7527
7528 for_each_task_context_nr(ctxn)
7529 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
7530 }
7531
7532 /*
7533 * Inherit an event from parent task to child task:
7534 */
7535 static struct perf_event *
7536 inherit_event(struct perf_event *parent_event,
7537 struct task_struct *parent,
7538 struct perf_event_context *parent_ctx,
7539 struct task_struct *child,
7540 struct perf_event *group_leader,
7541 struct perf_event_context *child_ctx)
7542 {
7543 struct perf_event *child_event;
7544 unsigned long flags;
7545
7546 /*
7547 * Instead of creating recursive hierarchies of events,
7548 * we link inherited events back to the original parent,
7549 * which is guaranteed to have a filp that we use as the
7550 * reference count:
7551 */
7552 if (parent_event->parent)
7553 parent_event = parent_event->parent;
7554
7555 child_event = perf_event_alloc(&parent_event->attr,
7556 parent_event->cpu,
7557 child,
7558 group_leader, parent_event,
7559 NULL, NULL);
7560 if (IS_ERR(child_event))
7561 return child_event;
7562
7563 if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
7564 free_event(child_event);
7565 return NULL;
7566 }
7567
7568 get_ctx(child_ctx);
7569
7570 /*
7571 * Make the child state follow the state of the parent event,
7572 * not its attr.disabled bit. We hold the parent's mutex,
7573 * so we won't race with perf_event_{en, dis}able_family.
7574 */
7575 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
7576 child_event->state = PERF_EVENT_STATE_INACTIVE;
7577 else
7578 child_event->state = PERF_EVENT_STATE_OFF;
7579
7580 if (parent_event->attr.freq) {
7581 u64 sample_period = parent_event->hw.sample_period;
7582 struct hw_perf_event *hwc = &child_event->hw;
7583
7584 hwc->sample_period = sample_period;
7585 hwc->last_period = sample_period;
7586
7587 local64_set(&hwc->period_left, sample_period);
7588 }
7589
7590 child_event->ctx = child_ctx;
7591 child_event->overflow_handler = parent_event->overflow_handler;
7592 child_event->overflow_handler_context
7593 = parent_event->overflow_handler_context;
7594
7595 /*
7596 * Precalculate sample_data sizes
7597 */
7598 perf_event__header_size(child_event);
7599 perf_event__id_header_size(child_event);
7600
7601 /*
7602 * Link it up in the child's context:
7603 */
7604 raw_spin_lock_irqsave(&child_ctx->lock, flags);
7605 add_event_to_ctx(child_event, child_ctx);
7606 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7607
7608 /*
7609 * Link this into the parent event's child list
7610 */
7611 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7612 mutex_lock(&parent_event->child_mutex);
7613 list_add_tail(&child_event->child_list, &parent_event->child_list);
7614 mutex_unlock(&parent_event->child_mutex);
7615
7616 return child_event;
7617 }
7618
7619 static int inherit_group(struct perf_event *parent_event,
7620 struct task_struct *parent,
7621 struct perf_event_context *parent_ctx,
7622 struct task_struct *child,
7623 struct perf_event_context *child_ctx)
7624 {
7625 struct perf_event *leader;
7626 struct perf_event *sub;
7627 struct perf_event *child_ctr;
7628
7629 leader = inherit_event(parent_event, parent, parent_ctx,
7630 child, NULL, child_ctx);
7631 if (IS_ERR(leader))
7632 return PTR_ERR(leader);
7633 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
7634 child_ctr = inherit_event(sub, parent, parent_ctx,
7635 child, leader, child_ctx);
7636 if (IS_ERR(child_ctr))
7637 return PTR_ERR(child_ctr);
7638 }
7639 return 0;
7640 }
7641
7642 static int
7643 inherit_task_group(struct perf_event *event, struct task_struct *parent,
7644 struct perf_event_context *parent_ctx,
7645 struct task_struct *child, int ctxn,
7646 int *inherited_all)
7647 {
7648 int ret;
7649 struct perf_event_context *child_ctx;
7650
7651 if (!event->attr.inherit) {
7652 *inherited_all = 0;
7653 return 0;
7654 }
7655
7656 child_ctx = child->perf_event_ctxp[ctxn];
7657 if (!child_ctx) {
7658 /*
7659 * This is executed from the parent task context, so
7660 * inherit events that have been marked for cloning.
7661 * First allocate and initialize a context for the
7662 * child.
7663 */
7664
7665 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
7666 if (!child_ctx)
7667 return -ENOMEM;
7668
7669 child->perf_event_ctxp[ctxn] = child_ctx;
7670 }
7671
7672 ret = inherit_group(event, parent, parent_ctx,
7673 child, child_ctx);
7674
7675 if (ret)
7676 *inherited_all = 0;
7677
7678 return ret;
7679 }
7680
7681 /*
7682 * Initialize the perf_event context in task_struct
7683 */
7684 int perf_event_init_context(struct task_struct *child, int ctxn)
7685 {
7686 struct perf_event_context *child_ctx, *parent_ctx;
7687 struct perf_event_context *cloned_ctx;
7688 struct perf_event *event;
7689 struct task_struct *parent = current;
7690 int inherited_all = 1;
7691 unsigned long flags;
7692 int ret = 0;
7693
7694 if (likely(!parent->perf_event_ctxp[ctxn]))
7695 return 0;
7696
7697 /*
7698 * If the parent's context is a clone, pin it so it won't get
7699 * swapped under us.
7700 */
7701 parent_ctx = perf_pin_task_context(parent, ctxn);
7702
7703 /*
7704 * No need to check if parent_ctx != NULL here; since we saw
7705 * it non-NULL earlier, the only reason for it to become NULL
7706 * is if we exit, and since we're currently in the middle of
7707 * a fork we can't be exiting at the same time.
7708 */
7709
7710 /*
7711 * Lock the parent list. No need to lock the child - not PID
7712 * hashed yet and not running, so nobody can access it.
7713 */
7714 mutex_lock(&parent_ctx->mutex);
7715
7716 /*
7717 * We don't have to disable NMIs - we are only looking at
7718 * the list, not manipulating it:
7719 */
7720 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
7721 ret = inherit_task_group(event, parent, parent_ctx,
7722 child, ctxn, &inherited_all);
7723 if (ret)
7724 break;
7725 }
7726
7727 /*
7728 * We can't hold ctx->lock when iterating the ->flexible_groups list due
7729 * to allocations, but we need to prevent rotation because
7730 * rotate_ctx() will change the list from interrupt context.
7731 */
7732 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7733 parent_ctx->rotate_disable = 1;
7734 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7735
7736 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
7737 ret = inherit_task_group(event, parent, parent_ctx,
7738 child, ctxn, &inherited_all);
7739 if (ret)
7740 break;
7741 }
7742
7743 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7744 parent_ctx->rotate_disable = 0;
7745
7746 child_ctx = child->perf_event_ctxp[ctxn];
7747
7748 if (child_ctx && inherited_all) {
7749 /*
7750 * Mark the child context as a clone of the parent
7751 * context, or of whatever the parent is a clone of.
7752 *
7753 * Note that if the parent is a clone, the holding of
7754 * parent_ctx->lock prevents it from being uncloned.
7755 */
7756 cloned_ctx = parent_ctx->parent_ctx;
7757 if (cloned_ctx) {
7758 child_ctx->parent_ctx = cloned_ctx;
7759 child_ctx->parent_gen = parent_ctx->parent_gen;
7760 } else {
7761 child_ctx->parent_ctx = parent_ctx;
7762 child_ctx->parent_gen = parent_ctx->generation;
7763 }
7764 get_ctx(child_ctx->parent_ctx);
7765 }
7766
7767 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7768 mutex_unlock(&parent_ctx->mutex);
7769
7770 perf_unpin_context(parent_ctx);
7771 put_ctx(parent_ctx);
7772
7773 return ret;
7774 }
7775
7776 /*
7777 * Initialize the perf_event context in task_struct
7778 */
7779 int perf_event_init_task(struct task_struct *child)
7780 {
7781 int ctxn, ret;
7782
7783 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
7784 mutex_init(&child->perf_event_mutex);
7785 INIT_LIST_HEAD(&child->perf_event_list);
7786
7787 for_each_task_context_nr(ctxn) {
7788 ret = perf_event_init_context(child, ctxn);
7789 if (ret)
7790 return ret;
7791 }
7792
7793 return 0;
7794 }
7795
7796 static void __init perf_event_init_all_cpus(void)
7797 {
7798 struct swevent_htable *swhash;
7799 int cpu;
7800
7801 for_each_possible_cpu(cpu) {
7802 swhash = &per_cpu(swevent_htable, cpu);
7803 mutex_init(&swhash->hlist_mutex);
7804 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
7805 }
7806 }
7807
7808 static void perf_event_init_cpu(int cpu)
7809 {
7810 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7811
7812 mutex_lock(&swhash->hlist_mutex);
7813 if (swhash->hlist_refcount > 0) {
7814 struct swevent_hlist *hlist;
7815
7816 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
7817 WARN_ON(!hlist);
7818 rcu_assign_pointer(swhash->swevent_hlist, hlist);
7819 }
7820 mutex_unlock(&swhash->hlist_mutex);
7821 }
7822
7823 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
7824 static void perf_pmu_rotate_stop(struct pmu *pmu)
7825 {
7826 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
7827
7828 WARN_ON(!irqs_disabled());
7829
7830 list_del_init(&cpuctx->rotation_list);
7831 }
7832
7833 static void __perf_event_exit_context(void *__info)
7834 {
7835 struct perf_event_context *ctx = __info;
7836 struct perf_event *event, *tmp;
7837
7838 perf_pmu_rotate_stop(ctx->pmu);
7839
7840 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
7841 __perf_remove_from_context(event);
7842 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
7843 __perf_remove_from_context(event);
7844 }
7845
7846 static void perf_event_exit_cpu_context(int cpu)
7847 {
7848 struct perf_event_context *ctx;
7849 struct pmu *pmu;
7850 int idx;
7851
7852 idx = srcu_read_lock(&pmus_srcu);
7853 list_for_each_entry_rcu(pmu, &pmus, entry) {
7854 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
7855
7856 mutex_lock(&ctx->mutex);
7857 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
7858 mutex_unlock(&ctx->mutex);
7859 }
7860 srcu_read_unlock(&pmus_srcu, idx);
7861 }
7862
7863 static void perf_event_exit_cpu(int cpu)
7864 {
7865 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7866
7867 mutex_lock(&swhash->hlist_mutex);
7868 swevent_hlist_release(swhash);
7869 mutex_unlock(&swhash->hlist_mutex);
7870
7871 perf_event_exit_cpu_context(cpu);
7872 }
7873 #else
7874 static inline void perf_event_exit_cpu(int cpu) { }
7875 #endif
7876
7877 static int
7878 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
7879 {
7880 int cpu;
7881
7882 for_each_online_cpu(cpu)
7883 perf_event_exit_cpu(cpu);
7884
7885 return NOTIFY_OK;
7886 }
7887
7888 /*
7889 * Run the perf reboot notifier at the very last possible moment so that
7890 * the generic watchdog code runs as long as possible.
7891 */
7892 static struct notifier_block perf_reboot_notifier = {
7893 .notifier_call = perf_reboot,
7894 .priority = INT_MIN,
7895 };
7896
7897 static int
7898 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
7899 {
7900 unsigned int cpu = (long)hcpu;
7901
7902 switch (action & ~CPU_TASKS_FROZEN) {
7903
7904 case CPU_UP_PREPARE:
7905 case CPU_DOWN_FAILED:
7906 perf_event_init_cpu(cpu);
7907 break;
7908
7909 case CPU_UP_CANCELED:
7910 case CPU_DOWN_PREPARE:
7911 perf_event_exit_cpu(cpu);
7912 break;
7913 default:
7914 break;
7915 }
7916
7917 return NOTIFY_OK;
7918 }
7919
7920 void __init perf_event_init(void)
7921 {
7922 int ret;
7923
7924 idr_init(&pmu_idr);
7925
7926 perf_event_init_all_cpus();
7927 init_srcu_struct(&pmus_srcu);
7928 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
7929 perf_pmu_register(&perf_cpu_clock, NULL, -1);
7930 perf_pmu_register(&perf_task_clock, NULL, -1);
7931 perf_tp_register();
7932 perf_cpu_notifier(perf_cpu_notify);
7933 register_reboot_notifier(&perf_reboot_notifier);
7934
7935 ret = init_hw_breakpoint();
7936 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
7937
7938 /* do not patch jump label more than once per second */
7939 jump_label_rate_limit(&perf_sched_events, HZ);
7940
7941 /*
7942 * Build time assertion that we keep the data_head at the intended
7943 * location. IOW, validating that we got the __reserved[] size right.
7944 */
7945 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
7946 != 1024);
7947 }
7948
7949 static int __init perf_event_sysfs_init(void)
7950 {
7951 struct pmu *pmu;
7952 int ret;
7953
7954 mutex_lock(&pmus_lock);
7955
7956 ret = bus_register(&pmu_bus);
7957 if (ret)
7958 goto unlock;
7959
7960 list_for_each_entry(pmu, &pmus, entry) {
7961 if (!pmu->name || pmu->type < 0)
7962 continue;
7963
7964 ret = pmu_dev_alloc(pmu);
7965 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
7966 }
7967 pmu_bus_running = 1;
7968 ret = 0;
7969
7970 unlock:
7971 mutex_unlock(&pmus_lock);
7972
7973 return ret;
7974 }
7975 device_initcall(perf_event_sysfs_init);
7976
7977 #ifdef CONFIG_CGROUP_PERF
7978 static struct cgroup_subsys_state *
7979 perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
7980 {
7981 struct perf_cgroup *jc;
7982
7983 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
7984 if (!jc)
7985 return ERR_PTR(-ENOMEM);
7986
7987 jc->info = alloc_percpu(struct perf_cgroup_info);
7988 if (!jc->info) {
7989 kfree(jc);
7990 return ERR_PTR(-ENOMEM);
7991 }
7992
7993 return &jc->css;
7994 }
7995
7996 static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
7997 {
7998 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
7999
8000 free_percpu(jc->info);
8001 kfree(jc);
8002 }
8003
8004 static int __perf_cgroup_move(void *info)
8005 {
8006 struct task_struct *task = info;
8007 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
8008 return 0;
8009 }
8010
8011 static void perf_cgroup_attach(struct cgroup_subsys_state *css,
8012 struct cgroup_taskset *tset)
8013 {
8014 struct task_struct *task;
8015
8016 cgroup_taskset_for_each(task, css, tset)
8017 task_function_call(task, __perf_cgroup_move, task);
8018 }
8019
8020 static void perf_cgroup_exit(struct cgroup_subsys_state *css,
8021 struct cgroup_subsys_state *old_css,
8022 struct task_struct *task)
8023 {
8024 /*
8025 * cgroup_exit() is called in the copy_process() failure path.
8026 * Ignore this case since the task hasn't run yet; this avoids
8027 * trying to poke a half freed task state from generic code.
8028 */
8029 if (!(task->flags & PF_EXITING))
8030 return;
8031
8032 task_function_call(task, __perf_cgroup_move, task);
8033 }
8034
8035 struct cgroup_subsys perf_subsys = {
8036 .name = "perf_event",
8037 .subsys_id = perf_subsys_id,
8038 .css_alloc = perf_cgroup_css_alloc,
8039 .css_free = perf_cgroup_css_free,
8040 .exit = perf_cgroup_exit,
8041 .attach = perf_cgroup_attach,
8042 };
8043 #endif /* CONFIG_CGROUP_PERF */