kernel/events/core.c
1 /*
2 * Performance events core code:
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 *
9 * For licensing details see kernel-base/COPYING
10 */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/idr.h>
17 #include <linux/file.h>
18 #include <linux/poll.h>
19 #include <linux/slab.h>
20 #include <linux/hash.h>
21 #include <linux/tick.h>
22 #include <linux/sysfs.h>
23 #include <linux/dcache.h>
24 #include <linux/percpu.h>
25 #include <linux/ptrace.h>
26 #include <linux/reboot.h>
27 #include <linux/vmstat.h>
28 #include <linux/device.h>
29 #include <linux/export.h>
30 #include <linux/vmalloc.h>
31 #include <linux/hardirq.h>
32 #include <linux/rculist.h>
33 #include <linux/uaccess.h>
34 #include <linux/syscalls.h>
35 #include <linux/anon_inodes.h>
36 #include <linux/kernel_stat.h>
37 #include <linux/cgroup.h>
38 #include <linux/perf_event.h>
39 #include <linux/trace_events.h>
40 #include <linux/hw_breakpoint.h>
41 #include <linux/mm_types.h>
42 #include <linux/module.h>
43 #include <linux/mman.h>
44 #include <linux/compat.h>
45 #include <linux/bpf.h>
46 #include <linux/filter.h>
47 #include <linux/namei.h>
48 #include <linux/parser.h>
49
50 #include "internal.h"
51
52 #include <asm/irq_regs.h>
53
54 typedef int (*remote_function_f)(void *);
55
56 struct remote_function_call {
57 struct task_struct *p;
58 remote_function_f func;
59 void *info;
60 int ret;
61 };
62
63 static void remote_function(void *data)
64 {
65 struct remote_function_call *tfc = data;
66 struct task_struct *p = tfc->p;
67
68 if (p) {
69 /* -EAGAIN */
70 if (task_cpu(p) != smp_processor_id())
71 return;
72
73 /*
74	 * Now that we're on the right CPU with IRQs disabled, we can test
75 * if we hit the right task without races.
76 */
77
78 tfc->ret = -ESRCH; /* No such (running) process */
79 if (p != current)
80 return;
81 }
82
83 tfc->ret = tfc->func(tfc->info);
84 }
85
86 /**
87 * task_function_call - call a function on the cpu on which a task runs
88 * @p: the task to evaluate
89 * @func: the function to be called
90 * @info: the function call argument
91 *
92 * Calls the function @func when the task is currently running. This might
93 * be on the current CPU, which just calls the function directly
94 *
95 * returns: @func return value, or
96 * -ESRCH - when the process isn't running
97 * -EAGAIN - when the process moved away
98 */
99 static int
100 task_function_call(struct task_struct *p, remote_function_f func, void *info)
101 {
102 struct remote_function_call data = {
103 .p = p,
104 .func = func,
105 .info = info,
106 .ret = -EAGAIN,
107 };
108 int ret;
109
110 do {
111 ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
112 if (!ret)
113 ret = data.ret;
114 } while (ret == -EAGAIN);
115
116 return ret;
117 }
118
119 /**
120	 * cpu_function_call - call a function on a given cpu
	 * @cpu:	the cpu on which to run the function
121	 * @func:	the function to be called
122 * @info: the function call argument
123 *
124 * Calls the function @func on the remote cpu.
125 *
126 * returns: @func return value or -ENXIO when the cpu is offline
127 */
128 static int cpu_function_call(int cpu, remote_function_f func, void *info)
129 {
130 struct remote_function_call data = {
131 .p = NULL,
132 .func = func,
133 .info = info,
134 .ret = -ENXIO, /* No such CPU */
135 };
136
137 smp_call_function_single(cpu, remote_function, &data, 1);
138
139 return data.ret;
140 }
141
142 static inline struct perf_cpu_context *
143 __get_cpu_context(struct perf_event_context *ctx)
144 {
145 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
146 }
147
148 static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
149 struct perf_event_context *ctx)
150 {
151 raw_spin_lock(&cpuctx->ctx.lock);
152 if (ctx)
153 raw_spin_lock(&ctx->lock);
154 }
155
156 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
157 struct perf_event_context *ctx)
158 {
159 if (ctx)
160 raw_spin_unlock(&ctx->lock);
161 raw_spin_unlock(&cpuctx->ctx.lock);
162 }
163
164 #define TASK_TOMBSTONE ((void *)-1L)
165
166 static bool is_kernel_event(struct perf_event *event)
167 {
168 return READ_ONCE(event->owner) == TASK_TOMBSTONE;
169 }
170
171 /*
172 * On task ctx scheduling...
173 *
174 * When !ctx->nr_events a task context will not be scheduled. This means
175 * we can disable the scheduler hooks (for performance) without leaving
176 * pending task ctx state.
177 *
178 * This however results in two special cases:
179 *
180 * - removing the last event from a task ctx; this is relatively straight
181 * forward and is done in __perf_remove_from_context.
182 *
183 * - adding the first event to a task ctx; this is tricky because we cannot
184 * rely on ctx->is_active and therefore cannot use event_function_call().
185 * See perf_install_in_context().
186 *
187 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
188 */
189
190 typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
191 struct perf_event_context *, void *);
192
193 struct event_function_struct {
194 struct perf_event *event;
195 event_f func;
196 void *data;
197 };
198
199 static int event_function(void *info)
200 {
201 struct event_function_struct *efs = info;
202 struct perf_event *event = efs->event;
203 struct perf_event_context *ctx = event->ctx;
204 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
205 struct perf_event_context *task_ctx = cpuctx->task_ctx;
206 int ret = 0;
207
208 WARN_ON_ONCE(!irqs_disabled());
209
210 perf_ctx_lock(cpuctx, task_ctx);
211 /*
212 * Since we do the IPI call without holding ctx->lock things can have
213 * changed, double check we hit the task we set out to hit.
214 */
215 if (ctx->task) {
216 if (ctx->task != current) {
217 ret = -ESRCH;
218 goto unlock;
219 }
220
221 /*
222 * We only use event_function_call() on established contexts,
223 * and event_function() is only ever called when active (or
224 * rather, we'll have bailed in task_function_call() or the
225 * above ctx->task != current test), therefore we must have
226 * ctx->is_active here.
227 */
228 WARN_ON_ONCE(!ctx->is_active);
229 /*
230 * And since we have ctx->is_active, cpuctx->task_ctx must
231 * match.
232 */
233 WARN_ON_ONCE(task_ctx != ctx);
234 } else {
235 WARN_ON_ONCE(&cpuctx->ctx != ctx);
236 }
237
238 efs->func(event, cpuctx, ctx, efs->data);
239 unlock:
240 perf_ctx_unlock(cpuctx, task_ctx);
241
242 return ret;
243 }
244
245 static void event_function_call(struct perf_event *event, event_f func, void *data)
246 {
247 struct perf_event_context *ctx = event->ctx;
248 struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
249 struct event_function_struct efs = {
250 .event = event,
251 .func = func,
252 .data = data,
253 };
254
255 if (!event->parent) {
256 /*
257 * If this is a !child event, we must hold ctx::mutex to
258	 * stabilize the event->ctx relation. See
259 * perf_event_ctx_lock().
260 */
261 lockdep_assert_held(&ctx->mutex);
262 }
263
264 if (!task) {
265 cpu_function_call(event->cpu, event_function, &efs);
266 return;
267 }
268
269 if (task == TASK_TOMBSTONE)
270 return;
271
272 again:
273 if (!task_function_call(task, event_function, &efs))
274 return;
275
276 raw_spin_lock_irq(&ctx->lock);
277 /*
278 * Reload the task pointer, it might have been changed by
279 * a concurrent perf_event_context_sched_out().
280 */
281 task = ctx->task;
282 if (task == TASK_TOMBSTONE) {
283 raw_spin_unlock_irq(&ctx->lock);
284 return;
285 }
286 if (ctx->is_active) {
287 raw_spin_unlock_irq(&ctx->lock);
288 goto again;
289 }
290 func(event, NULL, ctx, data);
291 raw_spin_unlock_irq(&ctx->lock);
292 }
293
294 /*
295 * Similar to event_function_call() + event_function(), but hard assumes IRQs
296 * are already disabled and we're on the right CPU.
297 */
298 static void event_function_local(struct perf_event *event, event_f func, void *data)
299 {
300 struct perf_event_context *ctx = event->ctx;
301 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
302 struct task_struct *task = READ_ONCE(ctx->task);
303 struct perf_event_context *task_ctx = NULL;
304
305 WARN_ON_ONCE(!irqs_disabled());
306
307 if (task) {
308 if (task == TASK_TOMBSTONE)
309 return;
310
311 task_ctx = ctx;
312 }
313
314 perf_ctx_lock(cpuctx, task_ctx);
315
316 task = ctx->task;
317 if (task == TASK_TOMBSTONE)
318 goto unlock;
319
320 if (task) {
321 /*
322 * We must be either inactive or active and the right task,
323 * otherwise we're screwed, since we cannot IPI to somewhere
324 * else.
325 */
326 if (ctx->is_active) {
327 if (WARN_ON_ONCE(task != current))
328 goto unlock;
329
330 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
331 goto unlock;
332 }
333 } else {
334 WARN_ON_ONCE(&cpuctx->ctx != ctx);
335 }
336
337 func(event, cpuctx, ctx, data);
338 unlock:
339 perf_ctx_unlock(cpuctx, task_ctx);
340 }
341
342 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
343 PERF_FLAG_FD_OUTPUT |\
344 PERF_FLAG_PID_CGROUP |\
345 PERF_FLAG_FD_CLOEXEC)
346
347 /*
348 * branch priv levels that need permission checks
349 */
350 #define PERF_SAMPLE_BRANCH_PERM_PLM \
351 (PERF_SAMPLE_BRANCH_KERNEL |\
352 PERF_SAMPLE_BRANCH_HV)
353
354 enum event_type_t {
355 EVENT_FLEXIBLE = 0x1,
356 EVENT_PINNED = 0x2,
357 EVENT_TIME = 0x4,
358 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
359 };
360
361 /*
362 * perf_sched_events : >0 events exist
363 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
364 */
365
366 static void perf_sched_delayed(struct work_struct *work);
367 DEFINE_STATIC_KEY_FALSE(perf_sched_events);
368 static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
369 static DEFINE_MUTEX(perf_sched_mutex);
370 static atomic_t perf_sched_count;
371
372 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
373 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
374 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
375
376 static atomic_t nr_mmap_events __read_mostly;
377 static atomic_t nr_comm_events __read_mostly;
378 static atomic_t nr_task_events __read_mostly;
379 static atomic_t nr_freq_events __read_mostly;
380 static atomic_t nr_switch_events __read_mostly;
381
382 static LIST_HEAD(pmus);
383 static DEFINE_MUTEX(pmus_lock);
384 static struct srcu_struct pmus_srcu;
385
386 /*
387 * perf event paranoia level:
388 * -1 - not paranoid at all
389 * 0 - disallow raw tracepoint access for unpriv
390 * 1 - disallow cpu events for unpriv
391 * 2 - disallow kernel profiling for unpriv
392 */
393 int sysctl_perf_event_paranoid __read_mostly = 2;
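/*
 * Note: this default is normally changed at run time through the
 * kernel.perf_event_paranoid sysctl (/proc/sys/kernel/perf_event_paranoid);
 * e.g. setting it to -1 lifts all of the restrictions listed above for
 * unprivileged users.
 */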
394
395 /* Minimum for 512 kiB + 1 user control page */
396 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
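/* For example, with 4 KiB pages this works out to 512 + 4 = 516 kiB per user. */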
397
398 /*
399 * max perf event sample rate
400 */
401 #define DEFAULT_MAX_SAMPLE_RATE 100000
402 #define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
403 #define DEFAULT_CPU_TIME_MAX_PERCENT 25
404
405 int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
406
407 static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
408 static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
409
410 static int perf_sample_allowed_ns __read_mostly =
411 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
412
413 static void update_perf_cpu_limits(void)
414 {
415 u64 tmp = perf_sample_period_ns;
416
417 tmp *= sysctl_perf_cpu_time_max_percent;
418 tmp = div_u64(tmp, 100);
419 if (!tmp)
420 tmp = 1;
421
422 WRITE_ONCE(perf_sample_allowed_ns, tmp);
423 }
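/*
 * Worked example with the defaults above: perf_sample_period_ns is
 * NSEC_PER_SEC / 100000 = 10000 ns and sysctl_perf_cpu_time_max_percent
 * is 25, so perf_sample_allowed_ns becomes 10000 * 25 / 100 = 2500 ns,
 * i.e. a single sample may consume at most 2.5 us of CPU time on average.
 */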
424
425 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
426
427 int perf_proc_update_handler(struct ctl_table *table, int write,
428 void __user *buffer, size_t *lenp,
429 loff_t *ppos)
430 {
431 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
432
433 if (ret || !write)
434 return ret;
435
436 /*
437 * If throttling is disabled don't allow the write:
438 */
439 if (sysctl_perf_cpu_time_max_percent == 100 ||
440 sysctl_perf_cpu_time_max_percent == 0)
441 return -EINVAL;
442
443 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
444 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
445 update_perf_cpu_limits();
446
447 return 0;
448 }
449
450 int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
451
452 int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
453 void __user *buffer, size_t *lenp,
454 loff_t *ppos)
455 {
456 int ret = proc_dointvec(table, write, buffer, lenp, ppos);
457
458 if (ret || !write)
459 return ret;
460
461 if (sysctl_perf_cpu_time_max_percent == 100 ||
462 sysctl_perf_cpu_time_max_percent == 0) {
463 printk(KERN_WARNING
464 "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
465 WRITE_ONCE(perf_sample_allowed_ns, 0);
466 } else {
467 update_perf_cpu_limits();
468 }
469
470 return 0;
471 }
472
473 /*
474 * perf samples are done in some very critical code paths (NMIs).
475 * If they take too much CPU time, the system can lock up and not
476 * get any real work done. This will drop the sample rate when
477 * we detect that events are taking too long.
478 */
479 #define NR_ACCUMULATED_SAMPLES 128
480 static DEFINE_PER_CPU(u64, running_sample_length);
481
482 static u64 __report_avg;
483 static u64 __report_allowed;
484
485 static void perf_duration_warn(struct irq_work *w)
486 {
487 printk_ratelimited(KERN_INFO
488 "perf: interrupt took too long (%lld > %lld), lowering "
489 "kernel.perf_event_max_sample_rate to %d\n",
490 __report_avg, __report_allowed,
491 sysctl_perf_event_sample_rate);
492 }
493
494 static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
495
496 void perf_sample_event_took(u64 sample_len_ns)
497 {
498 u64 max_len = READ_ONCE(perf_sample_allowed_ns);
499 u64 running_len;
500 u64 avg_len;
501 u32 max;
502
503 if (max_len == 0)
504 return;
505
506 /* Decay the counter by 1 average sample. */
507 running_len = __this_cpu_read(running_sample_length);
508 running_len -= running_len/NR_ACCUMULATED_SAMPLES;
509 running_len += sample_len_ns;
510 __this_cpu_write(running_sample_length, running_len);
511
512 /*
513	 * Note: this will be biased artificially low until we have
514 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
515 * from having to maintain a count.
516 */
517 avg_len = running_len/NR_ACCUMULATED_SAMPLES;
518 if (avg_len <= max_len)
519 return;
520
521 __report_avg = avg_len;
522 __report_allowed = max_len;
523
524 /*
525 * Compute a throttle threshold 25% below the current duration.
526 */
527 avg_len += avg_len / 4;
528 max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
529 if (avg_len < max)
530 max /= (u32)avg_len;
531 else
532 max = 1;
533
534 WRITE_ONCE(perf_sample_allowed_ns, avg_len);
535 WRITE_ONCE(max_samples_per_tick, max);
536
537 sysctl_perf_event_sample_rate = max * HZ;
538 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
539
540 if (!irq_work_queue(&perf_duration_work)) {
541 early_printk("perf: interrupt took too long (%lld > %lld), lowering "
542 "kernel.perf_event_max_sample_rate to %d\n",
543 __report_avg, __report_allowed,
544 sysctl_perf_event_sample_rate);
545 }
546 }
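/*
 * Illustrative numbers for the throttling above (assuming HZ=1000, i.e.
 * TICK_NSEC = 1000000 ns, and the default 25% CPU budget): the per-tick
 * budget is (1000000 / 100) * 25 = 250000 ns. If the decayed average
 * sample cost is 5000 ns it is padded by 25% to 6250 ns, giving
 * max_samples_per_tick = 250000 / 6250 = 40 and a new
 * kernel.perf_event_max_sample_rate of 40 * HZ = 40000.
 */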
547
548 static atomic64_t perf_event_id;
549
550 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
551 enum event_type_t event_type);
552
553 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
554 enum event_type_t event_type,
555 struct task_struct *task);
556
557 static void update_context_time(struct perf_event_context *ctx);
558 static u64 perf_event_time(struct perf_event *event);
559
560 void __weak perf_event_print_debug(void) { }
561
562 extern __weak const char *perf_pmu_name(void)
563 {
564 return "pmu";
565 }
566
567 static inline u64 perf_clock(void)
568 {
569 return local_clock();
570 }
571
572 static inline u64 perf_event_clock(struct perf_event *event)
573 {
574 return event->clock();
575 }
576
577 #ifdef CONFIG_CGROUP_PERF
578
579 static inline bool
580 perf_cgroup_match(struct perf_event *event)
581 {
582 struct perf_event_context *ctx = event->ctx;
583 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
584
585 /* @event doesn't care about cgroup */
586 if (!event->cgrp)
587 return true;
588
589 /* wants specific cgroup scope but @cpuctx isn't associated with any */
590 if (!cpuctx->cgrp)
591 return false;
592
593 /*
594 * Cgroup scoping is recursive. An event enabled for a cgroup is
595 * also enabled for all its descendant cgroups. If @cpuctx's
596 * cgroup is a descendant of @event's (the test covers identity
597 * case), it's a match.
598 */
599 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
600 event->cgrp->css.cgroup);
601 }
602
603 static inline void perf_detach_cgroup(struct perf_event *event)
604 {
605 css_put(&event->cgrp->css);
606 event->cgrp = NULL;
607 }
608
609 static inline int is_cgroup_event(struct perf_event *event)
610 {
611 return event->cgrp != NULL;
612 }
613
614 static inline u64 perf_cgroup_event_time(struct perf_event *event)
615 {
616 struct perf_cgroup_info *t;
617
618 t = per_cpu_ptr(event->cgrp->info, event->cpu);
619 return t->time;
620 }
621
622 static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
623 {
624 struct perf_cgroup_info *info;
625 u64 now;
626
627 now = perf_clock();
628
629 info = this_cpu_ptr(cgrp->info);
630
631 info->time += now - info->timestamp;
632 info->timestamp = now;
633 }
634
635 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
636 {
637 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
638 if (cgrp_out)
639 __update_cgrp_time(cgrp_out);
640 }
641
642 static inline void update_cgrp_time_from_event(struct perf_event *event)
643 {
644 struct perf_cgroup *cgrp;
645
646 /*
647 * ensure we access cgroup data only when needed and
648 * when we know the cgroup is pinned (css_get)
649 */
650 if (!is_cgroup_event(event))
651 return;
652
653 cgrp = perf_cgroup_from_task(current, event->ctx);
654 /*
655 * Do not update time when cgroup is not active
656 */
657 if (cgrp == event->cgrp)
658 __update_cgrp_time(event->cgrp);
659 }
660
661 static inline void
662 perf_cgroup_set_timestamp(struct task_struct *task,
663 struct perf_event_context *ctx)
664 {
665 struct perf_cgroup *cgrp;
666 struct perf_cgroup_info *info;
667
668 /*
669 * ctx->lock held by caller
670 * ensure we do not access cgroup data
671 * unless we have the cgroup pinned (css_get)
672 */
673 if (!task || !ctx->nr_cgroups)
674 return;
675
676 cgrp = perf_cgroup_from_task(task, ctx);
677 info = this_cpu_ptr(cgrp->info);
678 info->timestamp = ctx->timestamp;
679 }
680
681 #define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
682 #define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
683
684 /*
685 * reschedule events based on the cgroup constraint of task.
686 *
687 * mode SWOUT : schedule out everything
688 * mode SWIN : schedule in based on cgroup for next
689 */
690 static void perf_cgroup_switch(struct task_struct *task, int mode)
691 {
692 struct perf_cpu_context *cpuctx;
693 struct pmu *pmu;
694 unsigned long flags;
695
696 /*
697	 * disable interrupts to avoid getting nr_cgroup
698 * changes via __perf_event_disable(). Also
699 * avoids preemption.
700 */
701 local_irq_save(flags);
702
703 /*
704 * we reschedule only in the presence of cgroup
705 * constrained events.
706 */
707
708 list_for_each_entry_rcu(pmu, &pmus, entry) {
709 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
710 if (cpuctx->unique_pmu != pmu)
711 continue; /* ensure we process each cpuctx once */
712
713 /*
714 * perf_cgroup_events says at least one
715 * context on this CPU has cgroup events.
716 *
717 * ctx->nr_cgroups reports the number of cgroup
718 * events for a context.
719 */
720 if (cpuctx->ctx.nr_cgroups > 0) {
721 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
722 perf_pmu_disable(cpuctx->ctx.pmu);
723
724 if (mode & PERF_CGROUP_SWOUT) {
725 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
726 /*
727 * must not be done before ctxswout due
728 * to event_filter_match() in event_sched_out()
729 */
730 cpuctx->cgrp = NULL;
731 }
732
733 if (mode & PERF_CGROUP_SWIN) {
734 WARN_ON_ONCE(cpuctx->cgrp);
735 /*
736 * set cgrp before ctxsw in to allow
737 * event_filter_match() to not have to pass
738 * task around
739 * we pass the cpuctx->ctx to perf_cgroup_from_task()
740	 * because cgroup events are only per-cpu
741 */
742 cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
743 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
744 }
745 perf_pmu_enable(cpuctx->ctx.pmu);
746 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
747 }
748 }
749
750 local_irq_restore(flags);
751 }
752
753 static inline void perf_cgroup_sched_out(struct task_struct *task,
754 struct task_struct *next)
755 {
756 struct perf_cgroup *cgrp1;
757 struct perf_cgroup *cgrp2 = NULL;
758
759 rcu_read_lock();
760 /*
761 * we come here when we know perf_cgroup_events > 0
762 * we do not need to pass the ctx here because we know
763 * we are holding the rcu lock
764 */
765 cgrp1 = perf_cgroup_from_task(task, NULL);
766 cgrp2 = perf_cgroup_from_task(next, NULL);
767
768 /*
769 * only schedule out current cgroup events if we know
770 * that we are switching to a different cgroup. Otherwise,
771	 * do not touch the cgroup events.
772 */
773 if (cgrp1 != cgrp2)
774 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
775
776 rcu_read_unlock();
777 }
778
779 static inline void perf_cgroup_sched_in(struct task_struct *prev,
780 struct task_struct *task)
781 {
782 struct perf_cgroup *cgrp1;
783 struct perf_cgroup *cgrp2 = NULL;
784
785 rcu_read_lock();
786 /*
787 * we come here when we know perf_cgroup_events > 0
788 * we do not need to pass the ctx here because we know
789 * we are holding the rcu lock
790 */
791 cgrp1 = perf_cgroup_from_task(task, NULL);
792 cgrp2 = perf_cgroup_from_task(prev, NULL);
793
794 /*
795 * only need to schedule in cgroup events if we are changing
796 * cgroup during ctxsw. Cgroup events were not scheduled
797	 * out at ctxsw-out time if that was not the case.
798 */
799 if (cgrp1 != cgrp2)
800 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
801
802 rcu_read_unlock();
803 }
804
805 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
806 struct perf_event_attr *attr,
807 struct perf_event *group_leader)
808 {
809 struct perf_cgroup *cgrp;
810 struct cgroup_subsys_state *css;
811 struct fd f = fdget(fd);
812 int ret = 0;
813
814 if (!f.file)
815 return -EBADF;
816
817 css = css_tryget_online_from_dir(f.file->f_path.dentry,
818 &perf_event_cgrp_subsys);
819 if (IS_ERR(css)) {
820 ret = PTR_ERR(css);
821 goto out;
822 }
823
824 cgrp = container_of(css, struct perf_cgroup, css);
825 event->cgrp = cgrp;
826
827 /*
828 * all events in a group must monitor
829 * the same cgroup because a task belongs
830 * to only one perf cgroup at a time
831 */
832 if (group_leader && group_leader->cgrp != cgrp) {
833 perf_detach_cgroup(event);
834 ret = -EINVAL;
835 }
836 out:
837 fdput(f);
838 return ret;
839 }
840
841 static inline void
842 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
843 {
844 struct perf_cgroup_info *t;
845 t = per_cpu_ptr(event->cgrp->info, event->cpu);
846 event->shadow_ctx_time = now - t->timestamp;
847 }
848
849 static inline void
850 perf_cgroup_defer_enabled(struct perf_event *event)
851 {
852 /*
853 * when the current task's perf cgroup does not match
854 * the event's, we need to remember to call the
855 * perf_mark_enable() function the first time a task with
856 * a matching perf cgroup is scheduled in.
857 */
858 if (is_cgroup_event(event) && !perf_cgroup_match(event))
859 event->cgrp_defer_enabled = 1;
860 }
861
862 static inline void
863 perf_cgroup_mark_enabled(struct perf_event *event,
864 struct perf_event_context *ctx)
865 {
866 struct perf_event *sub;
867 u64 tstamp = perf_event_time(event);
868
869 if (!event->cgrp_defer_enabled)
870 return;
871
872 event->cgrp_defer_enabled = 0;
873
874 event->tstamp_enabled = tstamp - event->total_time_enabled;
875 list_for_each_entry(sub, &event->sibling_list, group_entry) {
876 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
877 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
878 sub->cgrp_defer_enabled = 0;
879 }
880 }
881 }
882
883 /*
884 * Update cpuctx->cgrp so that it is set when first cgroup event is added and
885 * cleared when last cgroup event is removed.
886 */
887 static inline void
888 list_update_cgroup_event(struct perf_event *event,
889 struct perf_event_context *ctx, bool add)
890 {
891 struct perf_cpu_context *cpuctx;
892
893 if (!is_cgroup_event(event))
894 return;
895
896 if (add && ctx->nr_cgroups++)
897 return;
898 else if (!add && --ctx->nr_cgroups)
899 return;
900 /*
901 * Because cgroup events are always per-cpu events,
902 * this will always be called from the right CPU.
903 */
904 cpuctx = __get_cpu_context(ctx);
905
906 /* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */
907 if (perf_cgroup_from_task(current, ctx) != event->cgrp) {
908 /*
909 * We are removing the last cpu event in this context.
910 * If that event is not active in this cpu, cpuctx->cgrp
911 * should've been cleared by perf_cgroup_switch.
912 */
913 WARN_ON_ONCE(!add && cpuctx->cgrp);
914 return;
915 }
916 cpuctx->cgrp = add ? event->cgrp : NULL;
917 }
918
919 #else /* !CONFIG_CGROUP_PERF */
920
921 static inline bool
922 perf_cgroup_match(struct perf_event *event)
923 {
924 return true;
925 }
926
927 static inline void perf_detach_cgroup(struct perf_event *event)
928 {}
929
930 static inline int is_cgroup_event(struct perf_event *event)
931 {
932 return 0;
933 }
934
935 static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
936 {
937 return 0;
938 }
939
940 static inline void update_cgrp_time_from_event(struct perf_event *event)
941 {
942 }
943
944 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
945 {
946 }
947
948 static inline void perf_cgroup_sched_out(struct task_struct *task,
949 struct task_struct *next)
950 {
951 }
952
953 static inline void perf_cgroup_sched_in(struct task_struct *prev,
954 struct task_struct *task)
955 {
956 }
957
958 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
959 struct perf_event_attr *attr,
960 struct perf_event *group_leader)
961 {
962 return -EINVAL;
963 }
964
965 static inline void
966 perf_cgroup_set_timestamp(struct task_struct *task,
967 struct perf_event_context *ctx)
968 {
969 }
970
971 void
972 perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
973 {
974 }
975
976 static inline void
977 perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
978 {
979 }
980
981 static inline u64 perf_cgroup_event_time(struct perf_event *event)
982 {
983 return 0;
984 }
985
986 static inline void
987 perf_cgroup_defer_enabled(struct perf_event *event)
988 {
989 }
990
991 static inline void
992 perf_cgroup_mark_enabled(struct perf_event *event,
993 struct perf_event_context *ctx)
994 {
995 }
996
997 static inline void
998 list_update_cgroup_event(struct perf_event *event,
999 struct perf_event_context *ctx, bool add)
1000 {
1001 }
1002
1003 #endif
1004
1005 /*
1006 * set default to be dependent on timer tick just
1007 * like original code
1008 */
1009 #define PERF_CPU_HRTIMER (1000 / HZ)
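/* e.g. HZ=1000 gives a 1 ms multiplexing interval, HZ=250 gives 4 ms. */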
1010 /*
1011	 * function must be called with interrupts disabled
1012 */
1013 static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
1014 {
1015 struct perf_cpu_context *cpuctx;
1016 int rotations = 0;
1017
1018 WARN_ON(!irqs_disabled());
1019
1020 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
1021 rotations = perf_rotate_context(cpuctx);
1022
1023 raw_spin_lock(&cpuctx->hrtimer_lock);
1024 if (rotations)
1025 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
1026 else
1027 cpuctx->hrtimer_active = 0;
1028 raw_spin_unlock(&cpuctx->hrtimer_lock);
1029
1030 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
1031 }
1032
1033 static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
1034 {
1035 struct hrtimer *timer = &cpuctx->hrtimer;
1036 struct pmu *pmu = cpuctx->ctx.pmu;
1037 u64 interval;
1038
1039 /* no multiplexing needed for SW PMU */
1040 if (pmu->task_ctx_nr == perf_sw_context)
1041 return;
1042
1043 /*
1044 * check default is sane, if not set then force to
1045 * default interval (1/tick)
1046 */
1047 interval = pmu->hrtimer_interval_ms;
1048 if (interval < 1)
1049 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
1050
1051 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
1052
1053 raw_spin_lock_init(&cpuctx->hrtimer_lock);
1054 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
1055 timer->function = perf_mux_hrtimer_handler;
1056 }
1057
1058 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
1059 {
1060 struct hrtimer *timer = &cpuctx->hrtimer;
1061 struct pmu *pmu = cpuctx->ctx.pmu;
1062 unsigned long flags;
1063
1064 /* not for SW PMU */
1065 if (pmu->task_ctx_nr == perf_sw_context)
1066 return 0;
1067
1068 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
1069 if (!cpuctx->hrtimer_active) {
1070 cpuctx->hrtimer_active = 1;
1071 hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
1072 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
1073 }
1074 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
1075
1076 return 0;
1077 }
1078
1079 void perf_pmu_disable(struct pmu *pmu)
1080 {
1081 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1082 if (!(*count)++)
1083 pmu->pmu_disable(pmu);
1084 }
1085
1086 void perf_pmu_enable(struct pmu *pmu)
1087 {
1088 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1089 if (!--(*count))
1090 pmu->pmu_enable(pmu);
1091 }
1092
1093 static DEFINE_PER_CPU(struct list_head, active_ctx_list);
1094
1095 /*
1096 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
1097 * perf_event_task_tick() are fully serialized because they're strictly cpu
1098	 * affine and perf_event_ctx_{activate,deactivate} are called with IRQs
1099 * disabled, while perf_event_task_tick is called from IRQ context.
1100 */
1101 static void perf_event_ctx_activate(struct perf_event_context *ctx)
1102 {
1103 struct list_head *head = this_cpu_ptr(&active_ctx_list);
1104
1105 WARN_ON(!irqs_disabled());
1106
1107 WARN_ON(!list_empty(&ctx->active_ctx_list));
1108
1109 list_add(&ctx->active_ctx_list, head);
1110 }
1111
1112 static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
1113 {
1114 WARN_ON(!irqs_disabled());
1115
1116 WARN_ON(list_empty(&ctx->active_ctx_list));
1117
1118 list_del_init(&ctx->active_ctx_list);
1119 }
1120
1121 static void get_ctx(struct perf_event_context *ctx)
1122 {
1123 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
1124 }
1125
1126 static void free_ctx(struct rcu_head *head)
1127 {
1128 struct perf_event_context *ctx;
1129
1130 ctx = container_of(head, struct perf_event_context, rcu_head);
1131 kfree(ctx->task_ctx_data);
1132 kfree(ctx);
1133 }
1134
1135 static void put_ctx(struct perf_event_context *ctx)
1136 {
1137 if (atomic_dec_and_test(&ctx->refcount)) {
1138 if (ctx->parent_ctx)
1139 put_ctx(ctx->parent_ctx);
1140 if (ctx->task && ctx->task != TASK_TOMBSTONE)
1141 put_task_struct(ctx->task);
1142 call_rcu(&ctx->rcu_head, free_ctx);
1143 }
1144 }
1145
1146 /*
1147 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
1148 * perf_pmu_migrate_context() we need some magic.
1149 *
1150 * Those places that change perf_event::ctx will hold both
1151 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
1152 *
1153 * Lock ordering is by mutex address. There are two other sites where
1154 * perf_event_context::mutex nests and those are:
1155 *
1156 * - perf_event_exit_task_context() [ child , 0 ]
1157 * perf_event_exit_event()
1158 * put_event() [ parent, 1 ]
1159 *
1160 * - perf_event_init_context() [ parent, 0 ]
1161 * inherit_task_group()
1162 * inherit_group()
1163 * inherit_event()
1164 * perf_event_alloc()
1165 * perf_init_event()
1166 * perf_try_init_event() [ child , 1 ]
1167 *
1168	 * While it appears there is an obvious deadlock here -- the parent and child
1169	 * nesting levels are inverted between the two -- this is in fact safe because
1170	 * life-time rules separate them. That is, an exiting task cannot fork, and a
1171 * spawning task cannot (yet) exit.
1172 *
1173	 * But remember that these are parent<->child context relations, and
1174 * migration does not affect children, therefore these two orderings should not
1175 * interact.
1176 *
1177 * The change in perf_event::ctx does not affect children (as claimed above)
1178 * because the sys_perf_event_open() case will install a new event and break
1179 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
1180 * concerned with cpuctx and that doesn't have children.
1181 *
1182 * The places that change perf_event::ctx will issue:
1183 *
1184 * perf_remove_from_context();
1185 * synchronize_rcu();
1186 * perf_install_in_context();
1187 *
1188	 * to effect the change. The remove_from_context() + synchronize_rcu() should
1189 * quiesce the event, after which we can install it in the new location. This
1190 * means that only external vectors (perf_fops, prctl) can perturb the event
1191 * while in transit. Therefore all such accessors should also acquire
1192 * perf_event_context::mutex to serialize against this.
1193 *
1194 * However; because event->ctx can change while we're waiting to acquire
1195 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
1196 * function.
1197 *
1198 * Lock order:
1199 * cred_guard_mutex
1200 * task_struct::perf_event_mutex
1201 * perf_event_context::mutex
1202 * perf_event::child_mutex;
1203 * perf_event_context::lock
1204 * perf_event::mmap_mutex
1205 * mmap_sem
1206 */
1207 static struct perf_event_context *
1208 perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
1209 {
1210 struct perf_event_context *ctx;
1211
1212 again:
1213 rcu_read_lock();
1214 ctx = ACCESS_ONCE(event->ctx);
1215 if (!atomic_inc_not_zero(&ctx->refcount)) {
1216 rcu_read_unlock();
1217 goto again;
1218 }
1219 rcu_read_unlock();
1220
1221 mutex_lock_nested(&ctx->mutex, nesting);
1222 if (event->ctx != ctx) {
1223 mutex_unlock(&ctx->mutex);
1224 put_ctx(ctx);
1225 goto again;
1226 }
1227
1228 return ctx;
1229 }
1230
1231 static inline struct perf_event_context *
1232 perf_event_ctx_lock(struct perf_event *event)
1233 {
1234 return perf_event_ctx_lock_nested(event, 0);
1235 }
1236
1237 static void perf_event_ctx_unlock(struct perf_event *event,
1238 struct perf_event_context *ctx)
1239 {
1240 mutex_unlock(&ctx->mutex);
1241 put_ctx(ctx);
1242 }
1243
1244 /*
1245 * This must be done under the ctx->lock, such as to serialize against
1246 * context_equiv(), therefore we cannot call put_ctx() since that might end up
1247 * calling scheduler related locks and ctx->lock nests inside those.
1248 */
1249 static __must_check struct perf_event_context *
1250 unclone_ctx(struct perf_event_context *ctx)
1251 {
1252 struct perf_event_context *parent_ctx = ctx->parent_ctx;
1253
1254 lockdep_assert_held(&ctx->lock);
1255
1256 if (parent_ctx)
1257 ctx->parent_ctx = NULL;
1258 ctx->generation++;
1259
1260 return parent_ctx;
1261 }
1262
1263 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
1264 {
1265 /*
1266 * only top level events have the pid namespace they were created in
1267 */
1268 if (event->parent)
1269 event = event->parent;
1270
1271 return task_tgid_nr_ns(p, event->ns);
1272 }
1273
1274 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1275 {
1276 /*
1277 * only top level events have the pid namespace they were created in
1278 */
1279 if (event->parent)
1280 event = event->parent;
1281
1282 return task_pid_nr_ns(p, event->ns);
1283 }
1284
1285 /*
1286 * If we inherit events we want to return the parent event id
1287 * to userspace.
1288 */
1289 static u64 primary_event_id(struct perf_event *event)
1290 {
1291 u64 id = event->id;
1292
1293 if (event->parent)
1294 id = event->parent->id;
1295
1296 return id;
1297 }
1298
1299 /*
1300 * Get the perf_event_context for a task and lock it.
1301 *
1302	 * This has to cope with the fact that until it is locked,
1303 * the context could get moved to another task.
1304 */
1305 static struct perf_event_context *
1306 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
1307 {
1308 struct perf_event_context *ctx;
1309
1310 retry:
1311 /*
1312 * One of the few rules of preemptible RCU is that one cannot do
1313 * rcu_read_unlock() while holding a scheduler (or nested) lock when
1314 * part of the read side critical section was irqs-enabled -- see
1315 * rcu_read_unlock_special().
1316 *
1317 * Since ctx->lock nests under rq->lock we must ensure the entire read
1318 * side critical section has interrupts disabled.
1319 */
1320 local_irq_save(*flags);
1321 rcu_read_lock();
1322 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
1323 if (ctx) {
1324 /*
1325 * If this context is a clone of another, it might
1326 * get swapped for another underneath us by
1327 * perf_event_task_sched_out, though the
1328 * rcu_read_lock() protects us from any context
1329 * getting freed. Lock the context and check if it
1330 * got swapped before we could get the lock, and retry
1331 * if so. If we locked the right context, then it
1332 * can't get swapped on us any more.
1333 */
1334 raw_spin_lock(&ctx->lock);
1335 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
1336 raw_spin_unlock(&ctx->lock);
1337 rcu_read_unlock();
1338 local_irq_restore(*flags);
1339 goto retry;
1340 }
1341
1342 if (ctx->task == TASK_TOMBSTONE ||
1343 !atomic_inc_not_zero(&ctx->refcount)) {
1344 raw_spin_unlock(&ctx->lock);
1345 ctx = NULL;
1346 } else {
1347 WARN_ON_ONCE(ctx->task != task);
1348 }
1349 }
1350 rcu_read_unlock();
1351 if (!ctx)
1352 local_irq_restore(*flags);
1353 return ctx;
1354 }
1355
1356 /*
1357 * Get the context for a task and increment its pin_count so it
1358 * can't get swapped to another task. This also increments its
1359 * reference count so that the context can't get freed.
1360 */
1361 static struct perf_event_context *
1362 perf_pin_task_context(struct task_struct *task, int ctxn)
1363 {
1364 struct perf_event_context *ctx;
1365 unsigned long flags;
1366
1367 ctx = perf_lock_task_context(task, ctxn, &flags);
1368 if (ctx) {
1369 ++ctx->pin_count;
1370 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1371 }
1372 return ctx;
1373 }
1374
1375 static void perf_unpin_context(struct perf_event_context *ctx)
1376 {
1377 unsigned long flags;
1378
1379 raw_spin_lock_irqsave(&ctx->lock, flags);
1380 --ctx->pin_count;
1381 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1382 }
1383
1384 /*
1385 * Update the record of the current time in a context.
1386 */
1387 static void update_context_time(struct perf_event_context *ctx)
1388 {
1389 u64 now = perf_clock();
1390
1391 ctx->time += now - ctx->timestamp;
1392 ctx->timestamp = now;
1393 }
1394
1395 static u64 perf_event_time(struct perf_event *event)
1396 {
1397 struct perf_event_context *ctx = event->ctx;
1398
1399 if (is_cgroup_event(event))
1400 return perf_cgroup_event_time(event);
1401
1402 return ctx ? ctx->time : 0;
1403 }
1404
1405 /*
1406	 * Update the total_time_enabled and total_time_running fields for an event.
1407 */
1408 static void update_event_times(struct perf_event *event)
1409 {
1410 struct perf_event_context *ctx = event->ctx;
1411 u64 run_end;
1412
1413 lockdep_assert_held(&ctx->lock);
1414
1415 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1416 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1417 return;
1418
1419 /*
1420 * in cgroup mode, time_enabled represents
1421 * the time the event was enabled AND active
1422 * tasks were in the monitored cgroup. This is
1423 * independent of the activity of the context as
1424 * there may be a mix of cgroup and non-cgroup events.
1425 *
1426 * That is why we treat cgroup events differently
1427 * here.
1428 */
1429 if (is_cgroup_event(event))
1430 run_end = perf_cgroup_event_time(event);
1431 else if (ctx->is_active)
1432 run_end = ctx->time;
1433 else
1434 run_end = event->tstamp_stopped;
1435
1436 event->total_time_enabled = run_end - event->tstamp_enabled;
1437
1438 if (event->state == PERF_EVENT_STATE_INACTIVE)
1439 run_end = event->tstamp_stopped;
1440 else
1441 run_end = perf_event_time(event);
1442
1443 event->total_time_running = run_end - event->tstamp_running;
1444
1445 }
1446
1447 /*
1448 * Update total_time_enabled and total_time_running for all events in a group.
1449 */
1450 static void update_group_times(struct perf_event *leader)
1451 {
1452 struct perf_event *event;
1453
1454 update_event_times(leader);
1455 list_for_each_entry(event, &leader->sibling_list, group_entry)
1456 update_event_times(event);
1457 }
1458
1459 static struct list_head *
1460 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1461 {
1462 if (event->attr.pinned)
1463 return &ctx->pinned_groups;
1464 else
1465 return &ctx->flexible_groups;
1466 }
1467
1468 /*
1469	 * Add an event to the lists for its context.
1470 * Must be called with ctx->mutex and ctx->lock held.
1471 */
1472 static void
1473 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1474 {
1475
1476 lockdep_assert_held(&ctx->lock);
1477
1478 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1479 event->attach_state |= PERF_ATTACH_CONTEXT;
1480
1481 /*
1482	 * If we're a standalone event or group leader, we go to the context
1483	 * list; group events are kept attached to the group so that
1484 * perf_group_detach can, at all times, locate all siblings.
1485 */
1486 if (event->group_leader == event) {
1487 struct list_head *list;
1488
1489 event->group_caps = event->event_caps;
1490
1491 list = ctx_group_list(event, ctx);
1492 list_add_tail(&event->group_entry, list);
1493 }
1494
1495 list_update_cgroup_event(event, ctx, true);
1496
1497 list_add_rcu(&event->event_entry, &ctx->event_list);
1498 ctx->nr_events++;
1499 if (event->attr.inherit_stat)
1500 ctx->nr_stat++;
1501
1502 ctx->generation++;
1503 }
1504
1505 /*
1506 * Initialize event state based on the perf_event_attr::disabled.
1507 */
1508 static inline void perf_event__state_init(struct perf_event *event)
1509 {
1510 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1511 PERF_EVENT_STATE_INACTIVE;
1512 }
1513
1514 static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
1515 {
1516 int entry = sizeof(u64); /* value */
1517 int size = 0;
1518 int nr = 1;
1519
1520 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1521 size += sizeof(u64);
1522
1523 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1524 size += sizeof(u64);
1525
1526 if (event->attr.read_format & PERF_FORMAT_ID)
1527 entry += sizeof(u64);
1528
1529 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1530 nr += nr_siblings;
1531 size += sizeof(u64);
1532 }
1533
1534 size += entry * nr;
1535 event->read_size = size;
1536 }
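/*
 * Example of the sizing above: read_format = PERF_FORMAT_GROUP |
 * PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED on a leader with two
 * siblings gives entry = 16 (value + id), nr = 3, and a fixed part of
 * 16 (time_enabled + nr), so read_size = 16 + 16 * 3 = 64 bytes.
 */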
1537
1538 static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
1539 {
1540 struct perf_sample_data *data;
1541 u16 size = 0;
1542
1543 if (sample_type & PERF_SAMPLE_IP)
1544 size += sizeof(data->ip);
1545
1546 if (sample_type & PERF_SAMPLE_ADDR)
1547 size += sizeof(data->addr);
1548
1549 if (sample_type & PERF_SAMPLE_PERIOD)
1550 size += sizeof(data->period);
1551
1552 if (sample_type & PERF_SAMPLE_WEIGHT)
1553 size += sizeof(data->weight);
1554
1555 if (sample_type & PERF_SAMPLE_READ)
1556 size += event->read_size;
1557
1558 if (sample_type & PERF_SAMPLE_DATA_SRC)
1559 size += sizeof(data->data_src.val);
1560
1561 if (sample_type & PERF_SAMPLE_TRANSACTION)
1562 size += sizeof(data->txn);
1563
1564 event->header_size = size;
1565 }
1566
1567 /*
1568 * Called at perf_event creation and when events are attached/detached from a
1569 * group.
1570 */
1571 static void perf_event__header_size(struct perf_event *event)
1572 {
1573 __perf_event_read_size(event,
1574 event->group_leader->nr_siblings);
1575 __perf_event_header_size(event, event->attr.sample_type);
1576 }
1577
1578 static void perf_event__id_header_size(struct perf_event *event)
1579 {
1580 struct perf_sample_data *data;
1581 u64 sample_type = event->attr.sample_type;
1582 u16 size = 0;
1583
1584 if (sample_type & PERF_SAMPLE_TID)
1585 size += sizeof(data->tid_entry);
1586
1587 if (sample_type & PERF_SAMPLE_TIME)
1588 size += sizeof(data->time);
1589
1590 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1591 size += sizeof(data->id);
1592
1593 if (sample_type & PERF_SAMPLE_ID)
1594 size += sizeof(data->id);
1595
1596 if (sample_type & PERF_SAMPLE_STREAM_ID)
1597 size += sizeof(data->stream_id);
1598
1599 if (sample_type & PERF_SAMPLE_CPU)
1600 size += sizeof(data->cpu_entry);
1601
1602 event->id_header_size = size;
1603 }
1604
1605 static bool perf_event_validate_size(struct perf_event *event)
1606 {
1607 /*
1608 * The values computed here will be over-written when we actually
1609 * attach the event.
1610 */
1611 __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
1612 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
1613 perf_event__id_header_size(event);
1614
1615 /*
1616 * Sum the lot; should not exceed the 64k limit we have on records.
1617 * Conservative limit to allow for callchains and other variable fields.
1618 */
1619 if (event->read_size + event->header_size +
1620 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
1621 return false;
1622
1623 return true;
1624 }
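/*
 * The 16k check above is deliberately far below the hard limit: a record's
 * length lives in perf_event_header::size, which is a u16, so no single
 * record can exceed 64 KiB; the slack leaves room for callchains and other
 * variable-size fields.
 */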
1625
1626 static void perf_group_attach(struct perf_event *event)
1627 {
1628 struct perf_event *group_leader = event->group_leader, *pos;
1629
1630 /*
1631 * We can have double attach due to group movement in perf_event_open.
1632 */
1633 if (event->attach_state & PERF_ATTACH_GROUP)
1634 return;
1635
1636 event->attach_state |= PERF_ATTACH_GROUP;
1637
1638 if (group_leader == event)
1639 return;
1640
1641 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1642
1643 group_leader->group_caps &= event->event_caps;
1644
1645 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1646 group_leader->nr_siblings++;
1647
1648 perf_event__header_size(group_leader);
1649
1650 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1651 perf_event__header_size(pos);
1652 }
1653
1654 /*
1655	 * Remove an event from the lists for its context.
1656 * Must be called with ctx->mutex and ctx->lock held.
1657 */
1658 static void
1659 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1660 {
1661 WARN_ON_ONCE(event->ctx != ctx);
1662 lockdep_assert_held(&ctx->lock);
1663
1664 /*
1665 * We can have double detach due to exit/hot-unplug + close.
1666 */
1667 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1668 return;
1669
1670 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1671
1672 list_update_cgroup_event(event, ctx, false);
1673
1674 ctx->nr_events--;
1675 if (event->attr.inherit_stat)
1676 ctx->nr_stat--;
1677
1678 list_del_rcu(&event->event_entry);
1679
1680 if (event->group_leader == event)
1681 list_del_init(&event->group_entry);
1682
1683 update_group_times(event);
1684
1685 /*
1686 * If event was in error state, then keep it
1687 * that way, otherwise bogus counts will be
1688 * returned on read(). The only way to get out
1689 * of error state is by explicit re-enabling
1690 * of the event
1691 */
1692 if (event->state > PERF_EVENT_STATE_OFF)
1693 event->state = PERF_EVENT_STATE_OFF;
1694
1695 ctx->generation++;
1696 }
1697
1698 static void perf_group_detach(struct perf_event *event)
1699 {
1700 struct perf_event *sibling, *tmp;
1701 struct list_head *list = NULL;
1702
1703 /*
1704 * We can have double detach due to exit/hot-unplug + close.
1705 */
1706 if (!(event->attach_state & PERF_ATTACH_GROUP))
1707 return;
1708
1709 event->attach_state &= ~PERF_ATTACH_GROUP;
1710
1711 /*
1712 * If this is a sibling, remove it from its group.
1713 */
1714 if (event->group_leader != event) {
1715 list_del_init(&event->group_entry);
1716 event->group_leader->nr_siblings--;
1717 goto out;
1718 }
1719
1720 if (!list_empty(&event->group_entry))
1721 list = &event->group_entry;
1722
1723 /*
1724 * If this was a group event with sibling events then
1725 * upgrade the siblings to singleton events by adding them
1726 * to whatever list we are on.
1727 */
1728 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1729 if (list)
1730 list_move_tail(&sibling->group_entry, list);
1731 sibling->group_leader = sibling;
1732
1733 /* Inherit group flags from the previous leader */
1734 sibling->group_caps = event->group_caps;
1735
1736 WARN_ON_ONCE(sibling->ctx != event->ctx);
1737 }
1738
1739 out:
1740 perf_event__header_size(event->group_leader);
1741
1742 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1743 perf_event__header_size(tmp);
1744 }
1745
1746 static bool is_orphaned_event(struct perf_event *event)
1747 {
1748 return event->state == PERF_EVENT_STATE_DEAD;
1749 }
1750
1751 static inline int __pmu_filter_match(struct perf_event *event)
1752 {
1753 struct pmu *pmu = event->pmu;
1754 return pmu->filter_match ? pmu->filter_match(event) : 1;
1755 }
1756
1757 /*
1758 * Check whether we should attempt to schedule an event group based on
1759 * PMU-specific filtering. An event group can consist of HW and SW events,
1760 * potentially with a SW leader, so we must check all the filters, to
1761 * determine whether a group is schedulable:
1762 */
1763 static inline int pmu_filter_match(struct perf_event *event)
1764 {
1765 struct perf_event *child;
1766
1767 if (!__pmu_filter_match(event))
1768 return 0;
1769
1770 list_for_each_entry(child, &event->sibling_list, group_entry) {
1771 if (!__pmu_filter_match(child))
1772 return 0;
1773 }
1774
1775 return 1;
1776 }
1777
1778 static inline int
1779 event_filter_match(struct perf_event *event)
1780 {
1781 return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
1782 perf_cgroup_match(event) && pmu_filter_match(event);
1783 }
1784
1785 static void
1786 event_sched_out(struct perf_event *event,
1787 struct perf_cpu_context *cpuctx,
1788 struct perf_event_context *ctx)
1789 {
1790 u64 tstamp = perf_event_time(event);
1791 u64 delta;
1792
1793 WARN_ON_ONCE(event->ctx != ctx);
1794 lockdep_assert_held(&ctx->lock);
1795
1796 /*
1797 * An event which could not be activated because of
1798 * filter mismatch still needs to have its timings
1799	 * maintained, otherwise bogus information is returned
1800 * via read() for time_enabled, time_running:
1801 */
1802 if (event->state == PERF_EVENT_STATE_INACTIVE &&
1803 !event_filter_match(event)) {
1804 delta = tstamp - event->tstamp_stopped;
1805 event->tstamp_running += delta;
1806 event->tstamp_stopped = tstamp;
1807 }
1808
1809 if (event->state != PERF_EVENT_STATE_ACTIVE)
1810 return;
1811
1812 perf_pmu_disable(event->pmu);
1813
1814 event->tstamp_stopped = tstamp;
1815 event->pmu->del(event, 0);
1816 event->oncpu = -1;
1817 event->state = PERF_EVENT_STATE_INACTIVE;
1818 if (event->pending_disable) {
1819 event->pending_disable = 0;
1820 event->state = PERF_EVENT_STATE_OFF;
1821 }
1822
1823 if (!is_software_event(event))
1824 cpuctx->active_oncpu--;
1825 if (!--ctx->nr_active)
1826 perf_event_ctx_deactivate(ctx);
1827 if (event->attr.freq && event->attr.sample_freq)
1828 ctx->nr_freq--;
1829 if (event->attr.exclusive || !cpuctx->active_oncpu)
1830 cpuctx->exclusive = 0;
1831
1832 perf_pmu_enable(event->pmu);
1833 }
1834
1835 static void
1836 group_sched_out(struct perf_event *group_event,
1837 struct perf_cpu_context *cpuctx,
1838 struct perf_event_context *ctx)
1839 {
1840 struct perf_event *event;
1841 int state = group_event->state;
1842
1843 perf_pmu_disable(ctx->pmu);
1844
1845 event_sched_out(group_event, cpuctx, ctx);
1846
1847 /*
1848 * Schedule out siblings (if any):
1849 */
1850 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1851 event_sched_out(event, cpuctx, ctx);
1852
1853 perf_pmu_enable(ctx->pmu);
1854
1855 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1856 cpuctx->exclusive = 0;
1857 }
1858
1859 #define DETACH_GROUP 0x01UL
1860
1861 /*
1862 * Cross CPU call to remove a performance event
1863 *
1864 * We disable the event on the hardware level first. After that we
1865 * remove it from the context list.
1866 */
1867 static void
1868 __perf_remove_from_context(struct perf_event *event,
1869 struct perf_cpu_context *cpuctx,
1870 struct perf_event_context *ctx,
1871 void *info)
1872 {
1873 unsigned long flags = (unsigned long)info;
1874
1875 event_sched_out(event, cpuctx, ctx);
1876 if (flags & DETACH_GROUP)
1877 perf_group_detach(event);
1878 list_del_event(event, ctx);
1879
1880 if (!ctx->nr_events && ctx->is_active) {
1881 ctx->is_active = 0;
1882 if (ctx->task) {
1883 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
1884 cpuctx->task_ctx = NULL;
1885 }
1886 }
1887 }
1888
1889 /*
1890 * Remove the event from a task's (or a CPU's) list of events.
1891 *
1892 * If event->ctx is a cloned context, callers must make sure that
1893 * every task struct that event->ctx->task could possibly point to
1894 * remains valid. This is OK when called from perf_release since
1895 * that only calls us on the top-level context, which can't be a clone.
1896 * When called from perf_event_exit_task, it's OK because the
1897 * context has been detached from its task.
1898 */
1899 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
1900 {
1901 lockdep_assert_held(&event->ctx->mutex);
1902
1903 event_function_call(event, __perf_remove_from_context, (void *)flags);
1904 }
1905
1906 /*
1907 * Cross CPU call to disable a performance event
1908 */
1909 static void __perf_event_disable(struct perf_event *event,
1910 struct perf_cpu_context *cpuctx,
1911 struct perf_event_context *ctx,
1912 void *info)
1913 {
1914 if (event->state < PERF_EVENT_STATE_INACTIVE)
1915 return;
1916
1917 update_context_time(ctx);
1918 update_cgrp_time_from_event(event);
1919 update_group_times(event);
1920 if (event == event->group_leader)
1921 group_sched_out(event, cpuctx, ctx);
1922 else
1923 event_sched_out(event, cpuctx, ctx);
1924 event->state = PERF_EVENT_STATE_OFF;
1925 }
1926
1927 /*
1928	 * Disable an event.
1929 *
1930 * If event->ctx is a cloned context, callers must make sure that
1931 * every task struct that event->ctx->task could possibly point to
1932	 * remains valid. This condition is satisfied when called through
1933 * perf_event_for_each_child or perf_event_for_each because they
1934 * hold the top-level event's child_mutex, so any descendant that
1935 * goes to exit will block in perf_event_exit_event().
1936 *
1937 * When called from perf_pending_event it's OK because event->ctx
1938 * is the current context on this CPU and preemption is disabled,
1939 * hence we can't get into perf_event_task_sched_out for this context.
1940 */
1941 static void _perf_event_disable(struct perf_event *event)
1942 {
1943 struct perf_event_context *ctx = event->ctx;
1944
1945 raw_spin_lock_irq(&ctx->lock);
1946 if (event->state <= PERF_EVENT_STATE_OFF) {
1947 raw_spin_unlock_irq(&ctx->lock);
1948 return;
1949 }
1950 raw_spin_unlock_irq(&ctx->lock);
1951
1952 event_function_call(event, __perf_event_disable, NULL);
1953 }
1954
1955 void perf_event_disable_local(struct perf_event *event)
1956 {
1957 event_function_local(event, __perf_event_disable, NULL);
1958 }
1959
1960 /*
1961 * Strictly speaking kernel users cannot create groups and therefore this
1962 * interface does not need the perf_event_ctx_lock() magic.
1963 */
1964 void perf_event_disable(struct perf_event *event)
1965 {
1966 struct perf_event_context *ctx;
1967
1968 ctx = perf_event_ctx_lock(event);
1969 _perf_event_disable(event);
1970 perf_event_ctx_unlock(event, ctx);
1971 }
1972 EXPORT_SYMBOL_GPL(perf_event_disable);
1973
1974 void perf_event_disable_inatomic(struct perf_event *event)
1975 {
1976 event->pending_disable = 1;
1977 irq_work_queue(&event->pending);
1978 }
1979
1980 static void perf_set_shadow_time(struct perf_event *event,
1981 struct perf_event_context *ctx,
1982 u64 tstamp)
1983 {
1984 /*
1985 * use the correct time source for the time snapshot
1986 *
1987 * We could get by without this by leveraging the
1988 * fact that to get to this function, the caller
1989 * has most likely already called update_context_time()
1990 * and update_cgrp_time_xx() and thus both timestamps
1991 * are identical (or very close). Given that tstamp is
1992 * already adjusted for cgroup, we could say that:
1993 * tstamp - ctx->timestamp
1994 * is equivalent to
1995 * tstamp - cgrp->timestamp.
1996 *
1997 * Then, in perf_output_read(), the calculation would
1998 * work with no changes because:
1999 * - event is guaranteed scheduled in
2000 * - no scheduled out in between
2001 * - thus the timestamp would be the same
2002 *
2003 * But this is a bit hairy.
2004 *
2005 * So instead, we have an explicit cgroup call to remain
2006 * within the time source all along. We believe it
2007 * is cleaner and simpler to understand.
2008 */
2009 if (is_cgroup_event(event))
2010 perf_cgroup_set_shadow_time(event, tstamp);
2011 else
2012 event->shadow_ctx_time = tstamp - ctx->timestamp;
2013 }
2014
2015 #define MAX_INTERRUPTS (~0ULL)
2016
2017 static void perf_log_throttle(struct perf_event *event, int enable);
2018 static void perf_log_itrace_start(struct perf_event *event);
2019
2020 static int
2021 event_sched_in(struct perf_event *event,
2022 struct perf_cpu_context *cpuctx,
2023 struct perf_event_context *ctx)
2024 {
2025 u64 tstamp = perf_event_time(event);
2026 int ret = 0;
2027
2028 lockdep_assert_held(&ctx->lock);
2029
2030 if (event->state <= PERF_EVENT_STATE_OFF)
2031 return 0;
2032
2033 WRITE_ONCE(event->oncpu, smp_processor_id());
2034 /*
2035 * Order event::oncpu write to happen before the ACTIVE state
2036 * is visible.
2037 */
2038 smp_wmb();
2039 WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
2040
2041 /*
2042 * Unthrottle events; since we just scheduled in, we might have missed
2043 * several ticks already, and for a heavily scheduling task there is little
2044 * guarantee it'll get a tick in a timely manner.
2045 */
2046 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
2047 perf_log_throttle(event, 1);
2048 event->hw.interrupts = 0;
2049 }
2050
2051 /*
2052 * The new state must be visible before we turn it on in the hardware:
2053 */
2054 smp_wmb();
2055
2056 perf_pmu_disable(event->pmu);
2057
2058 perf_set_shadow_time(event, ctx, tstamp);
2059
2060 perf_log_itrace_start(event);
2061
2062 if (event->pmu->add(event, PERF_EF_START)) {
2063 event->state = PERF_EVENT_STATE_INACTIVE;
2064 event->oncpu = -1;
2065 ret = -EAGAIN;
2066 goto out;
2067 }
2068
2069 event->tstamp_running += tstamp - event->tstamp_stopped;
2070
2071 if (!is_software_event(event))
2072 cpuctx->active_oncpu++;
2073 if (!ctx->nr_active++)
2074 perf_event_ctx_activate(ctx);
2075 if (event->attr.freq && event->attr.sample_freq)
2076 ctx->nr_freq++;
2077
2078 if (event->attr.exclusive)
2079 cpuctx->exclusive = 1;
2080
2081 out:
2082 perf_pmu_enable(event->pmu);
2083
2084 return ret;
2085 }
2086
2087 static int
2088 group_sched_in(struct perf_event *group_event,
2089 struct perf_cpu_context *cpuctx,
2090 struct perf_event_context *ctx)
2091 {
2092 struct perf_event *event, *partial_group = NULL;
2093 struct pmu *pmu = ctx->pmu;
2094 u64 now = ctx->time;
2095 bool simulate = false;
2096
2097 if (group_event->state == PERF_EVENT_STATE_OFF)
2098 return 0;
2099
2100 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
2101
2102 if (event_sched_in(group_event, cpuctx, ctx)) {
2103 pmu->cancel_txn(pmu);
2104 perf_mux_hrtimer_restart(cpuctx);
2105 return -EAGAIN;
2106 }
2107
2108 /*
2109 * Schedule in siblings as one group (if any):
2110 */
2111 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2112 if (event_sched_in(event, cpuctx, ctx)) {
2113 partial_group = event;
2114 goto group_error;
2115 }
2116 }
2117
2118 if (!pmu->commit_txn(pmu))
2119 return 0;
2120
2121 group_error:
2122 /*
2123 * Groups can be scheduled in as one unit only, so undo any
2124 * partial group before returning:
2125 * The events up to the failed event are scheduled out normally;
2126 * tstamp_stopped will be updated.
2127 *
2128 * The failed events and the remaining siblings need to have
2129 * their timings updated as if they had gone through event_sched_in()
2130 * and event_sched_out(). This is required to get consistent timings
2131 * across the group. This also takes care of the case where the group
2132 * could never be scheduled by ensuring tstamp_stopped is set to mark
2133 * the time the event was actually stopped, such that time delta
2134 * calculation in update_event_times() is correct.
2135 */
2136 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2137 if (event == partial_group)
2138 simulate = true;
2139
2140 if (simulate) {
2141 event->tstamp_running += now - event->tstamp_stopped;
2142 event->tstamp_stopped = now;
2143 } else {
2144 event_sched_out(event, cpuctx, ctx);
2145 }
2146 }
2147 event_sched_out(group_event, cpuctx, ctx);
2148
2149 pmu->cancel_txn(pmu);
2150
2151 perf_mux_hrtimer_restart(cpuctx);
2152
2153 return -EAGAIN;
2154 }
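/*
 * Worked example of the partial-group undo above: suppose a leader L with
 * siblings S1, S2, S3 is being scheduled in and S2's event_sched_in() fails.
 * S1, which did make it onto the PMU, is scheduled out normally (its
 * tstamp_stopped is updated by event_sched_out()); from S2 onwards
 * 'simulate' becomes true, so S2 and the never-attempted S3 only have their
 * tstamp_running/tstamp_stopped advanced to 'now', as if they had gone
 * through event_sched_in() and event_sched_out(); finally the leader L is
 * scheduled out and the transaction is cancelled.
 */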
2155
2156 /*
2157 * Work out whether we can put this event group on the CPU now.
2158 */
2159 static int group_can_go_on(struct perf_event *event,
2160 struct perf_cpu_context *cpuctx,
2161 int can_add_hw)
2162 {
2163 /*
2164 * Groups consisting entirely of software events can always go on.
2165 */
2166 if (event->group_caps & PERF_EV_CAP_SOFTWARE)
2167 return 1;
2168 /*
2169 * If an exclusive group is already on, no other hardware
2170 * events can go on.
2171 */
2172 if (cpuctx->exclusive)
2173 return 0;
2174 /*
2175 * If this group is exclusive and there are already
2176 * events on the CPU, it can't go on.
2177 */
2178 if (event->attr.exclusive && cpuctx->active_oncpu)
2179 return 0;
2180 /*
2181 * Otherwise, try to add it if all previous groups were able
2182 * to go on.
2183 */
2184 return can_add_hw;
2185 }
2186
2187 static void add_event_to_ctx(struct perf_event *event,
2188 struct perf_event_context *ctx)
2189 {
2190 u64 tstamp = perf_event_time(event);
2191
2192 list_add_event(event, ctx);
2193 perf_group_attach(event);
2194 event->tstamp_enabled = tstamp;
2195 event->tstamp_running = tstamp;
2196 event->tstamp_stopped = tstamp;
2197 }
2198
2199 static void ctx_sched_out(struct perf_event_context *ctx,
2200 struct perf_cpu_context *cpuctx,
2201 enum event_type_t event_type);
2202 static void
2203 ctx_sched_in(struct perf_event_context *ctx,
2204 struct perf_cpu_context *cpuctx,
2205 enum event_type_t event_type,
2206 struct task_struct *task);
2207
2208 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2209 struct perf_event_context *ctx)
2210 {
2211 if (!cpuctx->task_ctx)
2212 return;
2213
2214 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2215 return;
2216
2217 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2218 }
2219
2220 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2221 struct perf_event_context *ctx,
2222 struct task_struct *task)
2223 {
2224 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2225 if (ctx)
2226 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2227 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2228 if (ctx)
2229 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2230 }
2231
2232 static void ctx_resched(struct perf_cpu_context *cpuctx,
2233 struct perf_event_context *task_ctx)
2234 {
2235 perf_pmu_disable(cpuctx->ctx.pmu);
2236 if (task_ctx)
2237 task_ctx_sched_out(cpuctx, task_ctx);
2238 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
2239 perf_event_sched_in(cpuctx, task_ctx, current);
2240 perf_pmu_enable(cpuctx->ctx.pmu);
2241 }
2242
2243 /*
2244 * Cross CPU call to install and enable a performance event
2245 *
2246 * Very similar to remote_function() + event_function() but cannot assume that
2247 * things like ctx->is_active and cpuctx->task_ctx are set.
2248 */
2249 static int __perf_install_in_context(void *info)
2250 {
2251 struct perf_event *event = info;
2252 struct perf_event_context *ctx = event->ctx;
2253 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2254 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2255 bool activate = true;
2256 int ret = 0;
2257
2258 raw_spin_lock(&cpuctx->ctx.lock);
2259 if (ctx->task) {
2260 raw_spin_lock(&ctx->lock);
2261 task_ctx = ctx;
2262
2263 /* If we're on the wrong CPU, try again */
2264 if (task_cpu(ctx->task) != smp_processor_id()) {
2265 ret = -ESRCH;
2266 goto unlock;
2267 }
2268
2269 /*
2270 * If we're on the right CPU, see if the task we target is
2271 * current; if not, we don't have to activate the ctx, a future
2272 * context switch will do that for us.
2273 */
2274 if (ctx->task != current)
2275 activate = false;
2276 else
2277 WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2278
2279 } else if (task_ctx) {
2280 raw_spin_lock(&task_ctx->lock);
2281 }
2282
2283 if (activate) {
2284 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2285 add_event_to_ctx(event, ctx);
2286 ctx_resched(cpuctx, task_ctx);
2287 } else {
2288 add_event_to_ctx(event, ctx);
2289 }
2290
2291 unlock:
2292 perf_ctx_unlock(cpuctx, task_ctx);
2293
2294 return ret;
2295 }
2296
2297 /*
2298 * Attach a performance event to a context.
2299 *
2300 * Very similar to event_function_call, see comment there.
2301 */
2302 static void
2303 perf_install_in_context(struct perf_event_context *ctx,
2304 struct perf_event *event,
2305 int cpu)
2306 {
2307 struct task_struct *task = READ_ONCE(ctx->task);
2308
2309 lockdep_assert_held(&ctx->mutex);
2310
2311 if (event->cpu != -1)
2312 event->cpu = cpu;
2313
2314 /*
2315 * Ensures that if we can observe event->ctx, both the event and ctx
2316 * will be 'complete'. See perf_iterate_sb_cpu().
2317 */
2318 smp_store_release(&event->ctx, ctx);
2319
2320 if (!task) {
2321 cpu_function_call(cpu, __perf_install_in_context, event);
2322 return;
2323 }
2324
2325 /*
2326 * Should not happen, we validate the ctx is still alive before calling.
2327 */
2328 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2329 return;
2330
2331 /*
2332 * Installing events is tricky because we cannot rely on ctx->is_active
2333 * to be set in case this is the nr_events 0 -> 1 transition.
2334 */
2335 again:
2336 /*
2337 * Cannot use task_function_call() because we need to run on the task's
2338 * CPU regardless of whether it's current or not.
2339 */
2340 if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
2341 return;
2342
2343 raw_spin_lock_irq(&ctx->lock);
2344 task = ctx->task;
2345 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
2346 /*
2347 * Cannot happen because we already checked above (which also
2348 * cannot happen), and we hold ctx->mutex, which serializes us
2349 * against perf_event_exit_task_context().
2350 */
2351 raw_spin_unlock_irq(&ctx->lock);
2352 return;
2353 }
2354 raw_spin_unlock_irq(&ctx->lock);
2355 /*
2356 * Since !ctx->is_active doesn't mean anything, we must IPI
2357 * unconditionally.
2358 */
2359 goto again;
2360 }
2361
2362 /*
2363 * Put an event into inactive state and update time fields.
2364 * Enabling the leader of a group effectively enables all
2365 * the group members that aren't explicitly disabled, so we
2366 * have to update their ->tstamp_enabled also.
2367 * Note: this works for group members as well as group leaders
2368 * since the non-leader members' sibling_lists will be empty.
2369 */
2370 static void __perf_event_mark_enabled(struct perf_event *event)
2371 {
2372 struct perf_event *sub;
2373 u64 tstamp = perf_event_time(event);
2374
2375 event->state = PERF_EVENT_STATE_INACTIVE;
2376 event->tstamp_enabled = tstamp - event->total_time_enabled;
2377 list_for_each_entry(sub, &event->sibling_list, group_entry) {
2378 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
2379 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
2380 }
2381 }
2382
2383 /*
2384 * Cross CPU call to enable a performance event
2385 */
2386 static void __perf_event_enable(struct perf_event *event,
2387 struct perf_cpu_context *cpuctx,
2388 struct perf_event_context *ctx,
2389 void *info)
2390 {
2391 struct perf_event *leader = event->group_leader;
2392 struct perf_event_context *task_ctx;
2393
2394 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2395 event->state <= PERF_EVENT_STATE_ERROR)
2396 return;
2397
2398 if (ctx->is_active)
2399 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2400
2401 __perf_event_mark_enabled(event);
2402
2403 if (!ctx->is_active)
2404 return;
2405
2406 if (!event_filter_match(event)) {
2407 if (is_cgroup_event(event))
2408 perf_cgroup_defer_enabled(event);
2409 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2410 return;
2411 }
2412
2413 /*
2414 * If the event is in a group and isn't the group leader,
2415 * then don't put it on unless the group is on.
2416 */
2417 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2418 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
2419 return;
2420 }
2421
2422 task_ctx = cpuctx->task_ctx;
2423 if (ctx->task)
2424 WARN_ON_ONCE(task_ctx != ctx);
2425
2426 ctx_resched(cpuctx, task_ctx);
2427 }
2428
2429 /*
2430 * Enable an event.
2431 *
2432 * If event->ctx is a cloned context, callers must make sure that
2433 * every task struct that event->ctx->task could possibly point to
2434 * remains valid. This condition is satisfied when called through
2435 * perf_event_for_each_child or perf_event_for_each as described
2436 * for perf_event_disable.
2437 */
2438 static void _perf_event_enable(struct perf_event *event)
2439 {
2440 struct perf_event_context *ctx = event->ctx;
2441
2442 raw_spin_lock_irq(&ctx->lock);
2443 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2444 event->state < PERF_EVENT_STATE_ERROR) {
2445 raw_spin_unlock_irq(&ctx->lock);
2446 return;
2447 }
2448
2449 /*
2450 * If the event is in error state, clear that first.
2451 *
2452 * That way, if we see the event in error state below, we know that it
2453 * has gone back into error state, as distinct from the task having
2454 * been scheduled away before the cross-call arrived.
2455 */
2456 if (event->state == PERF_EVENT_STATE_ERROR)
2457 event->state = PERF_EVENT_STATE_OFF;
2458 raw_spin_unlock_irq(&ctx->lock);
2459
2460 event_function_call(event, __perf_event_enable, NULL);
2461 }
2462
2463 /*
2464 * See perf_event_disable();
2465 */
2466 void perf_event_enable(struct perf_event *event)
2467 {
2468 struct perf_event_context *ctx;
2469
2470 ctx = perf_event_ctx_lock(event);
2471 _perf_event_enable(event);
2472 perf_event_ctx_unlock(event, ctx);
2473 }
2474 EXPORT_SYMBOL_GPL(perf_event_enable);
2475
2476 struct stop_event_data {
2477 struct perf_event *event;
2478 unsigned int restart;
2479 };
2480
2481 static int __perf_event_stop(void *info)
2482 {
2483 struct stop_event_data *sd = info;
2484 struct perf_event *event = sd->event;
2485
2486 /* if it's already INACTIVE, do nothing */
2487 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2488 return 0;
2489
2490 /* matches smp_wmb() in event_sched_in() */
2491 smp_rmb();
2492
2493 /*
2494 * There is a window with interrupts enabled before we get here,
2495 * so we need to check again lest we try to stop another CPU's event.
2496 */
2497 if (READ_ONCE(event->oncpu) != smp_processor_id())
2498 return -EAGAIN;
2499
2500 event->pmu->stop(event, PERF_EF_UPDATE);
2501
2502 /*
2503 * May race with the actual stop (through perf_pmu_output_stop()),
2504 * but it is only used for events with AUX ring buffer, and such
2505 * events will refuse to restart because of rb::aux_mmap_count==0,
2506 * see comments in perf_aux_output_begin().
2507 *
2508 * Since this is happening on an event-local CPU, no trace is lost
2509 * while restarting.
2510 */
2511 if (sd->restart)
2512 event->pmu->start(event, 0);
2513
2514 return 0;
2515 }
2516
2517 static int perf_event_stop(struct perf_event *event, int restart)
2518 {
2519 struct stop_event_data sd = {
2520 .event = event,
2521 .restart = restart,
2522 };
2523 int ret = 0;
2524
2525 do {
2526 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2527 return 0;
2528
2529 /* matches smp_wmb() in event_sched_in() */
2530 smp_rmb();
2531
2532 /*
2533 * We only want to restart ACTIVE events, so if the event goes
2534 * inactive here (event->oncpu==-1), there's nothing more to do;
2535 * fall through with ret==-ENXIO.
2536 */
2537 ret = cpu_function_call(READ_ONCE(event->oncpu),
2538 __perf_event_stop, &sd);
2539 } while (ret == -EAGAIN);
2540
2541 return ret;
2542 }
2543
2544 /*
2545 * In order to contain the amount of raciness and trickiness in the address
2546 * filter configuration management, it is a two-part process:
2547 *
2548 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
2549 * we update the addresses of corresponding vmas in
2550 * event::addr_filters_offs array and bump the event::addr_filters_gen;
2551 * (p2) when an event is scheduled in (pmu::add), it calls
2552 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
2553 * if the generation has changed since the previous call.
2554 *
2555 * If (p1) happens while the event is active, we restart it to force (p2).
2556 *
2557 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
2558 * pre-existing mappings, called once when new filters arrive via SET_FILTER
2559 * ioctl;
2560 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
2561 * registered mapping, called for every new mmap(), with mm::mmap_sem down
2562 * for reading;
2563 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
2564 * of exec.
2565 */
2566 void perf_event_addr_filters_sync(struct perf_event *event)
2567 {
2568 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
2569
2570 if (!has_addr_filter(event))
2571 return;
2572
2573 raw_spin_lock(&ifh->lock);
2574 if (event->addr_filters_gen != event->hw.addr_filters_gen) {
2575 event->pmu->addr_filters_sync(event);
2576 event->hw.addr_filters_gen = event->addr_filters_gen;
2577 }
2578 raw_spin_unlock(&ifh->lock);
2579 }
2580 EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
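/*
 * Illustrative sketch of step (p2) above; 'my_pmu_add' is a hypothetical
 * driver callback, not kernel code. A PMU driver with address filter support
 * would call perf_event_addr_filters_sync() on its scheduling-in path so
 * that filter updates made while the event was off the PMU get picked up:
 *
 *	static int my_pmu_add(struct perf_event *event, int flags)
 *	{
 *		// Only calls ->addr_filters_sync() if the filter
 *		// generation changed since the previous sync.
 *		perf_event_addr_filters_sync(event);
 *
 *		// ... program the hardware filters, then start counting ...
 *		return 0;
 *	}
 */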
2581
2582 static int _perf_event_refresh(struct perf_event *event, int refresh)
2583 {
2584 /*
2585 * not supported on inherited events
2586 */
2587 if (event->attr.inherit || !is_sampling_event(event))
2588 return -EINVAL;
2589
2590 atomic_add(refresh, &event->event_limit);
2591 _perf_event_enable(event);
2592
2593 return 0;
2594 }
2595
2596 /*
2597 * See perf_event_disable()
2598 */
2599 int perf_event_refresh(struct perf_event *event, int refresh)
2600 {
2601 struct perf_event_context *ctx;
2602 int ret;
2603
2604 ctx = perf_event_ctx_lock(event);
2605 ret = _perf_event_refresh(event, refresh);
2606 perf_event_ctx_unlock(event, ctx);
2607
2608 return ret;
2609 }
2610 EXPORT_SYMBOL_GPL(perf_event_refresh);
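/*
 * For exposition: a refresh of N arms the event for N more overflows --
 * event_limit is increased by N and the event is enabled; once those
 * overflows have been consumed the event is disabled again. This mirrors
 * the PERF_EVENT_IOC_REFRESH ioctl used for poll()-based overflow
 * notification from userspace.
 */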
2611
2612 static void ctx_sched_out(struct perf_event_context *ctx,
2613 struct perf_cpu_context *cpuctx,
2614 enum event_type_t event_type)
2615 {
2616 int is_active = ctx->is_active;
2617 struct perf_event *event;
2618
2619 lockdep_assert_held(&ctx->lock);
2620
2621 if (likely(!ctx->nr_events)) {
2622 /*
2623 * See __perf_remove_from_context().
2624 */
2625 WARN_ON_ONCE(ctx->is_active);
2626 if (ctx->task)
2627 WARN_ON_ONCE(cpuctx->task_ctx);
2628 return;
2629 }
2630
2631 ctx->is_active &= ~event_type;
2632 if (!(ctx->is_active & EVENT_ALL))
2633 ctx->is_active = 0;
2634
2635 if (ctx->task) {
2636 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2637 if (!ctx->is_active)
2638 cpuctx->task_ctx = NULL;
2639 }
2640
2641 /*
2642 * Always update time if it was set, not only when it changes.
2643 * Otherwise we can 'forget' to update time for any but the last
2644 * context we sched out. For example:
2645 *
2646 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
2647 * ctx_sched_out(.event_type = EVENT_PINNED)
2648 *
2649 * would only update time for the pinned events.
2650 */
2651 if (is_active & EVENT_TIME) {
2652 /* update (and stop) ctx time */
2653 update_context_time(ctx);
2654 update_cgrp_time_from_cpuctx(cpuctx);
2655 }
2656
2657 is_active ^= ctx->is_active; /* changed bits */
2658
2659 if (!ctx->nr_active || !(is_active & EVENT_ALL))
2660 return;
2661
2662 perf_pmu_disable(ctx->pmu);
2663 if (is_active & EVENT_PINNED) {
2664 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2665 group_sched_out(event, cpuctx, ctx);
2666 }
2667
2668 if (is_active & EVENT_FLEXIBLE) {
2669 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
2670 group_sched_out(event, cpuctx, ctx);
2671 }
2672 perf_pmu_enable(ctx->pmu);
2673 }
2674
2675 /*
2676 * Test whether two contexts are equivalent, i.e. whether they have both been
2677 * cloned from the same version of the same context.
2678 *
2679 * Equivalence is measured using a generation number in the context that is
2680 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2681 * and list_del_event().
2682 */
2683 static int context_equiv(struct perf_event_context *ctx1,
2684 struct perf_event_context *ctx2)
2685 {
2686 lockdep_assert_held(&ctx1->lock);
2687 lockdep_assert_held(&ctx2->lock);
2688
2689 /* Pinning disables the swap optimization */
2690 if (ctx1->pin_count || ctx2->pin_count)
2691 return 0;
2692
2693 /* If ctx1 is the parent of ctx2 */
2694 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2695 return 1;
2696
2697 /* If ctx2 is the parent of ctx1 */
2698 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2699 return 1;
2700
2701 /*
2702 * If ctx1 and ctx2 have the same parent, we flatten the parent
2703 * hierarchy, see perf_event_init_context().
2704 */
2705 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2706 ctx1->parent_gen == ctx2->parent_gen)
2707 return 1;
2708
2709 /* Unmatched */
2710 return 0;
2711 }
2712
2713 static void __perf_event_sync_stat(struct perf_event *event,
2714 struct perf_event *next_event)
2715 {
2716 u64 value;
2717
2718 if (!event->attr.inherit_stat)
2719 return;
2720
2721 /*
2722 * Update the event value, we cannot use perf_event_read()
2723 * because we're in the middle of a context switch and have IRQs
2724 * disabled, which upsets smp_call_function_single(), however
2725 * we know the event must be on the current CPU, therefore we
2726 * don't need to use it.
2727 */
2728 switch (event->state) {
2729 case PERF_EVENT_STATE_ACTIVE:
2730 event->pmu->read(event);
2731 /* fall-through */
2732
2733 case PERF_EVENT_STATE_INACTIVE:
2734 update_event_times(event);
2735 break;
2736
2737 default:
2738 break;
2739 }
2740
2741 /*
2742 * In order to keep per-task stats reliable we need to flip the event
2743 * values when we flip the contexts.
2744 */
2745 value = local64_read(&next_event->count);
2746 value = local64_xchg(&event->count, value);
2747 local64_set(&next_event->count, value);
2748
2749 swap(event->total_time_enabled, next_event->total_time_enabled);
2750 swap(event->total_time_running, next_event->total_time_running);
2751
2752 /*
2753 * Since we swizzled the values, update the user visible data too.
2754 */
2755 perf_event_update_userpage(event);
2756 perf_event_update_userpage(next_event);
2757 }
2758
2759 static void perf_event_sync_stat(struct perf_event_context *ctx,
2760 struct perf_event_context *next_ctx)
2761 {
2762 struct perf_event *event, *next_event;
2763
2764 if (!ctx->nr_stat)
2765 return;
2766
2767 update_context_time(ctx);
2768
2769 event = list_first_entry(&ctx->event_list,
2770 struct perf_event, event_entry);
2771
2772 next_event = list_first_entry(&next_ctx->event_list,
2773 struct perf_event, event_entry);
2774
2775 while (&event->event_entry != &ctx->event_list &&
2776 &next_event->event_entry != &next_ctx->event_list) {
2777
2778 __perf_event_sync_stat(event, next_event);
2779
2780 event = list_next_entry(event, event_entry);
2781 next_event = list_next_entry(next_event, event_entry);
2782 }
2783 }
2784
2785 static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2786 struct task_struct *next)
2787 {
2788 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
2789 struct perf_event_context *next_ctx;
2790 struct perf_event_context *parent, *next_parent;
2791 struct perf_cpu_context *cpuctx;
2792 int do_switch = 1;
2793
2794 if (likely(!ctx))
2795 return;
2796
2797 cpuctx = __get_cpu_context(ctx);
2798 if (!cpuctx->task_ctx)
2799 return;
2800
2801 rcu_read_lock();
2802 next_ctx = next->perf_event_ctxp[ctxn];
2803 if (!next_ctx)
2804 goto unlock;
2805
2806 parent = rcu_dereference(ctx->parent_ctx);
2807 next_parent = rcu_dereference(next_ctx->parent_ctx);
2808
2809 /* If neither context has a parent context, they cannot be clones. */
2810 if (!parent && !next_parent)
2811 goto unlock;
2812
2813 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
2814 /*
2815 * Looks like the two contexts are clones, so we might be
2816 * able to optimize the context switch. We lock both
2817 * contexts and check that they are clones under the
2818 * lock (including re-checking that neither has been
2819 * uncloned in the meantime). It doesn't matter which
2820 * order we take the locks because no other cpu could
2821 * be trying to lock both of these tasks.
2822 */
2823 raw_spin_lock(&ctx->lock);
2824 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2825 if (context_equiv(ctx, next_ctx)) {
2826 WRITE_ONCE(ctx->task, next);
2827 WRITE_ONCE(next_ctx->task, task);
2828
2829 swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
2830
2831 /*
2832 * RCU_INIT_POINTER here is safe because we've not
2833 * modified the ctx and the above modifications of
2834 * ctx->task and ctx->task_ctx_data are immaterial
2835 * since those values are always verified under
2836 * ctx->lock which we're now holding.
2837 */
2838 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
2839 RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
2840
2841 do_switch = 0;
2842
2843 perf_event_sync_stat(ctx, next_ctx);
2844 }
2845 raw_spin_unlock(&next_ctx->lock);
2846 raw_spin_unlock(&ctx->lock);
2847 }
2848 unlock:
2849 rcu_read_unlock();
2850
2851 if (do_switch) {
2852 raw_spin_lock(&ctx->lock);
2853 task_ctx_sched_out(cpuctx, ctx);
2854 raw_spin_unlock(&ctx->lock);
2855 }
2856 }
2857
2858 static DEFINE_PER_CPU(struct list_head, sched_cb_list);
2859
2860 void perf_sched_cb_dec(struct pmu *pmu)
2861 {
2862 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2863
2864 this_cpu_dec(perf_sched_cb_usages);
2865
2866 if (!--cpuctx->sched_cb_usage)
2867 list_del(&cpuctx->sched_cb_entry);
2868 }
2869
2870
2871 void perf_sched_cb_inc(struct pmu *pmu)
2872 {
2873 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2874
2875 if (!cpuctx->sched_cb_usage++)
2876 list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
2877
2878 this_cpu_inc(perf_sched_cb_usages);
2879 }
2880
2881 /*
2882 * This function provides the context switch callback to the lower code
2883 * layer. It is invoked ONLY when the context switch callback is enabled.
2884 *
2885 * This callback is relevant even to per-cpu events; for example multi event
2886 * PEBS requires this to provide PID/TID information. This requires we flush
2887 * all queued PEBS records before we context switch to a new task.
2888 */
2889 static void perf_pmu_sched_task(struct task_struct *prev,
2890 struct task_struct *next,
2891 bool sched_in)
2892 {
2893 struct perf_cpu_context *cpuctx;
2894 struct pmu *pmu;
2895
2896 if (prev == next)
2897 return;
2898
2899 list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
2900 pmu = cpuctx->unique_pmu; /* software PMUs will not have sched_task */
2901
2902 if (WARN_ON_ONCE(!pmu->sched_task))
2903 continue;
2904
2905 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2906 perf_pmu_disable(pmu);
2907
2908 pmu->sched_task(cpuctx->task_ctx, sched_in);
2909
2910 perf_pmu_enable(pmu);
2911 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2912 }
2913 }
2914
2915 static void perf_event_switch(struct task_struct *task,
2916 struct task_struct *next_prev, bool sched_in);
2917
2918 #define for_each_task_context_nr(ctxn) \
2919 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2920
2921 /*
2922 * Called from scheduler to remove the events of the current task,
2923 * with interrupts disabled.
2924 *
2925 * We stop each event and update the event value in event->count.
2926 *
2927 * This does not protect us against NMI, but disable()
2928 * sets the disabled bit in the control field of event _before_
2929 * accessing the event control register. If an NMI hits, then it will
2930 * not restart the event.
2931 */
2932 void __perf_event_task_sched_out(struct task_struct *task,
2933 struct task_struct *next)
2934 {
2935 int ctxn;
2936
2937 if (__this_cpu_read(perf_sched_cb_usages))
2938 perf_pmu_sched_task(task, next, false);
2939
2940 if (atomic_read(&nr_switch_events))
2941 perf_event_switch(task, next, false);
2942
2943 for_each_task_context_nr(ctxn)
2944 perf_event_context_sched_out(task, ctxn, next);
2945
2946 /*
2947 * if cgroup events exist on this CPU, then we need
2948 * to check if we have to switch out PMU state.
2949 * cgroup events are system-wide mode only
2950 */
2951 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
2952 perf_cgroup_sched_out(task, next);
2953 }
2954
2955 /*
2956 * Called with IRQs disabled
2957 */
2958 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2959 enum event_type_t event_type)
2960 {
2961 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2962 }
2963
2964 static void
2965 ctx_pinned_sched_in(struct perf_event_context *ctx,
2966 struct perf_cpu_context *cpuctx)
2967 {
2968 struct perf_event *event;
2969
2970 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2971 if (event->state <= PERF_EVENT_STATE_OFF)
2972 continue;
2973 if (!event_filter_match(event))
2974 continue;
2975
2976 /* may need to reset tstamp_enabled */
2977 if (is_cgroup_event(event))
2978 perf_cgroup_mark_enabled(event, ctx);
2979
2980 if (group_can_go_on(event, cpuctx, 1))
2981 group_sched_in(event, cpuctx, ctx);
2982
2983 /*
2984 * If this pinned group hasn't been scheduled,
2985 * put it in error state.
2986 */
2987 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2988 update_group_times(event);
2989 event->state = PERF_EVENT_STATE_ERROR;
2990 }
2991 }
2992 }
2993
2994 static void
2995 ctx_flexible_sched_in(struct perf_event_context *ctx,
2996 struct perf_cpu_context *cpuctx)
2997 {
2998 struct perf_event *event;
2999 int can_add_hw = 1;
3000
3001 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
3002 /* Ignore events in OFF or ERROR state */
3003 if (event->state <= PERF_EVENT_STATE_OFF)
3004 continue;
3005 /*
3006 * Listen to the 'cpu' scheduling filter constraint
3007 * of events:
3008 */
3009 if (!event_filter_match(event))
3010 continue;
3011
3012 /* may need to reset tstamp_enabled */
3013 if (is_cgroup_event(event))
3014 perf_cgroup_mark_enabled(event, ctx);
3015
3016 if (group_can_go_on(event, cpuctx, can_add_hw)) {
3017 if (group_sched_in(event, cpuctx, ctx))
3018 can_add_hw = 0;
3019 }
3020 }
3021 }
3022
3023 static void
3024 ctx_sched_in(struct perf_event_context *ctx,
3025 struct perf_cpu_context *cpuctx,
3026 enum event_type_t event_type,
3027 struct task_struct *task)
3028 {
3029 int is_active = ctx->is_active;
3030 u64 now;
3031
3032 lockdep_assert_held(&ctx->lock);
3033
3034 if (likely(!ctx->nr_events))
3035 return;
3036
3037 ctx->is_active |= (event_type | EVENT_TIME);
3038 if (ctx->task) {
3039 if (!is_active)
3040 cpuctx->task_ctx = ctx;
3041 else
3042 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3043 }
3044
3045 is_active ^= ctx->is_active; /* changed bits */
3046
3047 if (is_active & EVENT_TIME) {
3048 /* start ctx time */
3049 now = perf_clock();
3050 ctx->timestamp = now;
3051 perf_cgroup_set_timestamp(task, ctx);
3052 }
3053
3054 /*
3055 * First go through the list and put on any pinned groups
3056 * in order to give them the best chance of going on.
3057 */
3058 if (is_active & EVENT_PINNED)
3059 ctx_pinned_sched_in(ctx, cpuctx);
3060
3061 /* Then walk through the lower prio flexible groups */
3062 if (is_active & EVENT_FLEXIBLE)
3063 ctx_flexible_sched_in(ctx, cpuctx);
3064 }
3065
3066 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
3067 enum event_type_t event_type,
3068 struct task_struct *task)
3069 {
3070 struct perf_event_context *ctx = &cpuctx->ctx;
3071
3072 ctx_sched_in(ctx, cpuctx, event_type, task);
3073 }
3074
3075 static void perf_event_context_sched_in(struct perf_event_context *ctx,
3076 struct task_struct *task)
3077 {
3078 struct perf_cpu_context *cpuctx;
3079
3080 cpuctx = __get_cpu_context(ctx);
3081 if (cpuctx->task_ctx == ctx)
3082 return;
3083
3084 perf_ctx_lock(cpuctx, ctx);
3085 perf_pmu_disable(ctx->pmu);
3086 /*
3087 * We want to keep the following priority order:
3088 * cpu pinned (that don't need to move), task pinned,
3089 * cpu flexible, task flexible.
3090 */
3091 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3092 perf_event_sched_in(cpuctx, ctx, task);
3093 perf_pmu_enable(ctx->pmu);
3094 perf_ctx_unlock(cpuctx, ctx);
3095 }
3096
3097 /*
3098 * Called from scheduler to add the events of the current task
3099 * with interrupts disabled.
3100 *
3101 * We restore the event value and then enable it.
3102 *
3103 * This does not protect us against NMI, but enable()
3104 * sets the enabled bit in the control field of event _before_
3105 * accessing the event control register. If an NMI hits, then it will
3106 * keep the event running.
3107 */
3108 void __perf_event_task_sched_in(struct task_struct *prev,
3109 struct task_struct *task)
3110 {
3111 struct perf_event_context *ctx;
3112 int ctxn;
3113
3114 /*
3115 * If cgroup events exist on this CPU, then we need to check if we have
3116 * to switch in PMU state; cgroup events are system-wide mode only.
3117 *
3118 * Since cgroup events are CPU events, we must schedule these in before
3119 * we schedule in the task events.
3120 */
3121 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
3122 perf_cgroup_sched_in(prev, task);
3123
3124 for_each_task_context_nr(ctxn) {
3125 ctx = task->perf_event_ctxp[ctxn];
3126 if (likely(!ctx))
3127 continue;
3128
3129 perf_event_context_sched_in(ctx, task);
3130 }
3131
3132 if (atomic_read(&nr_switch_events))
3133 perf_event_switch(task, prev, true);
3134
3135 if (__this_cpu_read(perf_sched_cb_usages))
3136 perf_pmu_sched_task(prev, task, true);
3137 }
3138
3139 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
3140 {
3141 u64 frequency = event->attr.sample_freq;
3142 u64 sec = NSEC_PER_SEC;
3143 u64 divisor, dividend;
3144
3145 int count_fls, nsec_fls, frequency_fls, sec_fls;
3146
3147 count_fls = fls64(count);
3148 nsec_fls = fls64(nsec);
3149 frequency_fls = fls64(frequency);
3150 sec_fls = 30;
3151
3152 /*
3153 * We got @count in @nsec, with a target of sample_freq HZ
3154 * the target period becomes:
3155 *
3156 * @count * 10^9
3157 * period = -------------------
3158 * @nsec * sample_freq
3159 *
3160 */
3161
3162 /*
3163 * Reduce accuracy by one bit such that @a and @b converge
3164 * to a similar magnitude.
3165 */
3166 #define REDUCE_FLS(a, b) \
3167 do { \
3168 if (a##_fls > b##_fls) { \
3169 a >>= 1; \
3170 a##_fls--; \
3171 } else { \
3172 b >>= 1; \
3173 b##_fls--; \
3174 } \
3175 } while (0)
3176
3177 /*
3178 * Reduce accuracy until either term fits in a u64, then proceed with
3179 * the other, so that finally we can do a u64/u64 division.
3180 */
3181 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
3182 REDUCE_FLS(nsec, frequency);
3183 REDUCE_FLS(sec, count);
3184 }
3185
3186 if (count_fls + sec_fls > 64) {
3187 divisor = nsec * frequency;
3188
3189 while (count_fls + sec_fls > 64) {
3190 REDUCE_FLS(count, sec);
3191 divisor >>= 1;
3192 }
3193
3194 dividend = count * sec;
3195 } else {
3196 dividend = count * sec;
3197
3198 while (nsec_fls + frequency_fls > 64) {
3199 REDUCE_FLS(nsec, frequency);
3200 dividend >>= 1;
3201 }
3202
3203 divisor = nsec * frequency;
3204 }
3205
3206 if (!divisor)
3207 return dividend;
3208
3209 return div64_u64(dividend, divisor);
3210 }
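/*
 * Worked example of the formula above: with sample_freq = 4000 Hz and a
 * measurement of count = 2,000,000 events in nsec = 4,000,000 ns, the event
 * rate is 5 * 10^8 events/sec, so
 *
 *	period = count * 10^9 / (nsec * sample_freq)
 *	       = 2e6 * 1e9 / (4e6 * 4e3) = 125,000
 *
 * i.e. one sample every 125,000 events gives roughly 4000 samples/sec.
 * The REDUCE_FLS() dance above only exists to keep the intermediate
 * products from overflowing 64 bits.
 */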
3211
3212 static DEFINE_PER_CPU(int, perf_throttled_count);
3213 static DEFINE_PER_CPU(u64, perf_throttled_seq);
3214
3215 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
3216 {
3217 struct hw_perf_event *hwc = &event->hw;
3218 s64 period, sample_period;
3219 s64 delta;
3220
3221 period = perf_calculate_period(event, nsec, count);
3222
3223 delta = (s64)(period - hwc->sample_period);
3224 delta = (delta + 7) / 8; /* low pass filter */
3225
3226 sample_period = hwc->sample_period + delta;
3227
3228 if (!sample_period)
3229 sample_period = 1;
3230
3231 hwc->sample_period = sample_period;
3232
3233 if (local64_read(&hwc->period_left) > 8*sample_period) {
3234 if (disable)
3235 event->pmu->stop(event, PERF_EF_UPDATE);
3236
3237 local64_set(&hwc->period_left, 0);
3238
3239 if (disable)
3240 event->pmu->start(event, PERF_EF_RELOAD);
3241 }
3242 }
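/*
 * Worked example of the low-pass filter above: if hwc->sample_period is
 * currently 100,000 and perf_calculate_period() suggests 180,000, then
 * delta = 80,000 and only (delta + 7) / 8 = 10,000 of it is applied,
 * giving a new sample_period of 110,000. The period thus converges on the
 * target over several ticks instead of jumping, which damps oscillation
 * for bursty workloads.
 */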
3243
3244 /*
3245 * combine freq adjustment with unthrottling to avoid two passes over the
3246 * events. At the same time, make sure that having freq events does not change
3247 * the rate of unthrottling as that would introduce bias.
3248 */
3249 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
3250 int needs_unthr)
3251 {
3252 struct perf_event *event;
3253 struct hw_perf_event *hwc;
3254 u64 now, period = TICK_NSEC;
3255 s64 delta;
3256
3257 /*
3258 * only need to iterate over all events iff:
3259 * - the context has events in frequency mode (needs freq adjust)
3260 * - there are events to unthrottle on this cpu
3261 */
3262 if (!(ctx->nr_freq || needs_unthr))
3263 return;
3264
3265 raw_spin_lock(&ctx->lock);
3266 perf_pmu_disable(ctx->pmu);
3267
3268 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3269 if (event->state != PERF_EVENT_STATE_ACTIVE)
3270 continue;
3271
3272 if (!event_filter_match(event))
3273 continue;
3274
3275 perf_pmu_disable(event->pmu);
3276
3277 hwc = &event->hw;
3278
3279 if (hwc->interrupts == MAX_INTERRUPTS) {
3280 hwc->interrupts = 0;
3281 perf_log_throttle(event, 1);
3282 event->pmu->start(event, 0);
3283 }
3284
3285 if (!event->attr.freq || !event->attr.sample_freq)
3286 goto next;
3287
3288 /*
3289 * stop the event and update event->count
3290 */
3291 event->pmu->stop(event, PERF_EF_UPDATE);
3292
3293 now = local64_read(&event->count);
3294 delta = now - hwc->freq_count_stamp;
3295 hwc->freq_count_stamp = now;
3296
3297 /*
3298 * restart the event
3299 * reload only if value has changed
3300 * we have stopped the event so tell that
3301 * to perf_adjust_period() to avoid stopping it
3302 * twice.
3303 */
3304 if (delta > 0)
3305 perf_adjust_period(event, period, delta, false);
3306
3307 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
3308 next:
3309 perf_pmu_enable(event->pmu);
3310 }
3311
3312 perf_pmu_enable(ctx->pmu);
3313 raw_spin_unlock(&ctx->lock);
3314 }
3315
3316 /*
3317 * Round-robin a context's events:
3318 */
3319 static void rotate_ctx(struct perf_event_context *ctx)
3320 {
3321 /*
3322 * Rotate the first entry of the non-pinned groups to the end. Rotation might be
3323 * disabled by the inheritance code.
3324 */
3325 if (!ctx->rotate_disable)
3326 list_rotate_left(&ctx->flexible_groups);
3327 }
3328
3329 static int perf_rotate_context(struct perf_cpu_context *cpuctx)
3330 {
3331 struct perf_event_context *ctx = NULL;
3332 int rotate = 0;
3333
3334 if (cpuctx->ctx.nr_events) {
3335 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
3336 rotate = 1;
3337 }
3338
3339 ctx = cpuctx->task_ctx;
3340 if (ctx && ctx->nr_events) {
3341 if (ctx->nr_events != ctx->nr_active)
3342 rotate = 1;
3343 }
3344
3345 if (!rotate)
3346 goto done;
3347
3348 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3349 perf_pmu_disable(cpuctx->ctx.pmu);
3350
3351 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3352 if (ctx)
3353 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
3354
3355 rotate_ctx(&cpuctx->ctx);
3356 if (ctx)
3357 rotate_ctx(ctx);
3358
3359 perf_event_sched_in(cpuctx, ctx, current);
3360
3361 perf_pmu_enable(cpuctx->ctx.pmu);
3362 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
3363 done:
3364
3365 return rotate;
3366 }
3367
3368 void perf_event_task_tick(void)
3369 {
3370 struct list_head *head = this_cpu_ptr(&active_ctx_list);
3371 struct perf_event_context *ctx, *tmp;
3372 int throttled;
3373
3374 WARN_ON(!irqs_disabled());
3375
3376 __this_cpu_inc(perf_throttled_seq);
3377 throttled = __this_cpu_xchg(perf_throttled_count, 0);
3378 tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
3379
3380 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
3381 perf_adjust_freq_unthr_context(ctx, throttled);
3382 }
3383
3384 static int event_enable_on_exec(struct perf_event *event,
3385 struct perf_event_context *ctx)
3386 {
3387 if (!event->attr.enable_on_exec)
3388 return 0;
3389
3390 event->attr.enable_on_exec = 0;
3391 if (event->state >= PERF_EVENT_STATE_INACTIVE)
3392 return 0;
3393
3394 __perf_event_mark_enabled(event);
3395
3396 return 1;
3397 }
3398
3399 /*
3400 * Enable all of a task's events that have been marked enable-on-exec.
3401 * This expects task == current.
3402 */
3403 static void perf_event_enable_on_exec(int ctxn)
3404 {
3405 struct perf_event_context *ctx, *clone_ctx = NULL;
3406 struct perf_cpu_context *cpuctx;
3407 struct perf_event *event;
3408 unsigned long flags;
3409 int enabled = 0;
3410
3411 local_irq_save(flags);
3412 ctx = current->perf_event_ctxp[ctxn];
3413 if (!ctx || !ctx->nr_events)
3414 goto out;
3415
3416 cpuctx = __get_cpu_context(ctx);
3417 perf_ctx_lock(cpuctx, ctx);
3418 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
3419 list_for_each_entry(event, &ctx->event_list, event_entry)
3420 enabled |= event_enable_on_exec(event, ctx);
3421
3422 /*
3423 * Unclone and reschedule this context if we enabled any event.
3424 */
3425 if (enabled) {
3426 clone_ctx = unclone_ctx(ctx);
3427 ctx_resched(cpuctx, ctx);
3428 }
3429 perf_ctx_unlock(cpuctx, ctx);
3430
3431 out:
3432 local_irq_restore(flags);
3433
3434 if (clone_ctx)
3435 put_ctx(clone_ctx);
3436 }
3437
3438 struct perf_read_data {
3439 struct perf_event *event;
3440 bool group;
3441 int ret;
3442 };
3443
3444 static int find_cpu_to_read(struct perf_event *event, int local_cpu)
3445 {
3446 int event_cpu = event->oncpu;
3447 u16 local_pkg, event_pkg;
3448
3449 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
3450 event_pkg = topology_physical_package_id(event_cpu);
3451 local_pkg = topology_physical_package_id(local_cpu);
3452
3453 if (event_pkg == local_pkg)
3454 return local_cpu;
3455 }
3456
3457 return event_cpu;
3458 }
3459
3460 /*
3461 * Cross CPU call to read the hardware event
3462 */
3463 static void __perf_event_read(void *info)
3464 {
3465 struct perf_read_data *data = info;
3466 struct perf_event *sub, *event = data->event;
3467 struct perf_event_context *ctx = event->ctx;
3468 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
3469 struct pmu *pmu = event->pmu;
3470
3471 /*
3472 * If this is a task context, we need to check whether it is
3473 * the current task context of this CPU. If not, it has been
3474 * scheduled out before the smp call arrived. In that case
3475 * event->count would have been updated to a recent sample
3476 * when the event was scheduled out.
3477 */
3478 if (ctx->task && cpuctx->task_ctx != ctx)
3479 return;
3480
3481 raw_spin_lock(&ctx->lock);
3482 if (ctx->is_active) {
3483 update_context_time(ctx);
3484 update_cgrp_time_from_event(event);
3485 }
3486
3487 update_event_times(event);
3488 if (event->state != PERF_EVENT_STATE_ACTIVE)
3489 goto unlock;
3490
3491 if (!data->group) {
3492 pmu->read(event);
3493 data->ret = 0;
3494 goto unlock;
3495 }
3496
3497 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
3498
3499 pmu->read(event);
3500
3501 list_for_each_entry(sub, &event->sibling_list, group_entry) {
3502 update_event_times(sub);
3503 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
3504 /*
3505 * Use sibling's PMU rather than @event's since
3506 * the sibling could be on a different (e.g. software) PMU.
3507 */
3508 sub->pmu->read(sub);
3509 }
3510 }
3511
3512 data->ret = pmu->commit_txn(pmu);
3513
3514 unlock:
3515 raw_spin_unlock(&ctx->lock);
3516 }
3517
3518 static inline u64 perf_event_count(struct perf_event *event)
3519 {
3520 if (event->pmu->count)
3521 return event->pmu->count(event);
3522
3523 return __perf_event_count(event);
3524 }
3525
3526 /*
3527 * NMI-safe method to read a local event, that is an event
3528 * that:
3529 * - is either for the current task, or for this CPU
3530 * - does not have inherit set, since inherited task events
3531 * will not be local and we cannot read them atomically
3532 * - must not have a pmu::count method
3533 */
3534 u64 perf_event_read_local(struct perf_event *event)
3535 {
3536 unsigned long flags;
3537 u64 val;
3538
3539 /*
3540 * Disabling interrupts avoids all counter scheduling (context
3541 * switches, timer based rotation and IPIs).
3542 */
3543 local_irq_save(flags);
3544
3545 /* If this is a per-task event, it must be for current */
3546 WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
3547 event->hw.target != current);
3548
3549 /* If this is a per-CPU event, it must be for this CPU */
3550 WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
3551 event->cpu != smp_processor_id());
3552
3553 /*
3554 * It must not be an event with inherit set; we cannot read
3555 * all child counters from atomic context.
3556 */
3557 WARN_ON_ONCE(event->attr.inherit);
3558
3559 /*
3560 * It must not have a pmu::count method, those are not
3561 * NMI safe.
3562 */
3563 WARN_ON_ONCE(event->pmu->count);
3564
3565 /*
3566 * If the event is currently on this CPU, it's either a per-task event,
3567 * or local to this CPU. Furthermore, it means it's ACTIVE (otherwise
3568 * oncpu == -1).
3569 */
3570 if (event->oncpu == smp_processor_id())
3571 event->pmu->read(event);
3572
3573 val = local64_read(&event->count);
3574 local_irq_restore(flags);
3575
3576 return val;
3577 }
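/*
 * Illustrative sketch; 'sample_counter' is a hypothetical helper, not kernel
 * code. A caller that satisfies the constraints above (an event for the
 * current task or this CPU, no inherit, no pmu::count) can read the counter
 * from almost any context, e.g. from a tracing/BPF-style helper:
 *
 *	static u64 sample_counter(struct perf_event *event)
 *	{
 *		// Safe even from NMI: no locks, no IPIs, just a local read.
 *		return perf_event_read_local(event);
 *	}
 */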
3578
3579 static int perf_event_read(struct perf_event *event, bool group)
3580 {
3581 int ret = 0, cpu_to_read, local_cpu;
3582
3583 /*
3584 * If event is enabled and currently active on a CPU, update the
3585 * value in the event structure:
3586 */
3587 if (event->state == PERF_EVENT_STATE_ACTIVE) {
3588 struct perf_read_data data = {
3589 .event = event,
3590 .group = group,
3591 .ret = 0,
3592 };
3593
3594 local_cpu = get_cpu();
3595 cpu_to_read = find_cpu_to_read(event, local_cpu);
3596 put_cpu();
3597
3598 /*
3599 * Purposely ignore the smp_call_function_single() return
3600 * value.
3601 *
3602 * If event->oncpu isn't a valid CPU it means the event got
3603 * scheduled out and that will have updated the event count.
3604 *
3605 * Therefore, either way, we'll have an up-to-date event count
3606 * after this.
3607 */
3608 (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
3609 ret = data.ret;
3610 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
3611 struct perf_event_context *ctx = event->ctx;
3612 unsigned long flags;
3613
3614 raw_spin_lock_irqsave(&ctx->lock, flags);
3615 /*
3616 * may read while context is not active
3617 * (e.g., thread is blocked), in that case
3618 * we cannot update context time
3619 */
3620 if (ctx->is_active) {
3621 update_context_time(ctx);
3622 update_cgrp_time_from_event(event);
3623 }
3624 if (group)
3625 update_group_times(event);
3626 else
3627 update_event_times(event);
3628 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3629 }
3630
3631 return ret;
3632 }
3633
3634 /*
3635 * Initialize the perf_event context in a task_struct:
3636 */
3637 static void __perf_event_init_context(struct perf_event_context *ctx)
3638 {
3639 raw_spin_lock_init(&ctx->lock);
3640 mutex_init(&ctx->mutex);
3641 INIT_LIST_HEAD(&ctx->active_ctx_list);
3642 INIT_LIST_HEAD(&ctx->pinned_groups);
3643 INIT_LIST_HEAD(&ctx->flexible_groups);
3644 INIT_LIST_HEAD(&ctx->event_list);
3645 atomic_set(&ctx->refcount, 1);
3646 }
3647
3648 static struct perf_event_context *
3649 alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3650 {
3651 struct perf_event_context *ctx;
3652
3653 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3654 if (!ctx)
3655 return NULL;
3656
3657 __perf_event_init_context(ctx);
3658 if (task) {
3659 ctx->task = task;
3660 get_task_struct(task);
3661 }
3662 ctx->pmu = pmu;
3663
3664 return ctx;
3665 }
3666
3667 static struct task_struct *
3668 find_lively_task_by_vpid(pid_t vpid)
3669 {
3670 struct task_struct *task;
3671
3672 rcu_read_lock();
3673 if (!vpid)
3674 task = current;
3675 else
3676 task = find_task_by_vpid(vpid);
3677 if (task)
3678 get_task_struct(task);
3679 rcu_read_unlock();
3680
3681 if (!task)
3682 return ERR_PTR(-ESRCH);
3683
3684 return task;
3685 }
3686
3687 /*
3688 * Returns a matching context with refcount and pincount.
3689 */
3690 static struct perf_event_context *
3691 find_get_context(struct pmu *pmu, struct task_struct *task,
3692 struct perf_event *event)
3693 {
3694 struct perf_event_context *ctx, *clone_ctx = NULL;
3695 struct perf_cpu_context *cpuctx;
3696 void *task_ctx_data = NULL;
3697 unsigned long flags;
3698 int ctxn, err;
3699 int cpu = event->cpu;
3700
3701 if (!task) {
3702 /* Must be root to operate on a CPU event: */
3703 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3704 return ERR_PTR(-EACCES);
3705
3706 /*
3707 * We could be clever and allow attaching an event to an
3708 * offline CPU and activate it when the CPU comes up, but
3709 * that's for later.
3710 */
3711 if (!cpu_online(cpu))
3712 return ERR_PTR(-ENODEV);
3713
3714 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
3715 ctx = &cpuctx->ctx;
3716 get_ctx(ctx);
3717 ++ctx->pin_count;
3718
3719 return ctx;
3720 }
3721
3722 err = -EINVAL;
3723 ctxn = pmu->task_ctx_nr;
3724 if (ctxn < 0)
3725 goto errout;
3726
3727 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
3728 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
3729 if (!task_ctx_data) {
3730 err = -ENOMEM;
3731 goto errout;
3732 }
3733 }
3734
3735 retry:
3736 ctx = perf_lock_task_context(task, ctxn, &flags);
3737 if (ctx) {
3738 clone_ctx = unclone_ctx(ctx);
3739 ++ctx->pin_count;
3740
3741 if (task_ctx_data && !ctx->task_ctx_data) {
3742 ctx->task_ctx_data = task_ctx_data;
3743 task_ctx_data = NULL;
3744 }
3745 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3746
3747 if (clone_ctx)
3748 put_ctx(clone_ctx);
3749 } else {
3750 ctx = alloc_perf_context(pmu, task);
3751 err = -ENOMEM;
3752 if (!ctx)
3753 goto errout;
3754
3755 if (task_ctx_data) {
3756 ctx->task_ctx_data = task_ctx_data;
3757 task_ctx_data = NULL;
3758 }
3759
3760 err = 0;
3761 mutex_lock(&task->perf_event_mutex);
3762 /*
3763 * If it has already passed perf_event_exit_task(),
3764 * we must see PF_EXITING; it takes this mutex too.
3765 */
3766 if (task->flags & PF_EXITING)
3767 err = -ESRCH;
3768 else if (task->perf_event_ctxp[ctxn])
3769 err = -EAGAIN;
3770 else {
3771 get_ctx(ctx);
3772 ++ctx->pin_count;
3773 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
3774 }
3775 mutex_unlock(&task->perf_event_mutex);
3776
3777 if (unlikely(err)) {
3778 put_ctx(ctx);
3779
3780 if (err == -EAGAIN)
3781 goto retry;
3782 goto errout;
3783 }
3784 }
3785
3786 kfree(task_ctx_data);
3787 return ctx;
3788
3789 errout:
3790 kfree(task_ctx_data);
3791 return ERR_PTR(err);
3792 }
3793
3794 static void perf_event_free_filter(struct perf_event *event);
3795 static void perf_event_free_bpf_prog(struct perf_event *event);
3796
3797 static void free_event_rcu(struct rcu_head *head)
3798 {
3799 struct perf_event *event;
3800
3801 event = container_of(head, struct perf_event, rcu_head);
3802 if (event->ns)
3803 put_pid_ns(event->ns);
3804 perf_event_free_filter(event);
3805 kfree(event);
3806 }
3807
3808 static void ring_buffer_attach(struct perf_event *event,
3809 struct ring_buffer *rb);
3810
3811 static void detach_sb_event(struct perf_event *event)
3812 {
3813 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
3814
3815 raw_spin_lock(&pel->lock);
3816 list_del_rcu(&event->sb_list);
3817 raw_spin_unlock(&pel->lock);
3818 }
3819
3820 static bool is_sb_event(struct perf_event *event)
3821 {
3822 struct perf_event_attr *attr = &event->attr;
3823
3824 if (event->parent)
3825 return false;
3826
3827 if (event->attach_state & PERF_ATTACH_TASK)
3828 return false;
3829
3830 if (attr->mmap || attr->mmap_data || attr->mmap2 ||
3831 attr->comm || attr->comm_exec ||
3832 attr->task ||
3833 attr->context_switch)
3834 return true;
3835 return false;
3836 }
3837
3838 static void unaccount_pmu_sb_event(struct perf_event *event)
3839 {
3840 if (is_sb_event(event))
3841 detach_sb_event(event);
3842 }
3843
3844 static void unaccount_event_cpu(struct perf_event *event, int cpu)
3845 {
3846 if (event->parent)
3847 return;
3848
3849 if (is_cgroup_event(event))
3850 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3851 }
3852
3853 #ifdef CONFIG_NO_HZ_FULL
3854 static DEFINE_SPINLOCK(nr_freq_lock);
3855 #endif
3856
3857 static void unaccount_freq_event_nohz(void)
3858 {
3859 #ifdef CONFIG_NO_HZ_FULL
3860 spin_lock(&nr_freq_lock);
3861 if (atomic_dec_and_test(&nr_freq_events))
3862 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
3863 spin_unlock(&nr_freq_lock);
3864 #endif
3865 }
3866
3867 static void unaccount_freq_event(void)
3868 {
3869 if (tick_nohz_full_enabled())
3870 unaccount_freq_event_nohz();
3871 else
3872 atomic_dec(&nr_freq_events);
3873 }
3874
3875 static void unaccount_event(struct perf_event *event)
3876 {
3877 bool dec = false;
3878
3879 if (event->parent)
3880 return;
3881
3882 if (event->attach_state & PERF_ATTACH_TASK)
3883 dec = true;
3884 if (event->attr.mmap || event->attr.mmap_data)
3885 atomic_dec(&nr_mmap_events);
3886 if (event->attr.comm)
3887 atomic_dec(&nr_comm_events);
3888 if (event->attr.task)
3889 atomic_dec(&nr_task_events);
3890 if (event->attr.freq)
3891 unaccount_freq_event();
3892 if (event->attr.context_switch) {
3893 dec = true;
3894 atomic_dec(&nr_switch_events);
3895 }
3896 if (is_cgroup_event(event))
3897 dec = true;
3898 if (has_branch_stack(event))
3899 dec = true;
3900
3901 if (dec) {
3902 if (!atomic_add_unless(&perf_sched_count, -1, 1))
3903 schedule_delayed_work(&perf_sched_work, HZ);
3904 }
3905
3906 unaccount_event_cpu(event, event->cpu);
3907
3908 unaccount_pmu_sb_event(event);
3909 }
3910
3911 static void perf_sched_delayed(struct work_struct *work)
3912 {
3913 mutex_lock(&perf_sched_mutex);
3914 if (atomic_dec_and_test(&perf_sched_count))
3915 static_branch_disable(&perf_sched_events);
3916 mutex_unlock(&perf_sched_mutex);
3917 }
3918
3919 /*
3920 * The following implement mutual exclusion of events on "exclusive" pmus
3921 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
3922 * at a time, so we disallow creating events that might conflict, namely:
3923 *
3924 * 1) cpu-wide events in the presence of per-task events,
3925 * 2) per-task events in the presence of cpu-wide events,
3926 * 3) two matching events on the same context.
3927 *
3928 * The former two cases are handled in the allocation path (perf_event_alloc(),
3929 * _free_event()), the latter -- before the first perf_install_in_context().
3930 */
3931 static int exclusive_event_init(struct perf_event *event)
3932 {
3933 struct pmu *pmu = event->pmu;
3934
3935 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3936 return 0;
3937
3938 /*
3939 * Prevent co-existence of per-task and cpu-wide events on the
3940 * same exclusive pmu.
3941 *
3942 * Negative pmu::exclusive_cnt means there are cpu-wide
3943 * events on this "exclusive" pmu, positive means there are
3944 * per-task events.
3945 *
3946 * Since this is called in perf_event_alloc() path, event::ctx
3947 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
3948 * to mean "per-task event", because unlike other attach states it
3949 * never gets cleared.
3950 */
3951 if (event->attach_state & PERF_ATTACH_TASK) {
3952 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
3953 return -EBUSY;
3954 } else {
3955 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
3956 return -EBUSY;
3957 }
3958
3959 return 0;
3960 }
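/*
 * Worked example of the sign convention above: two per-task events on the
 * same exclusive pmu leave pmu::exclusive_cnt at +2, so a subsequent
 * cpu-wide event fails atomic_dec_unless_positive() and gets -EBUSY.
 * Conversely, with one cpu-wide event the count is -1 and a per-task event
 * fails atomic_inc_unless_negative().
 */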
3961
3962 static void exclusive_event_destroy(struct perf_event *event)
3963 {
3964 struct pmu *pmu = event->pmu;
3965
3966 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3967 return;
3968
3969 /* see comment in exclusive_event_init() */
3970 if (event->attach_state & PERF_ATTACH_TASK)
3971 atomic_dec(&pmu->exclusive_cnt);
3972 else
3973 atomic_inc(&pmu->exclusive_cnt);
3974 }
3975
3976 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
3977 {
3978 if ((e1->pmu == e2->pmu) &&
3979 (e1->cpu == e2->cpu ||
3980 e1->cpu == -1 ||
3981 e2->cpu == -1))
3982 return true;
3983 return false;
3984 }
3985
3986 /* Called under the same ctx::mutex as perf_install_in_context() */
3987 static bool exclusive_event_installable(struct perf_event *event,
3988 struct perf_event_context *ctx)
3989 {
3990 struct perf_event *iter_event;
3991 struct pmu *pmu = event->pmu;
3992
3993 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3994 return true;
3995
3996 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
3997 if (exclusive_event_match(iter_event, event))
3998 return false;
3999 }
4000
4001 return true;
4002 }
4003
4004 static void perf_addr_filters_splice(struct perf_event *event,
4005 struct list_head *head);
4006
4007 static void _free_event(struct perf_event *event)
4008 {
4009 irq_work_sync(&event->pending);
4010
4011 unaccount_event(event);
4012
4013 if (event->rb) {
4014 /*
4015 * Can happen when we close an event with re-directed output.
4016 *
4017 * Since we have a 0 refcount, perf_mmap_close() will skip
4018 * over us; possibly making our ring_buffer_put() the last.
4019 */
4020 mutex_lock(&event->mmap_mutex);
4021 ring_buffer_attach(event, NULL);
4022 mutex_unlock(&event->mmap_mutex);
4023 }
4024
4025 if (is_cgroup_event(event))
4026 perf_detach_cgroup(event);
4027
4028 if (!event->parent) {
4029 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
4030 put_callchain_buffers();
4031 }
4032
4033 perf_event_free_bpf_prog(event);
4034 perf_addr_filters_splice(event, NULL);
4035 kfree(event->addr_filters_offs);
4036
4037 if (event->destroy)
4038 event->destroy(event);
4039
4040 if (event->ctx)
4041 put_ctx(event->ctx);
4042
4043 exclusive_event_destroy(event);
4044 module_put(event->pmu->module);
4045
4046 call_rcu(&event->rcu_head, free_event_rcu);
4047 }
4048
4049 /*
4050 * Used to free events with a known refcount of 1, such as inherited events
4051 * and events in error paths where the event isn't exposed yet.
4052 */
4053 static void free_event(struct perf_event *event)
4054 {
4055 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
4056 "unexpected event refcount: %ld; ptr=%p\n",
4057 atomic_long_read(&event->refcount), event)) {
4058 /* leak to avoid use-after-free */
4059 return;
4060 }
4061
4062 _free_event(event);
4063 }
4064
4065 /*
4066 * Remove user event from the owner task.
4067 */
4068 static void perf_remove_from_owner(struct perf_event *event)
4069 {
4070 struct task_struct *owner;
4071
4072 rcu_read_lock();
4073 /*
4074 * Matches the smp_store_release() in perf_event_exit_task(). If we
4075 * observe !owner it means the list deletion is complete and we can
4076 * indeed free this event, otherwise we need to serialize on
4077 * owner->perf_event_mutex.
4078 */
4079 owner = lockless_dereference(event->owner);
4080 if (owner) {
4081 /*
4082 * Since delayed_put_task_struct() also drops the last
4083 * task reference we can safely take a new reference
4084 * while holding the rcu_read_lock().
4085 */
4086 get_task_struct(owner);
4087 }
4088 rcu_read_unlock();
4089
4090 if (owner) {
4091 /*
4092 * If we're here through perf_event_exit_task() we're already
4093 * holding ctx->mutex which would be an inversion wrt. the
4094 * normal lock order.
4095 *
4096 * However, we can safely take this lock because it's the child
4097 * ctx->mutex.
4098 */
4099 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
4100
4101 /*
4102 * We have to re-check the event->owner field; if it is cleared
4103 * we raced with perf_event_exit_task(). Acquiring the mutex
4104 * ensured they're done, and we can proceed with freeing the
4105 * event.
4106 */
4107 if (event->owner) {
4108 list_del_init(&event->owner_entry);
4109 smp_store_release(&event->owner, NULL);
4110 }
4111 mutex_unlock(&owner->perf_event_mutex);
4112 put_task_struct(owner);
4113 }
4114 }
4115
4116 static void put_event(struct perf_event *event)
4117 {
4118 if (!atomic_long_dec_and_test(&event->refcount))
4119 return;
4120
4121 _free_event(event);
4122 }
4123
4124 /*
4125 * Kill an event dead; while event::refcount will preserve the event
4126 * object, it will not preserve its functionality. Once the last 'user'
4127 * gives up the object, we'll destroy the thing.
4128 */
4129 int perf_event_release_kernel(struct perf_event *event)
4130 {
4131 struct perf_event_context *ctx = event->ctx;
4132 struct perf_event *child, *tmp;
4133
4134 /*
4135 * If we got here through err_file: fput(event_file); we will not have
4136 * attached to a context yet.
4137 */
4138 if (!ctx) {
4139 WARN_ON_ONCE(event->attach_state &
4140 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
4141 goto no_ctx;
4142 }
4143
4144 if (!is_kernel_event(event))
4145 perf_remove_from_owner(event);
4146
4147 ctx = perf_event_ctx_lock(event);
4148 WARN_ON_ONCE(ctx->parent_ctx);
4149 perf_remove_from_context(event, DETACH_GROUP);
4150
4151 raw_spin_lock_irq(&ctx->lock);
4152 /*
4153 * Mark this event as STATE_DEAD; there is no external reference to it
4154 * anymore.
4155 *
4156 * Anybody acquiring event->child_mutex after the below loop _must_
4157 * also see this, most importantly inherit_event() which will avoid
4158 * placing more children on the list.
4159 *
4160 * Thus this guarantees that we will in fact observe and kill _ALL_
4161 * child events.
4162 */
4163 event->state = PERF_EVENT_STATE_DEAD;
4164 raw_spin_unlock_irq(&ctx->lock);
4165
4166 perf_event_ctx_unlock(event, ctx);
4167
4168 again:
4169 mutex_lock(&event->child_mutex);
4170 list_for_each_entry(child, &event->child_list, child_list) {
4171
4172 /*
4173 * Cannot change, child events are not migrated, see the
4174 * comment with perf_event_ctx_lock_nested().
4175 */
4176 ctx = lockless_dereference(child->ctx);
4177 /*
4178 * Since child_mutex nests inside ctx::mutex, we must jump
4179 * through hoops. We start by grabbing a reference on the ctx.
4180 *
4181 * Since the event cannot get freed while we hold the
4182 * child_mutex, the context must also exist and have a !0
4183 * reference count.
4184 */
4185 get_ctx(ctx);
4186
4187 /*
4188 * Now that we have a ctx ref, we can drop child_mutex, and
4189 * acquire ctx::mutex without fear of it going away. Then we
4190 * can re-acquire child_mutex.
4191 */
4192 mutex_unlock(&event->child_mutex);
4193 mutex_lock(&ctx->mutex);
4194 mutex_lock(&event->child_mutex);
4195
4196 /*
4197 * Now that we hold ctx::mutex and child_mutex, revalidate our
4198 * state: if child is still the first entry, it didn't get freed
4199 * and we can proceed to free it.
4200 */
4201 tmp = list_first_entry_or_null(&event->child_list,
4202 struct perf_event, child_list);
4203 if (tmp == child) {
4204 perf_remove_from_context(child, DETACH_GROUP);
4205 list_del(&child->child_list);
4206 free_event(child);
4207 /*
4208 * This matches the refcount bump in inherit_event();
4209 * this can't be the last reference.
4210 */
4211 put_event(event);
4212 }
4213
4214 mutex_unlock(&event->child_mutex);
4215 mutex_unlock(&ctx->mutex);
4216 put_ctx(ctx);
4217 goto again;
4218 }
4219 mutex_unlock(&event->child_mutex);
4220
4221 no_ctx:
4222 put_event(event); /* Must be the 'last' reference */
4223 return 0;
4224 }
4225 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
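
/*
 * Minimal illustrative sketch (the attribute choice and the function name
 * are hypothetical; perf_event_create_kernel_counter() is the in-kernel
 * allocation API declared in <linux/perf_event.h>): an in-kernel counter
 * whose lifetime ends in perf_event_release_kernel() above.
 */
static struct perf_event * __maybe_unused example_cycles_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
	};

	/* cpu-wide counter: no task, no overflow handler, no context */
	return perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
}
/* ...and when the owner is done with it: perf_event_release_kernel(event); */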
4226
4227 /*
4228 * Called when the last reference to the file is gone.
4229 */
4230 static int perf_release(struct inode *inode, struct file *file)
4231 {
4232 perf_event_release_kernel(file->private_data);
4233 return 0;
4234 }
4235
4236 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
4237 {
4238 struct perf_event *child;
4239 u64 total = 0;
4240
4241 *enabled = 0;
4242 *running = 0;
4243
4244 mutex_lock(&event->child_mutex);
4245
4246 (void)perf_event_read(event, false);
4247 total += perf_event_count(event);
4248
4249 *enabled += event->total_time_enabled +
4250 atomic64_read(&event->child_total_time_enabled);
4251 *running += event->total_time_running +
4252 atomic64_read(&event->child_total_time_running);
4253
4254 list_for_each_entry(child, &event->child_list, child_list) {
4255 (void)perf_event_read(child, false);
4256 total += perf_event_count(child);
4257 *enabled += child->total_time_enabled;
4258 *running += child->total_time_running;
4259 }
4260 mutex_unlock(&event->child_mutex);
4261
4262 return total;
4263 }
4264 EXPORT_SYMBOL_GPL(perf_event_read_value);
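
/*
 * Minimal illustrative sketch (the helper name is hypothetical): reading
 * the aggregate value of an in-kernel event with perf_event_read_value()
 * above. @enabled and @running can be used by the caller to scale the
 * count when the event was multiplexed.
 */
static u64 __maybe_unused example_read_total(struct perf_event *event)
{
	u64 enabled, running;

	/* sum of the event's own count and all inherited children */
	return perf_event_read_value(event, &enabled, &running);
}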
4265
4266 static int __perf_read_group_add(struct perf_event *leader,
4267 u64 read_format, u64 *values)
4268 {
4269 struct perf_event *sub;
4270 int n = 1; /* skip @nr */
4271 int ret;
4272
4273 ret = perf_event_read(leader, true);
4274 if (ret)
4275 return ret;
4276
4277 /*
4278 * Since we co-schedule groups, {enabled,running} times of siblings
4279 * will be identical to those of the leader, so we only publish one
4280 * set.
4281 */
4282 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4283 values[n++] += leader->total_time_enabled +
4284 atomic64_read(&leader->child_total_time_enabled);
4285 }
4286
4287 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4288 values[n++] += leader->total_time_running +
4289 atomic64_read(&leader->child_total_time_running);
4290 }
4291
4292 /*
4293 * Write {count,id} tuples for every sibling.
4294 */
4295 values[n++] += perf_event_count(leader);
4296 if (read_format & PERF_FORMAT_ID)
4297 values[n++] = primary_event_id(leader);
4298
4299 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4300 values[n++] += perf_event_count(sub);
4301 if (read_format & PERF_FORMAT_ID)
4302 values[n++] = primary_event_id(sub);
4303 }
4304
4305 return 0;
4306 }
4307
4308 static int perf_read_group(struct perf_event *event,
4309 u64 read_format, char __user *buf)
4310 {
4311 struct perf_event *leader = event->group_leader, *child;
4312 struct perf_event_context *ctx = leader->ctx;
4313 int ret;
4314 u64 *values;
4315
4316 lockdep_assert_held(&ctx->mutex);
4317
4318 values = kzalloc(event->read_size, GFP_KERNEL);
4319 if (!values)
4320 return -ENOMEM;
4321
4322 values[0] = 1 + leader->nr_siblings;
4323
4324 /*
4325 * By locking the child_mutex of the leader we effectively
4326 * lock the child list of all siblings. XXX: explain how.
4327 */
4328 mutex_lock(&leader->child_mutex);
4329
4330 ret = __perf_read_group_add(leader, read_format, values);
4331 if (ret)
4332 goto unlock;
4333
4334 list_for_each_entry(child, &leader->child_list, child_list) {
4335 ret = __perf_read_group_add(child, read_format, values);
4336 if (ret)
4337 goto unlock;
4338 }
4339
4340 mutex_unlock(&leader->child_mutex);
4341
4342 ret = event->read_size;
4343 if (copy_to_user(buf, values, event->read_size))
4344 ret = -EFAULT;
4345 goto out;
4346
4347 unlock:
4348 mutex_unlock(&leader->child_mutex);
4349 out:
4350 kfree(values);
4351 return ret;
4352 }
4353
4354 static int perf_read_one(struct perf_event *event,
4355 u64 read_format, char __user *buf)
4356 {
4357 u64 enabled, running;
4358 u64 values[4];
4359 int n = 0;
4360
4361 values[n++] = perf_event_read_value(event, &enabled, &running);
4362 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4363 values[n++] = enabled;
4364 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4365 values[n++] = running;
4366 if (read_format & PERF_FORMAT_ID)
4367 values[n++] = primary_event_id(event);
4368
4369 if (copy_to_user(buf, values, n * sizeof(u64)))
4370 return -EFAULT;
4371
4372 return n * sizeof(u64);
4373 }
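
/*
 * Illustrative layout (a sketch of what the code above produces, not an
 * additional ABI definition): a read() on the event fd with
 * read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 *               PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID
 * returns four u64 values in this order:
 *
 *	u64 value;		aggregate count (perf_event_read_value())
 *	u64 time_enabled;
 *	u64 time_running;
 *	u64 id;			primary_event_id()
 *
 * With PERF_FORMAT_GROUP the buffer instead starts with u64 nr, the
 * optional time fields, and one {value[, id]} pair per group member,
 * see perf_read_group().
 */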
4374
4375 static bool is_event_hup(struct perf_event *event)
4376 {
4377 bool no_children;
4378
4379 if (event->state > PERF_EVENT_STATE_EXIT)
4380 return false;
4381
4382 mutex_lock(&event->child_mutex);
4383 no_children = list_empty(&event->child_list);
4384 mutex_unlock(&event->child_mutex);
4385 return no_children;
4386 }
4387
4388 /*
4389 * Read the performance event - simple non blocking version for now
4390 */
4391 static ssize_t
4392 __perf_read(struct perf_event *event, char __user *buf, size_t count)
4393 {
4394 u64 read_format = event->attr.read_format;
4395 int ret;
4396
4397 /*
4398 * Return end-of-file for a read on an event that is in
4399 * error state (i.e. because it was pinned but it couldn't be
4400 * scheduled on to the CPU at some point).
4401 */
4402 if (event->state == PERF_EVENT_STATE_ERROR)
4403 return 0;
4404
4405 if (count < event->read_size)
4406 return -ENOSPC;
4407
4408 WARN_ON_ONCE(event->ctx->parent_ctx);
4409 if (read_format & PERF_FORMAT_GROUP)
4410 ret = perf_read_group(event, read_format, buf);
4411 else
4412 ret = perf_read_one(event, read_format, buf);
4413
4414 return ret;
4415 }
4416
4417 static ssize_t
4418 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
4419 {
4420 struct perf_event *event = file->private_data;
4421 struct perf_event_context *ctx;
4422 int ret;
4423
4424 ctx = perf_event_ctx_lock(event);
4425 ret = __perf_read(event, buf, count);
4426 perf_event_ctx_unlock(event, ctx);
4427
4428 return ret;
4429 }
4430
4431 static unsigned int perf_poll(struct file *file, poll_table *wait)
4432 {
4433 struct perf_event *event = file->private_data;
4434 struct ring_buffer *rb;
4435 unsigned int events = POLLHUP;
4436
4437 poll_wait(file, &event->waitq, wait);
4438
4439 if (is_event_hup(event))
4440 return events;
4441
4442 /*
4443 * Pin the event->rb by taking event->mmap_mutex; otherwise
4444 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
4445 */
4446 mutex_lock(&event->mmap_mutex);
4447 rb = event->rb;
4448 if (rb)
4449 events = atomic_xchg(&rb->poll, 0);
4450 mutex_unlock(&event->mmap_mutex);
4451 return events;
4452 }
4453
4454 static void _perf_event_reset(struct perf_event *event)
4455 {
4456 (void)perf_event_read(event, false);
4457 local64_set(&event->count, 0);
4458 perf_event_update_userpage(event);
4459 }
4460
4461 /*
4462 * Holding the top-level event's child_mutex means that any
4463 * descendant process that has inherited this event will block
4464 * in perf_event_exit_event() if it goes to exit, thus satisfying the
4465 * task existence requirements of perf_event_enable/disable.
4466 */
4467 static void perf_event_for_each_child(struct perf_event *event,
4468 void (*func)(struct perf_event *))
4469 {
4470 struct perf_event *child;
4471
4472 WARN_ON_ONCE(event->ctx->parent_ctx);
4473
4474 mutex_lock(&event->child_mutex);
4475 func(event);
4476 list_for_each_entry(child, &event->child_list, child_list)
4477 func(child);
4478 mutex_unlock(&event->child_mutex);
4479 }
4480
4481 static void perf_event_for_each(struct perf_event *event,
4482 void (*func)(struct perf_event *))
4483 {
4484 struct perf_event_context *ctx = event->ctx;
4485 struct perf_event *sibling;
4486
4487 lockdep_assert_held(&ctx->mutex);
4488
4489 event = event->group_leader;
4490
4491 perf_event_for_each_child(event, func);
4492 list_for_each_entry(sibling, &event->sibling_list, group_entry)
4493 perf_event_for_each_child(sibling, func);
4494 }
4495
4496 static void __perf_event_period(struct perf_event *event,
4497 struct perf_cpu_context *cpuctx,
4498 struct perf_event_context *ctx,
4499 void *info)
4500 {
4501 u64 value = *((u64 *)info);
4502 bool active;
4503
4504 if (event->attr.freq) {
4505 event->attr.sample_freq = value;
4506 } else {
4507 event->attr.sample_period = value;
4508 event->hw.sample_period = value;
4509 }
4510
4511 active = (event->state == PERF_EVENT_STATE_ACTIVE);
4512 if (active) {
4513 perf_pmu_disable(ctx->pmu);
4514 /*
4515 * We could be throttled; unthrottle now to avoid the tick
4516 * trying to unthrottle while we already re-started the event.
4517 */
4518 if (event->hw.interrupts == MAX_INTERRUPTS) {
4519 event->hw.interrupts = 0;
4520 perf_log_throttle(event, 1);
4521 }
4522 event->pmu->stop(event, PERF_EF_UPDATE);
4523 }
4524
4525 local64_set(&event->hw.period_left, 0);
4526
4527 if (active) {
4528 event->pmu->start(event, PERF_EF_RELOAD);
4529 perf_pmu_enable(ctx->pmu);
4530 }
4531 }
4532
4533 static int perf_event_period(struct perf_event *event, u64 __user *arg)
4534 {
4535 u64 value;
4536
4537 if (!is_sampling_event(event))
4538 return -EINVAL;
4539
4540 if (copy_from_user(&value, arg, sizeof(value)))
4541 return -EFAULT;
4542
4543 if (!value)
4544 return -EINVAL;
4545
4546 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4547 return -EINVAL;
4548
4549 event_function_call(event, __perf_event_period, &value);
4550
4551 return 0;
4552 }
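
/*
 * Illustrative user-space sketch (assumes an fd obtained from
 * perf_event_open(2) for a sampling event; not kernel code):
 *
 *	u64 period = 100000;
 *
 *	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period))
 *		err(1, "PERF_EVENT_IOC_PERIOD");
 *
 * For a frequency-based event (attr.freq == 1) the same ioctl updates
 * attr.sample_freq; values above
 * /proc/sys/kernel/perf_event_max_sample_rate are rejected with -EINVAL.
 */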
4553
4554 static const struct file_operations perf_fops;
4555
4556 static inline int perf_fget_light(int fd, struct fd *p)
4557 {
4558 struct fd f = fdget(fd);
4559 if (!f.file)
4560 return -EBADF;
4561
4562 if (f.file->f_op != &perf_fops) {
4563 fdput(f);
4564 return -EBADF;
4565 }
4566 *p = f;
4567 return 0;
4568 }
4569
4570 static int perf_event_set_output(struct perf_event *event,
4571 struct perf_event *output_event);
4572 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
4573 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
4574
4575 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
4576 {
4577 void (*func)(struct perf_event *);
4578 u32 flags = arg;
4579
4580 switch (cmd) {
4581 case PERF_EVENT_IOC_ENABLE:
4582 func = _perf_event_enable;
4583 break;
4584 case PERF_EVENT_IOC_DISABLE:
4585 func = _perf_event_disable;
4586 break;
4587 case PERF_EVENT_IOC_RESET:
4588 func = _perf_event_reset;
4589 break;
4590
4591 case PERF_EVENT_IOC_REFRESH:
4592 return _perf_event_refresh(event, arg);
4593
4594 case PERF_EVENT_IOC_PERIOD:
4595 return perf_event_period(event, (u64 __user *)arg);
4596
4597 case PERF_EVENT_IOC_ID:
4598 {
4599 u64 id = primary_event_id(event);
4600
4601 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4602 return -EFAULT;
4603 return 0;
4604 }
4605
4606 case PERF_EVENT_IOC_SET_OUTPUT:
4607 {
4608 int ret;
4609 if (arg != -1) {
4610 struct perf_event *output_event;
4611 struct fd output;
4612 ret = perf_fget_light(arg, &output);
4613 if (ret)
4614 return ret;
4615 output_event = output.file->private_data;
4616 ret = perf_event_set_output(event, output_event);
4617 fdput(output);
4618 } else {
4619 ret = perf_event_set_output(event, NULL);
4620 }
4621 return ret;
4622 }
4623
4624 case PERF_EVENT_IOC_SET_FILTER:
4625 return perf_event_set_filter(event, (void __user *)arg);
4626
4627 case PERF_EVENT_IOC_SET_BPF:
4628 return perf_event_set_bpf_prog(event, arg);
4629
4630 case PERF_EVENT_IOC_PAUSE_OUTPUT: {
4631 struct ring_buffer *rb;
4632
4633 rcu_read_lock();
4634 rb = rcu_dereference(event->rb);
4635 if (!rb || !rb->nr_pages) {
4636 rcu_read_unlock();
4637 return -EINVAL;
4638 }
4639 rb_toggle_paused(rb, !!arg);
4640 rcu_read_unlock();
4641 return 0;
4642 }
4643 default:
4644 return -ENOTTY;
4645 }
4646
4647 if (flags & PERF_IOC_FLAG_GROUP)
4648 perf_event_for_each(event, func);
4649 else
4650 perf_event_for_each_child(event, func);
4651
4652 return 0;
4653 }
4654
4655 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4656 {
4657 struct perf_event *event = file->private_data;
4658 struct perf_event_context *ctx;
4659 long ret;
4660
4661 ctx = perf_event_ctx_lock(event);
4662 ret = _perf_ioctl(event, cmd, arg);
4663 perf_event_ctx_unlock(event, ctx);
4664
4665 return ret;
4666 }
4667
4668 #ifdef CONFIG_COMPAT
4669 static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4670 unsigned long arg)
4671 {
4672 switch (_IOC_NR(cmd)) {
4673 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4674 case _IOC_NR(PERF_EVENT_IOC_ID):
4675 /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
4676 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4677 cmd &= ~IOCSIZE_MASK;
4678 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4679 }
4680 break;
4681 }
4682 return perf_ioctl(file, cmd, arg);
4683 }
4684 #else
4685 # define perf_compat_ioctl NULL
4686 #endif
4687
4688 int perf_event_task_enable(void)
4689 {
4690 struct perf_event_context *ctx;
4691 struct perf_event *event;
4692
4693 mutex_lock(&current->perf_event_mutex);
4694 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4695 ctx = perf_event_ctx_lock(event);
4696 perf_event_for_each_child(event, _perf_event_enable);
4697 perf_event_ctx_unlock(event, ctx);
4698 }
4699 mutex_unlock(&current->perf_event_mutex);
4700
4701 return 0;
4702 }
4703
4704 int perf_event_task_disable(void)
4705 {
4706 struct perf_event_context *ctx;
4707 struct perf_event *event;
4708
4709 mutex_lock(&current->perf_event_mutex);
4710 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4711 ctx = perf_event_ctx_lock(event);
4712 perf_event_for_each_child(event, _perf_event_disable);
4713 perf_event_ctx_unlock(event, ctx);
4714 }
4715 mutex_unlock(&current->perf_event_mutex);
4716
4717 return 0;
4718 }
4719
4720 static int perf_event_index(struct perf_event *event)
4721 {
4722 if (event->hw.state & PERF_HES_STOPPED)
4723 return 0;
4724
4725 if (event->state != PERF_EVENT_STATE_ACTIVE)
4726 return 0;
4727
4728 return event->pmu->event_idx(event);
4729 }
4730
4731 static void calc_timer_values(struct perf_event *event,
4732 u64 *now,
4733 u64 *enabled,
4734 u64 *running)
4735 {
4736 u64 ctx_time;
4737
4738 *now = perf_clock();
4739 ctx_time = event->shadow_ctx_time + *now;
4740 *enabled = ctx_time - event->tstamp_enabled;
4741 *running = ctx_time - event->tstamp_running;
4742 }
4743
4744 static void perf_event_init_userpage(struct perf_event *event)
4745 {
4746 struct perf_event_mmap_page *userpg;
4747 struct ring_buffer *rb;
4748
4749 rcu_read_lock();
4750 rb = rcu_dereference(event->rb);
4751 if (!rb)
4752 goto unlock;
4753
4754 userpg = rb->user_page;
4755
4756 /* Allow new userspace to detect that bit 0 is deprecated */
4757 userpg->cap_bit0_is_deprecated = 1;
4758 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
4759 userpg->data_offset = PAGE_SIZE;
4760 userpg->data_size = perf_data_size(rb);
4761
4762 unlock:
4763 rcu_read_unlock();
4764 }
4765
4766 void __weak arch_perf_update_userpage(
4767 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
4768 {
4769 }
4770
4771 /*
4772 * Callers need to ensure there can be no nesting of this function, otherwise
4773 * the seqlock logic goes bad. We cannot serialize this because the arch
4774 * code calls this from NMI context.
4775 */
4776 void perf_event_update_userpage(struct perf_event *event)
4777 {
4778 struct perf_event_mmap_page *userpg;
4779 struct ring_buffer *rb;
4780 u64 enabled, running, now;
4781
4782 rcu_read_lock();
4783 rb = rcu_dereference(event->rb);
4784 if (!rb)
4785 goto unlock;
4786
4787 /*
4788 * compute total_time_enabled, total_time_running
4789 * based on snapshot values taken when the event
4790 * was last scheduled in.
4791 *
4792 * we cannot simply call update_context_time()
4793 * because of locking issues, as we can be called in
4794 * NMI context
4795 */
4796 calc_timer_values(event, &now, &enabled, &running);
4797
4798 userpg = rb->user_page;
4799 /*
4800 * Disable preemption so as to not let the corresponding user-space
4801 * spin too long if we get preempted.
4802 */
4803 preempt_disable();
4804 ++userpg->lock;
4805 barrier();
4806 userpg->index = perf_event_index(event);
4807 userpg->offset = perf_event_count(event);
4808 if (userpg->index)
4809 userpg->offset -= local64_read(&event->hw.prev_count);
4810
4811 userpg->time_enabled = enabled +
4812 atomic64_read(&event->child_total_time_enabled);
4813
4814 userpg->time_running = running +
4815 atomic64_read(&event->child_total_time_running);
4816
4817 arch_perf_update_userpage(event, userpg, now);
4818
4819 barrier();
4820 ++userpg->lock;
4821 preempt_enable();
4822 unlock:
4823 rcu_read_unlock();
4824 }
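
/*
 * Illustrative user-space counterpart (a sketch along the lines of the
 * reader documented in <uapi/linux/perf_event.h>; variable names are
 * ours): how a self-monitoring reader consumes the ->lock/barrier()
 * protocol above from the mmap()ed struct perf_event_mmap_page *pc:
 *
 *	u32 seq, index;
 *	s64 count;
 *	u64 enabled, running;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		index   = pc->index;
 *		count   = pc->offset;
 *		enabled = pc->time_enabled;
 *		running = pc->time_running;
 *		if (pc->cap_user_rdpmc && index)
 *			count += rdpmc(index - 1);
 *		barrier();
 *	} while (pc->lock != seq);
 */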
4825
4826 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4827 {
4828 struct perf_event *event = vma->vm_file->private_data;
4829 struct ring_buffer *rb;
4830 int ret = VM_FAULT_SIGBUS;
4831
4832 if (vmf->flags & FAULT_FLAG_MKWRITE) {
4833 if (vmf->pgoff == 0)
4834 ret = 0;
4835 return ret;
4836 }
4837
4838 rcu_read_lock();
4839 rb = rcu_dereference(event->rb);
4840 if (!rb)
4841 goto unlock;
4842
4843 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
4844 goto unlock;
4845
4846 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
4847 if (!vmf->page)
4848 goto unlock;
4849
4850 get_page(vmf->page);
4851 vmf->page->mapping = vma->vm_file->f_mapping;
4852 vmf->page->index = vmf->pgoff;
4853
4854 ret = 0;
4855 unlock:
4856 rcu_read_unlock();
4857
4858 return ret;
4859 }
4860
4861 static void ring_buffer_attach(struct perf_event *event,
4862 struct ring_buffer *rb)
4863 {
4864 struct ring_buffer *old_rb = NULL;
4865 unsigned long flags;
4866
4867 if (event->rb) {
4868 /*
4869 * Should be impossible, we set this when removing
4870 * event->rb_entry and wait/clear when adding event->rb_entry.
4871 */
4872 WARN_ON_ONCE(event->rcu_pending);
4873
4874 old_rb = event->rb;
4875 spin_lock_irqsave(&old_rb->event_lock, flags);
4876 list_del_rcu(&event->rb_entry);
4877 spin_unlock_irqrestore(&old_rb->event_lock, flags);
4878
4879 event->rcu_batches = get_state_synchronize_rcu();
4880 event->rcu_pending = 1;
4881 }
4882
4883 if (rb) {
4884 if (event->rcu_pending) {
4885 cond_synchronize_rcu(event->rcu_batches);
4886 event->rcu_pending = 0;
4887 }
4888
4889 spin_lock_irqsave(&rb->event_lock, flags);
4890 list_add_rcu(&event->rb_entry, &rb->event_list);
4891 spin_unlock_irqrestore(&rb->event_lock, flags);
4892 }
4893
4894 /*
4895 * Avoid racing with perf_mmap_close(AUX): stop the event
4896 * before swizzling the event::rb pointer; if it's getting
4897 * unmapped, its aux_mmap_count will be 0 and it won't
4898 * restart. See the comment in __perf_pmu_output_stop().
4899 *
4900 * Data will inevitably be lost when set_output is done in
4901 * mid-air, but then again, whoever does it like this is
4902 * not in for the data anyway.
4903 */
4904 if (has_aux(event))
4905 perf_event_stop(event, 0);
4906
4907 rcu_assign_pointer(event->rb, rb);
4908
4909 if (old_rb) {
4910 ring_buffer_put(old_rb);
4911 /*
4912 * Since we detached before setting the new rb, so that we
4913 * could attach the new rb, we could have missed a wakeup.
4914 * Provide it now.
4915 */
4916 wake_up_all(&event->waitq);
4917 }
4918 }
4919
4920 static void ring_buffer_wakeup(struct perf_event *event)
4921 {
4922 struct ring_buffer *rb;
4923
4924 rcu_read_lock();
4925 rb = rcu_dereference(event->rb);
4926 if (rb) {
4927 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
4928 wake_up_all(&event->waitq);
4929 }
4930 rcu_read_unlock();
4931 }
4932
4933 struct ring_buffer *ring_buffer_get(struct perf_event *event)
4934 {
4935 struct ring_buffer *rb;
4936
4937 rcu_read_lock();
4938 rb = rcu_dereference(event->rb);
4939 if (rb) {
4940 if (!atomic_inc_not_zero(&rb->refcount))
4941 rb = NULL;
4942 }
4943 rcu_read_unlock();
4944
4945 return rb;
4946 }
4947
4948 void ring_buffer_put(struct ring_buffer *rb)
4949 {
4950 if (!atomic_dec_and_test(&rb->refcount))
4951 return;
4952
4953 WARN_ON_ONCE(!list_empty(&rb->event_list));
4954
4955 call_rcu(&rb->rcu_head, rb_free_rcu);
4956 }
4957
4958 static void perf_mmap_open(struct vm_area_struct *vma)
4959 {
4960 struct perf_event *event = vma->vm_file->private_data;
4961
4962 atomic_inc(&event->mmap_count);
4963 atomic_inc(&event->rb->mmap_count);
4964
4965 if (vma->vm_pgoff)
4966 atomic_inc(&event->rb->aux_mmap_count);
4967
4968 if (event->pmu->event_mapped)
4969 event->pmu->event_mapped(event);
4970 }
4971
4972 static void perf_pmu_output_stop(struct perf_event *event);
4973
4974 /*
4975 * A buffer can be mmap()ed multiple times; either directly through the same
4976 * event, or through other events by use of perf_event_set_output().
4977 *
4978 * In order to undo the VM accounting done by perf_mmap() we need to destroy
4979 * the buffer here, where we still have a VM context. This means we need
4980 * to detach all events redirecting to us.
4981 */
4982 static void perf_mmap_close(struct vm_area_struct *vma)
4983 {
4984 struct perf_event *event = vma->vm_file->private_data;
4985
4986 struct ring_buffer *rb = ring_buffer_get(event);
4987 struct user_struct *mmap_user = rb->mmap_user;
4988 int mmap_locked = rb->mmap_locked;
4989 unsigned long size = perf_data_size(rb);
4990
4991 if (event->pmu->event_unmapped)
4992 event->pmu->event_unmapped(event);
4993
4994 /*
4995 * rb->aux_mmap_count will always drop before rb->mmap_count and
4996 * event->mmap_count, so it is ok to use event->mmap_mutex to
4997 * serialize with perf_mmap here.
4998 */
4999 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
5000 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
5001 /*
5002 * Stop all AUX events that are writing to this buffer,
5003 * so that we can free its AUX pages and corresponding PMU
5004 * data. Note that after rb::aux_mmap_count dropped to zero,
5005 * they won't start any more (see perf_aux_output_begin()).
5006 */
5007 perf_pmu_output_stop(event);
5008
5009 /* now it's safe to free the pages */
5010 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
5011 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
5012
5013 /* this has to be the last one */
5014 rb_free_aux(rb);
5015 WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
5016
5017 mutex_unlock(&event->mmap_mutex);
5018 }
5019
5020 atomic_dec(&rb->mmap_count);
5021
5022 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
5023 goto out_put;
5024
5025 ring_buffer_attach(event, NULL);
5026 mutex_unlock(&event->mmap_mutex);
5027
5028 /* If there's still other mmap()s of this buffer, we're done. */
5029 if (atomic_read(&rb->mmap_count))
5030 goto out_put;
5031
5032 /*
5033 * No other mmap()s, detach from all other events that might redirect
5034 * into the now unreachable buffer. Somewhat complicated by the
5035 * fact that rb::event_lock otherwise nests inside mmap_mutex.
5036 */
5037 again:
5038 rcu_read_lock();
5039 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
5040 if (!atomic_long_inc_not_zero(&event->refcount)) {
5041 /*
5042 * This event is en-route to free_event() which will
5043 * detach it and remove it from the list.
5044 */
5045 continue;
5046 }
5047 rcu_read_unlock();
5048
5049 mutex_lock(&event->mmap_mutex);
5050 /*
5051 * Check we didn't race with perf_event_set_output() which can
5052 * swizzle the rb from under us while we were waiting to
5053 * acquire mmap_mutex.
5054 *
5055 * If we find a different rb, ignore this event; the next
5056 * iteration will no longer find it on the list. We still have
5057 * to restart the iteration to make sure we're not now
5058 * iterating the wrong list.
5059 */
5060 if (event->rb == rb)
5061 ring_buffer_attach(event, NULL);
5062
5063 mutex_unlock(&event->mmap_mutex);
5064 put_event(event);
5065
5066 /*
5067 * Restart the iteration; either we're on the wrong list or
5068 * we destroyed its integrity by doing a deletion.
5069 */
5070 goto again;
5071 }
5072 rcu_read_unlock();
5073
5074 /*
5075 * It could be that there are still a few 0-ref events on the list; they'll
5076 * get cleaned up by free_event() -- they'll also still have their
5077 * ref on the rb and will free it whenever they are done with it.
5078 *
5079 * Aside from that, this buffer is 'fully' detached and unmapped,
5080 * undo the VM accounting.
5081 */
5082
5083 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
5084 vma->vm_mm->pinned_vm -= mmap_locked;
5085 free_uid(mmap_user);
5086
5087 out_put:
5088 ring_buffer_put(rb); /* could be last */
5089 }
5090
5091 static const struct vm_operations_struct perf_mmap_vmops = {
5092 .open = perf_mmap_open,
5093 .close = perf_mmap_close, /* non mergeable */
5094 .fault = perf_mmap_fault,
5095 .page_mkwrite = perf_mmap_fault,
5096 };
5097
5098 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
5099 {
5100 struct perf_event *event = file->private_data;
5101 unsigned long user_locked, user_lock_limit;
5102 struct user_struct *user = current_user();
5103 unsigned long locked, lock_limit;
5104 struct ring_buffer *rb = NULL;
5105 unsigned long vma_size;
5106 unsigned long nr_pages;
5107 long user_extra = 0, extra = 0;
5108 int ret = 0, flags = 0;
5109
5110 /*
5111 * Don't allow mmap() of inherited per-task counters. This would
5112 * create a performance issue due to all children writing to the
5113 * same rb.
5114 */
5115 if (event->cpu == -1 && event->attr.inherit)
5116 return -EINVAL;
5117
5118 if (!(vma->vm_flags & VM_SHARED))
5119 return -EINVAL;
5120
5121 vma_size = vma->vm_end - vma->vm_start;
5122
5123 if (vma->vm_pgoff == 0) {
5124 nr_pages = (vma_size / PAGE_SIZE) - 1;
5125 } else {
5126 /*
5127 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
5128 * mapped, all subsequent mappings should have the same size
5129 * and offset. Must be above the normal perf buffer.
5130 */
5131 u64 aux_offset, aux_size;
5132
5133 if (!event->rb)
5134 return -EINVAL;
5135
5136 nr_pages = vma_size / PAGE_SIZE;
5137
5138 mutex_lock(&event->mmap_mutex);
5139 ret = -EINVAL;
5140
5141 rb = event->rb;
5142 if (!rb)
5143 goto aux_unlock;
5144
5145 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
5146 aux_size = ACCESS_ONCE(rb->user_page->aux_size);
5147
5148 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
5149 goto aux_unlock;
5150
5151 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
5152 goto aux_unlock;
5153
5154 /* already mapped with a different offset */
5155 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
5156 goto aux_unlock;
5157
5158 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
5159 goto aux_unlock;
5160
5161 /* already mapped with a different size */
5162 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
5163 goto aux_unlock;
5164
5165 if (!is_power_of_2(nr_pages))
5166 goto aux_unlock;
5167
5168 if (!atomic_inc_not_zero(&rb->mmap_count))
5169 goto aux_unlock;
5170
5171 if (rb_has_aux(rb)) {
5172 atomic_inc(&rb->aux_mmap_count);
5173 ret = 0;
5174 goto unlock;
5175 }
5176
5177 atomic_set(&rb->aux_mmap_count, 1);
5178 user_extra = nr_pages;
5179
5180 goto accounting;
5181 }
5182
5183 /*
5184 * If we have rb pages, ensure they're a power-of-two number, so we
5185 * can do bitmasks instead of modulo.
5186 */
5187 if (nr_pages != 0 && !is_power_of_2(nr_pages))
5188 return -EINVAL;
5189
5190 if (vma_size != PAGE_SIZE * (1 + nr_pages))
5191 return -EINVAL;
5192
5193 WARN_ON_ONCE(event->ctx->parent_ctx);
5194 again:
5195 mutex_lock(&event->mmap_mutex);
5196 if (event->rb) {
5197 if (event->rb->nr_pages != nr_pages) {
5198 ret = -EINVAL;
5199 goto unlock;
5200 }
5201
5202 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
5203 /*
5204 * Raced against perf_mmap_close() through
5205 * perf_event_set_output(). Try again, hope for better
5206 * luck.
5207 */
5208 mutex_unlock(&event->mmap_mutex);
5209 goto again;
5210 }
5211
5212 goto unlock;
5213 }
5214
5215 user_extra = nr_pages + 1;
5216
5217 accounting:
5218 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
5219
5220 /*
5221 * Increase the limit linearly with more CPUs:
5222 */
5223 user_lock_limit *= num_online_cpus();
5224
5225 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
5226
5227 if (user_locked > user_lock_limit)
5228 extra = user_locked - user_lock_limit;
5229
5230 lock_limit = rlimit(RLIMIT_MEMLOCK);
5231 lock_limit >>= PAGE_SHIFT;
5232 locked = vma->vm_mm->pinned_vm + extra;
5233
5234 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
5235 !capable(CAP_IPC_LOCK)) {
5236 ret = -EPERM;
5237 goto unlock;
5238 }
5239
5240 WARN_ON(!rb && event->rb);
5241
5242 if (vma->vm_flags & VM_WRITE)
5243 flags |= RING_BUFFER_WRITABLE;
5244
5245 if (!rb) {
5246 rb = rb_alloc(nr_pages,
5247 event->attr.watermark ? event->attr.wakeup_watermark : 0,
5248 event->cpu, flags);
5249
5250 if (!rb) {
5251 ret = -ENOMEM;
5252 goto unlock;
5253 }
5254
5255 atomic_set(&rb->mmap_count, 1);
5256 rb->mmap_user = get_current_user();
5257 rb->mmap_locked = extra;
5258
5259 ring_buffer_attach(event, rb);
5260
5261 perf_event_init_userpage(event);
5262 perf_event_update_userpage(event);
5263 } else {
5264 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
5265 event->attr.aux_watermark, flags);
5266 if (!ret)
5267 rb->aux_mmap_locked = extra;
5268 }
5269
5270 unlock:
5271 if (!ret) {
5272 atomic_long_add(user_extra, &user->locked_vm);
5273 vma->vm_mm->pinned_vm += extra;
5274
5275 atomic_inc(&event->mmap_count);
5276 } else if (rb) {
5277 atomic_dec(&rb->mmap_count);
5278 }
5279 aux_unlock:
5280 mutex_unlock(&event->mmap_mutex);
5281
5282 /*
5283 * Since pinned accounting is per vm we cannot allow fork() to copy our
5284 * vma.
5285 */
5286 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
5287 vma->vm_ops = &perf_mmap_vmops;
5288
5289 if (event->pmu->event_mapped)
5290 event->pmu->event_mapped(event);
5291
5292 return ret;
5293 }
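
/*
 * Illustrative user-space sketch (not kernel code; @fd and @n are
 * assumptions): the mapping geometry this function accepts. The data
 * area is 2^n pages plus one control page, mapped shared at offset 0:
 *
 *	size_t len = (1 + (1 << n)) * page_size;
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 * An optional AUX area is mapped with a second mmap() whose offset and
 * length must match the aux_offset/aux_size values the tool wrote into
 * the user page beforehand, and whose page count is again a power of two.
 */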
5294
5295 static int perf_fasync(int fd, struct file *filp, int on)
5296 {
5297 struct inode *inode = file_inode(filp);
5298 struct perf_event *event = filp->private_data;
5299 int retval;
5300
5301 inode_lock(inode);
5302 retval = fasync_helper(fd, filp, on, &event->fasync);
5303 inode_unlock(inode);
5304
5305 if (retval < 0)
5306 return retval;
5307
5308 return 0;
5309 }
5310
5311 static const struct file_operations perf_fops = {
5312 .llseek = no_llseek,
5313 .release = perf_release,
5314 .read = perf_read,
5315 .poll = perf_poll,
5316 .unlocked_ioctl = perf_ioctl,
5317 .compat_ioctl = perf_compat_ioctl,
5318 .mmap = perf_mmap,
5319 .fasync = perf_fasync,
5320 };
5321
5322 /*
5323 * Perf event wakeup
5324 *
5325 * If there's data, ensure we set the poll() state and publish everything
5326 * to user-space before waking everybody up.
5327 */
5328
5329 static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
5330 {
5331 /* only the parent has fasync state */
5332 if (event->parent)
5333 event = event->parent;
5334 return &event->fasync;
5335 }
5336
5337 void perf_event_wakeup(struct perf_event *event)
5338 {
5339 ring_buffer_wakeup(event);
5340
5341 if (event->pending_kill) {
5342 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
5343 event->pending_kill = 0;
5344 }
5345 }
5346
5347 static void perf_pending_event(struct irq_work *entry)
5348 {
5349 struct perf_event *event = container_of(entry,
5350 struct perf_event, pending);
5351 int rctx;
5352
5353 rctx = perf_swevent_get_recursion_context();
5354 /*
5355 * If we 'fail' here, that's OK, it means recursion is already disabled
5356 * and we won't recurse 'further'.
5357 */
5358
5359 if (event->pending_disable) {
5360 event->pending_disable = 0;
5361 perf_event_disable_local(event);
5362 }
5363
5364 if (event->pending_wakeup) {
5365 event->pending_wakeup = 0;
5366 perf_event_wakeup(event);
5367 }
5368
5369 if (rctx >= 0)
5370 perf_swevent_put_recursion_context(rctx);
5371 }
5372
5373 /*
5374 * We assume there is only KVM supporting the callbacks.
5375 * Later on, we might change it to a list if there is
5376 * another virtualization implementation supporting the callbacks.
5377 */
5378 struct perf_guest_info_callbacks *perf_guest_cbs;
5379
5380 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5381 {
5382 perf_guest_cbs = cbs;
5383 return 0;
5384 }
5385 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
5386
5387 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5388 {
5389 perf_guest_cbs = NULL;
5390 return 0;
5391 }
5392 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
5393
5394 static void
5395 perf_output_sample_regs(struct perf_output_handle *handle,
5396 struct pt_regs *regs, u64 mask)
5397 {
5398 int bit;
5399 DECLARE_BITMAP(_mask, 64);
5400
5401 bitmap_from_u64(_mask, mask);
5402 for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
5403 u64 val;
5404
5405 val = perf_reg_value(regs, bit);
5406 perf_output_put(handle, val);
5407 }
5408 }
5409
5410 static void perf_sample_regs_user(struct perf_regs *regs_user,
5411 struct pt_regs *regs,
5412 struct pt_regs *regs_user_copy)
5413 {
5414 if (user_mode(regs)) {
5415 regs_user->abi = perf_reg_abi(current);
5416 regs_user->regs = regs;
5417 } else if (current->mm) {
5418 perf_get_regs_user(regs_user, regs, regs_user_copy);
5419 } else {
5420 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
5421 regs_user->regs = NULL;
5422 }
5423 }
5424
5425 static void perf_sample_regs_intr(struct perf_regs *regs_intr,
5426 struct pt_regs *regs)
5427 {
5428 regs_intr->regs = regs;
5429 regs_intr->abi = perf_reg_abi(current);
5430 }
5431
5432
5433 /*
5434 * Get remaining task size from user stack pointer.
5435 *
5436 * It'd be better to take the stack vma map and limit this more
5437 * precisely, but there's no way to get it safely under interrupt,
5438 * so we use TASK_SIZE as the limit.
5439 */
5440 static u64 perf_ustack_task_size(struct pt_regs *regs)
5441 {
5442 unsigned long addr = perf_user_stack_pointer(regs);
5443
5444 if (!addr || addr >= TASK_SIZE)
5445 return 0;
5446
5447 return TASK_SIZE - addr;
5448 }
5449
5450 static u16
5451 perf_sample_ustack_size(u16 stack_size, u16 header_size,
5452 struct pt_regs *regs)
5453 {
5454 u64 task_size;
5455
5456 /* No regs, no stack pointer, no dump. */
5457 if (!regs)
5458 return 0;
5459
5460 /*
5461 * Check if we fit in with the requested stack size into the:
5462 * - TASK_SIZE
5463 * If we don't, we limit the size to the TASK_SIZE.
5464 *
5465 * - remaining sample size
5466 * If we don't, we customize the stack size to
5467 * fit in to the remaining sample size.
5468 */
5469
5470 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
5471 stack_size = min(stack_size, (u16) task_size);
5472
5473 /* Current header size plus static size and dynamic size. */
5474 header_size += 2 * sizeof(u64);
5475
5476 /* Do we fit in with the current stack dump size? */
5477 if ((u16) (header_size + stack_size) < header_size) {
5478 /*
5479 * If we overflow the maximum size for the sample,
5480 * we customize the stack dump size to fit in.
5481 */
5482 stack_size = USHRT_MAX - header_size - sizeof(u64);
5483 stack_size = round_up(stack_size, sizeof(u64));
5484 }
5485
5486 return stack_size;
5487 }
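
/*
 * Worked example (hypothetical numbers): with sample_stack_user = 16384,
 * header_size = 200 and ample user stack, header_size becomes 216 after
 * adding the two u64 size fields; 216 + 16384 fits in a u16, so all
 * 16384 bytes are dumped. With sample_stack_user = 65535 the u16 sum
 * wraps, so the dump is clamped to 65535 - 216 - 8 = 65311 and rounded
 * up to 65312 bytes.
 */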
5488
5489 static void
5490 perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
5491 struct pt_regs *regs)
5492 {
5493 /* Case of a kernel thread, nothing to dump */
5494 if (!regs) {
5495 u64 size = 0;
5496 perf_output_put(handle, size);
5497 } else {
5498 unsigned long sp;
5499 unsigned int rem;
5500 u64 dyn_size;
5501
5502 /*
5503 * We dump:
5504 * static size
5505 * - the size requested by user or the best one we can fit
5506 * in to the sample max size
5507 * data
5508 * - user stack dump data
5509 * dynamic size
5510 * - the actual dumped size
5511 */
5512
5513 /* Static size. */
5514 perf_output_put(handle, dump_size);
5515
5516 /* Data. */
5517 sp = perf_user_stack_pointer(regs);
5518 rem = __output_copy_user(handle, (void *) sp, dump_size);
5519 dyn_size = dump_size - rem;
5520
5521 perf_output_skip(handle, rem);
5522
5523 /* Dynamic size. */
5524 perf_output_put(handle, dyn_size);
5525 }
5526 }
5527
5528 static void __perf_event_header__init_id(struct perf_event_header *header,
5529 struct perf_sample_data *data,
5530 struct perf_event *event)
5531 {
5532 u64 sample_type = event->attr.sample_type;
5533
5534 data->type = sample_type;
5535 header->size += event->id_header_size;
5536
5537 if (sample_type & PERF_SAMPLE_TID) {
5538 /* namespace issues */
5539 data->tid_entry.pid = perf_event_pid(event, current);
5540 data->tid_entry.tid = perf_event_tid(event, current);
5541 }
5542
5543 if (sample_type & PERF_SAMPLE_TIME)
5544 data->time = perf_event_clock(event);
5545
5546 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
5547 data->id = primary_event_id(event);
5548
5549 if (sample_type & PERF_SAMPLE_STREAM_ID)
5550 data->stream_id = event->id;
5551
5552 if (sample_type & PERF_SAMPLE_CPU) {
5553 data->cpu_entry.cpu = raw_smp_processor_id();
5554 data->cpu_entry.reserved = 0;
5555 }
5556 }
5557
5558 void perf_event_header__init_id(struct perf_event_header *header,
5559 struct perf_sample_data *data,
5560 struct perf_event *event)
5561 {
5562 if (event->attr.sample_id_all)
5563 __perf_event_header__init_id(header, data, event);
5564 }
5565
5566 static void __perf_event__output_id_sample(struct perf_output_handle *handle,
5567 struct perf_sample_data *data)
5568 {
5569 u64 sample_type = data->type;
5570
5571 if (sample_type & PERF_SAMPLE_TID)
5572 perf_output_put(handle, data->tid_entry);
5573
5574 if (sample_type & PERF_SAMPLE_TIME)
5575 perf_output_put(handle, data->time);
5576
5577 if (sample_type & PERF_SAMPLE_ID)
5578 perf_output_put(handle, data->id);
5579
5580 if (sample_type & PERF_SAMPLE_STREAM_ID)
5581 perf_output_put(handle, data->stream_id);
5582
5583 if (sample_type & PERF_SAMPLE_CPU)
5584 perf_output_put(handle, data->cpu_entry);
5585
5586 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5587 perf_output_put(handle, data->id);
5588 }
5589
5590 void perf_event__output_id_sample(struct perf_event *event,
5591 struct perf_output_handle *handle,
5592 struct perf_sample_data *sample)
5593 {
5594 if (event->attr.sample_id_all)
5595 __perf_event__output_id_sample(handle, sample);
5596 }
5597
5598 static void perf_output_read_one(struct perf_output_handle *handle,
5599 struct perf_event *event,
5600 u64 enabled, u64 running)
5601 {
5602 u64 read_format = event->attr.read_format;
5603 u64 values[4];
5604 int n = 0;
5605
5606 values[n++] = perf_event_count(event);
5607 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
5608 values[n++] = enabled +
5609 atomic64_read(&event->child_total_time_enabled);
5610 }
5611 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
5612 values[n++] = running +
5613 atomic64_read(&event->child_total_time_running);
5614 }
5615 if (read_format & PERF_FORMAT_ID)
5616 values[n++] = primary_event_id(event);
5617
5618 __output_copy(handle, values, n * sizeof(u64));
5619 }
5620
5621 /*
5622 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
5623 */
5624 static void perf_output_read_group(struct perf_output_handle *handle,
5625 struct perf_event *event,
5626 u64 enabled, u64 running)
5627 {
5628 struct perf_event *leader = event->group_leader, *sub;
5629 u64 read_format = event->attr.read_format;
5630 u64 values[5];
5631 int n = 0;
5632
5633 values[n++] = 1 + leader->nr_siblings;
5634
5635 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
5636 values[n++] = enabled;
5637
5638 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
5639 values[n++] = running;
5640
5641 if (leader != event)
5642 leader->pmu->read(leader);
5643
5644 values[n++] = perf_event_count(leader);
5645 if (read_format & PERF_FORMAT_ID)
5646 values[n++] = primary_event_id(leader);
5647
5648 __output_copy(handle, values, n * sizeof(u64));
5649
5650 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
5651 n = 0;
5652
5653 if ((sub != event) &&
5654 (sub->state == PERF_EVENT_STATE_ACTIVE))
5655 sub->pmu->read(sub);
5656
5657 values[n++] = perf_event_count(sub);
5658 if (read_format & PERF_FORMAT_ID)
5659 values[n++] = primary_event_id(sub);
5660
5661 __output_copy(handle, values, n * sizeof(u64));
5662 }
5663 }
5664
5665 #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
5666 PERF_FORMAT_TOTAL_TIME_RUNNING)
5667
5668 static void perf_output_read(struct perf_output_handle *handle,
5669 struct perf_event *event)
5670 {
5671 u64 enabled = 0, running = 0, now;
5672 u64 read_format = event->attr.read_format;
5673
5674 /*
5675 * compute total_time_enabled, total_time_running
5676 * based on snapshot values taken when the event
5677 * was last scheduled in.
5678 *
5679 * we cannot simply call update_context_time()
5680 * because of locking issues, as we are called in
5681 * NMI context
5682 */
5683 if (read_format & PERF_FORMAT_TOTAL_TIMES)
5684 calc_timer_values(event, &now, &enabled, &running);
5685
5686 if (event->attr.read_format & PERF_FORMAT_GROUP)
5687 perf_output_read_group(handle, event, enabled, running);
5688 else
5689 perf_output_read_one(handle, event, enabled, running);
5690 }
5691
5692 void perf_output_sample(struct perf_output_handle *handle,
5693 struct perf_event_header *header,
5694 struct perf_sample_data *data,
5695 struct perf_event *event)
5696 {
5697 u64 sample_type = data->type;
5698
5699 perf_output_put(handle, *header);
5700
5701 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5702 perf_output_put(handle, data->id);
5703
5704 if (sample_type & PERF_SAMPLE_IP)
5705 perf_output_put(handle, data->ip);
5706
5707 if (sample_type & PERF_SAMPLE_TID)
5708 perf_output_put(handle, data->tid_entry);
5709
5710 if (sample_type & PERF_SAMPLE_TIME)
5711 perf_output_put(handle, data->time);
5712
5713 if (sample_type & PERF_SAMPLE_ADDR)
5714 perf_output_put(handle, data->addr);
5715
5716 if (sample_type & PERF_SAMPLE_ID)
5717 perf_output_put(handle, data->id);
5718
5719 if (sample_type & PERF_SAMPLE_STREAM_ID)
5720 perf_output_put(handle, data->stream_id);
5721
5722 if (sample_type & PERF_SAMPLE_CPU)
5723 perf_output_put(handle, data->cpu_entry);
5724
5725 if (sample_type & PERF_SAMPLE_PERIOD)
5726 perf_output_put(handle, data->period);
5727
5728 if (sample_type & PERF_SAMPLE_READ)
5729 perf_output_read(handle, event);
5730
5731 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5732 if (data->callchain) {
5733 int size = 1;
5734
5735 if (data->callchain)
5736 size += data->callchain->nr;
5737
5738 size *= sizeof(u64);
5739
5740 __output_copy(handle, data->callchain, size);
5741 } else {
5742 u64 nr = 0;
5743 perf_output_put(handle, nr);
5744 }
5745 }
5746
5747 if (sample_type & PERF_SAMPLE_RAW) {
5748 struct perf_raw_record *raw = data->raw;
5749
5750 if (raw) {
5751 struct perf_raw_frag *frag = &raw->frag;
5752
5753 perf_output_put(handle, raw->size);
5754 do {
5755 if (frag->copy) {
5756 __output_custom(handle, frag->copy,
5757 frag->data, frag->size);
5758 } else {
5759 __output_copy(handle, frag->data,
5760 frag->size);
5761 }
5762 if (perf_raw_frag_last(frag))
5763 break;
5764 frag = frag->next;
5765 } while (1);
5766 if (frag->pad)
5767 __output_skip(handle, NULL, frag->pad);
5768 } else {
5769 struct {
5770 u32 size;
5771 u32 data;
5772 } raw = {
5773 .size = sizeof(u32),
5774 .data = 0,
5775 };
5776 perf_output_put(handle, raw);
5777 }
5778 }
5779
5780 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5781 if (data->br_stack) {
5782 size_t size;
5783
5784 size = data->br_stack->nr
5785 * sizeof(struct perf_branch_entry);
5786
5787 perf_output_put(handle, data->br_stack->nr);
5788 perf_output_copy(handle, data->br_stack->entries, size);
5789 } else {
5790 /*
5791 * we always store at least the value of nr
5792 */
5793 u64 nr = 0;
5794 perf_output_put(handle, nr);
5795 }
5796 }
5797
5798 if (sample_type & PERF_SAMPLE_REGS_USER) {
5799 u64 abi = data->regs_user.abi;
5800
5801 /*
5802 * If there are no regs to dump, notice it through
5803 * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5804 */
5805 perf_output_put(handle, abi);
5806
5807 if (abi) {
5808 u64 mask = event->attr.sample_regs_user;
5809 perf_output_sample_regs(handle,
5810 data->regs_user.regs,
5811 mask);
5812 }
5813 }
5814
5815 if (sample_type & PERF_SAMPLE_STACK_USER) {
5816 perf_output_sample_ustack(handle,
5817 data->stack_user_size,
5818 data->regs_user.regs);
5819 }
5820
5821 if (sample_type & PERF_SAMPLE_WEIGHT)
5822 perf_output_put(handle, data->weight);
5823
5824 if (sample_type & PERF_SAMPLE_DATA_SRC)
5825 perf_output_put(handle, data->data_src.val);
5826
5827 if (sample_type & PERF_SAMPLE_TRANSACTION)
5828 perf_output_put(handle, data->txn);
5829
5830 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5831 u64 abi = data->regs_intr.abi;
5832 /*
5833 * If there are no regs to dump, notice it through
5834 * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5835 */
5836 perf_output_put(handle, abi);
5837
5838 if (abi) {
5839 u64 mask = event->attr.sample_regs_intr;
5840
5841 perf_output_sample_regs(handle,
5842 data->regs_intr.regs,
5843 mask);
5844 }
5845 }
5846
5847 if (!event->attr.watermark) {
5848 int wakeup_events = event->attr.wakeup_events;
5849
5850 if (wakeup_events) {
5851 struct ring_buffer *rb = handle->rb;
5852 int events = local_inc_return(&rb->events);
5853
5854 if (events >= wakeup_events) {
5855 local_sub(wakeup_events, &rb->events);
5856 local_inc(&rb->wakeup);
5857 }
5858 }
5859 }
5860 }
5861
5862 void perf_prepare_sample(struct perf_event_header *header,
5863 struct perf_sample_data *data,
5864 struct perf_event *event,
5865 struct pt_regs *regs)
5866 {
5867 u64 sample_type = event->attr.sample_type;
5868
5869 header->type = PERF_RECORD_SAMPLE;
5870 header->size = sizeof(*header) + event->header_size;
5871
5872 header->misc = 0;
5873 header->misc |= perf_misc_flags(regs);
5874
5875 __perf_event_header__init_id(header, data, event);
5876
5877 if (sample_type & PERF_SAMPLE_IP)
5878 data->ip = perf_instruction_pointer(regs);
5879
5880 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5881 int size = 1;
5882
5883 data->callchain = perf_callchain(event, regs);
5884
5885 if (data->callchain)
5886 size += data->callchain->nr;
5887
5888 header->size += size * sizeof(u64);
5889 }
5890
5891 if (sample_type & PERF_SAMPLE_RAW) {
5892 struct perf_raw_record *raw = data->raw;
5893 int size;
5894
5895 if (raw) {
5896 struct perf_raw_frag *frag = &raw->frag;
5897 u32 sum = 0;
5898
5899 do {
5900 sum += frag->size;
5901 if (perf_raw_frag_last(frag))
5902 break;
5903 frag = frag->next;
5904 } while (1);
5905
5906 size = round_up(sum + sizeof(u32), sizeof(u64));
5907 raw->size = size - sizeof(u32);
5908 frag->pad = raw->size - sum;
5909 } else {
5910 size = sizeof(u64);
5911 }
5912
5913 header->size += size;
5914 }
5915
5916 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5917 int size = sizeof(u64); /* nr */
5918 if (data->br_stack) {
5919 size += data->br_stack->nr
5920 * sizeof(struct perf_branch_entry);
5921 }
5922 header->size += size;
5923 }
5924
5925 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
5926 perf_sample_regs_user(&data->regs_user, regs,
5927 &data->regs_user_copy);
5928
5929 if (sample_type & PERF_SAMPLE_REGS_USER) {
5930 /* regs dump ABI info */
5931 int size = sizeof(u64);
5932
5933 if (data->regs_user.regs) {
5934 u64 mask = event->attr.sample_regs_user;
5935 size += hweight64(mask) * sizeof(u64);
5936 }
5937
5938 header->size += size;
5939 }
5940
5941 if (sample_type & PERF_SAMPLE_STACK_USER) {
5942 /*
5943 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
5944 * processed as the last one, or an additional check must be added
5945 * when a new sample type is introduced, because we could eat
5946 * up the rest of the sample size.
5947 */
5948 u16 stack_size = event->attr.sample_stack_user;
5949 u16 size = sizeof(u64);
5950
5951 stack_size = perf_sample_ustack_size(stack_size, header->size,
5952 data->regs_user.regs);
5953
5954 /*
5955 * If there is something to dump, add space for the dump
5956 * itself and for the field that tells the dynamic size,
5957 * which is how many have been actually dumped.
5958 */
5959 if (stack_size)
5960 size += sizeof(u64) + stack_size;
5961
5962 data->stack_user_size = stack_size;
5963 header->size += size;
5964 }
5965
5966 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5967 /* regs dump ABI info */
5968 int size = sizeof(u64);
5969
5970 perf_sample_regs_intr(&data->regs_intr, regs);
5971
5972 if (data->regs_intr.regs) {
5973 u64 mask = event->attr.sample_regs_intr;
5974
5975 size += hweight64(mask) * sizeof(u64);
5976 }
5977
5978 header->size += size;
5979 }
5980 }
5981
5982 static void __always_inline
5983 __perf_event_output(struct perf_event *event,
5984 struct perf_sample_data *data,
5985 struct pt_regs *regs,
5986 int (*output_begin)(struct perf_output_handle *,
5987 struct perf_event *,
5988 unsigned int))
5989 {
5990 struct perf_output_handle handle;
5991 struct perf_event_header header;
5992
5993 /* protect the callchain buffers */
5994 rcu_read_lock();
5995
5996 perf_prepare_sample(&header, data, event, regs);
5997
5998 if (output_begin(&handle, event, header.size))
5999 goto exit;
6000
6001 perf_output_sample(&handle, &header, data, event);
6002
6003 perf_output_end(&handle);
6004
6005 exit:
6006 rcu_read_unlock();
6007 }
6008
6009 void
6010 perf_event_output_forward(struct perf_event *event,
6011 struct perf_sample_data *data,
6012 struct pt_regs *regs)
6013 {
6014 __perf_event_output(event, data, regs, perf_output_begin_forward);
6015 }
6016
6017 void
6018 perf_event_output_backward(struct perf_event *event,
6019 struct perf_sample_data *data,
6020 struct pt_regs *regs)
6021 {
6022 __perf_event_output(event, data, regs, perf_output_begin_backward);
6023 }
6024
6025 void
6026 perf_event_output(struct perf_event *event,
6027 struct perf_sample_data *data,
6028 struct pt_regs *regs)
6029 {
6030 __perf_event_output(event, data, regs, perf_output_begin);
6031 }
6032
6033 /*
6034 * read event_id
6035 */
6036
6037 struct perf_read_event {
6038 struct perf_event_header header;
6039
6040 u32 pid;
6041 u32 tid;
6042 };
6043
6044 static void
6045 perf_event_read_event(struct perf_event *event,
6046 struct task_struct *task)
6047 {
6048 struct perf_output_handle handle;
6049 struct perf_sample_data sample;
6050 struct perf_read_event read_event = {
6051 .header = {
6052 .type = PERF_RECORD_READ,
6053 .misc = 0,
6054 .size = sizeof(read_event) + event->read_size,
6055 },
6056 .pid = perf_event_pid(event, task),
6057 .tid = perf_event_tid(event, task),
6058 };
6059 int ret;
6060
6061 perf_event_header__init_id(&read_event.header, &sample, event);
6062 ret = perf_output_begin(&handle, event, read_event.header.size);
6063 if (ret)
6064 return;
6065
6066 perf_output_put(&handle, read_event);
6067 perf_output_read(&handle, event);
6068 perf_event__output_id_sample(event, &handle, &sample);
6069
6070 perf_output_end(&handle);
6071 }
6072
6073 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
6074
6075 static void
6076 perf_iterate_ctx(struct perf_event_context *ctx,
6077 perf_iterate_f output,
6078 void *data, bool all)
6079 {
6080 struct perf_event *event;
6081
6082 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
6083 if (!all) {
6084 if (event->state < PERF_EVENT_STATE_INACTIVE)
6085 continue;
6086 if (!event_filter_match(event))
6087 continue;
6088 }
6089
6090 output(event, data);
6091 }
6092 }
6093
6094 static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
6095 {
6096 struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
6097 struct perf_event *event;
6098
6099 list_for_each_entry_rcu(event, &pel->list, sb_list) {
6100 /*
6101 * Skip events that are not fully formed yet; ensure that
6102 * if we observe event->ctx, both event and ctx will be
6103 * complete enough. See perf_install_in_context().
6104 */
6105 if (!smp_load_acquire(&event->ctx))
6106 continue;
6107
6108 if (event->state < PERF_EVENT_STATE_INACTIVE)
6109 continue;
6110 if (!event_filter_match(event))
6111 continue;
6112 output(event, data);
6113 }
6114 }
6115
6116 /*
6117 * Iterate all events that need to receive side-band events.
6118 *
6119 * For new callers: ensure that account_pmu_sb_event() includes
6120 * your event; otherwise it might not get delivered.
6121 */
6122 static void
6123 perf_iterate_sb(perf_iterate_f output, void *data,
6124 struct perf_event_context *task_ctx)
6125 {
6126 struct perf_event_context *ctx;
6127 int ctxn;
6128
6129 rcu_read_lock();
6130 preempt_disable();
6131
6132 /*
6133 * If we have task_ctx != NULL we only notify the task context itself.
6134 * The task_ctx is set only for EXIT events, before releasing the
6135 * task context.
6136 */
6137 if (task_ctx) {
6138 perf_iterate_ctx(task_ctx, output, data, false);
6139 goto done;
6140 }
6141
6142 perf_iterate_sb_cpu(output, data);
6143
6144 for_each_task_context_nr(ctxn) {
6145 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
6146 if (ctx)
6147 perf_iterate_ctx(ctx, output, data, false);
6148 }
6149 done:
6150 preempt_enable();
6151 rcu_read_unlock();
6152 }
6153
6154 /*
6155 * Clear all file-based filters at exec, they'll have to be
6156 * re-instated when/if these objects are mmapped again.
6157 */
6158 static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
6159 {
6160 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6161 struct perf_addr_filter *filter;
6162 unsigned int restart = 0, count = 0;
6163 unsigned long flags;
6164
6165 if (!has_addr_filter(event))
6166 return;
6167
6168 raw_spin_lock_irqsave(&ifh->lock, flags);
6169 list_for_each_entry(filter, &ifh->list, entry) {
6170 if (filter->inode) {
6171 event->addr_filters_offs[count] = 0;
6172 restart++;
6173 }
6174
6175 count++;
6176 }
6177
6178 if (restart)
6179 event->addr_filters_gen++;
6180 raw_spin_unlock_irqrestore(&ifh->lock, flags);
6181
6182 if (restart)
6183 perf_event_stop(event, 1);
6184 }
6185
6186 void perf_event_exec(void)
6187 {
6188 struct perf_event_context *ctx;
6189 int ctxn;
6190
6191 rcu_read_lock();
6192 for_each_task_context_nr(ctxn) {
6193 ctx = current->perf_event_ctxp[ctxn];
6194 if (!ctx)
6195 continue;
6196
6197 perf_event_enable_on_exec(ctxn);
6198
6199 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
6200 true);
6201 }
6202 rcu_read_unlock();
6203 }
6204
6205 struct remote_output {
6206 struct ring_buffer *rb;
6207 int err;
6208 };
6209
6210 static void __perf_event_output_stop(struct perf_event *event, void *data)
6211 {
6212 struct perf_event *parent = event->parent;
6213 struct remote_output *ro = data;
6214 struct ring_buffer *rb = ro->rb;
6215 struct stop_event_data sd = {
6216 .event = event,
6217 };
6218
6219 if (!has_aux(event))
6220 return;
6221
6222 if (!parent)
6223 parent = event;
6224
6225 /*
6226 * In case of inheritance, it will be the parent that links to the
6227 * ring-buffer, but it will be the child that's actually using it.
6228 *
6229 * We are using event::rb to determine if the event should be stopped;
6230 * however, this may race with ring_buffer_attach() (through set_output),
6231 * which will make us skip the event that actually needs to be stopped.
6232 * So ring_buffer_attach() has to stop an aux event before re-assigning
6233 * its rb pointer.
6234 */
6235 if (rcu_dereference(parent->rb) == rb)
6236 ro->err = __perf_event_stop(&sd);
6237 }
6238
6239 static int __perf_pmu_output_stop(void *info)
6240 {
6241 struct perf_event *event = info;
6242 struct pmu *pmu = event->pmu;
6243 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
6244 struct remote_output ro = {
6245 .rb = event->rb,
6246 };
6247
6248 rcu_read_lock();
6249 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
6250 if (cpuctx->task_ctx)
6251 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
6252 &ro, false);
6253 rcu_read_unlock();
6254
6255 return ro.err;
6256 }
6257
6258 static void perf_pmu_output_stop(struct perf_event *event)
6259 {
6260 struct perf_event *iter;
6261 int err, cpu;
6262
6263 restart:
6264 rcu_read_lock();
6265 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
6266 /*
6267 * For per-CPU events, we need to make sure that neither they
6268 * nor their children are running; for cpu==-1 events it's
6269 * sufficient to stop the event itself if it's active, since
6270 * it can't have children.
6271 */
6272 cpu = iter->cpu;
6273 if (cpu == -1)
6274 cpu = READ_ONCE(iter->oncpu);
6275
6276 if (cpu == -1)
6277 continue;
6278
6279 err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
6280 if (err == -EAGAIN) {
6281 rcu_read_unlock();
6282 goto restart;
6283 }
6284 }
6285 rcu_read_unlock();
6286 }
6287
6288 /*
6289 * task tracking -- fork/exit
6290 *
6291 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
6292 */
6293
6294 struct perf_task_event {
6295 struct task_struct *task;
6296 struct perf_event_context *task_ctx;
6297
6298 struct {
6299 struct perf_event_header header;
6300
6301 u32 pid;
6302 u32 ppid;
6303 u32 tid;
6304 u32 ptid;
6305 u64 time;
6306 } event_id;
6307 };
6308
6309 static int perf_event_task_match(struct perf_event *event)
6310 {
6311 return event->attr.comm || event->attr.mmap ||
6312 event->attr.mmap2 || event->attr.mmap_data ||
6313 event->attr.task;
6314 }
6315
6316 static void perf_event_task_output(struct perf_event *event,
6317 void *data)
6318 {
6319 struct perf_task_event *task_event = data;
6320 struct perf_output_handle handle;
6321 struct perf_sample_data sample;
6322 struct task_struct *task = task_event->task;
6323 int ret, size = task_event->event_id.header.size;
6324
6325 if (!perf_event_task_match(event))
6326 return;
6327
6328 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
6329
6330 ret = perf_output_begin(&handle, event,
6331 task_event->event_id.header.size);
6332 if (ret)
6333 goto out;
6334
6335 task_event->event_id.pid = perf_event_pid(event, task);
6336 task_event->event_id.ppid = perf_event_pid(event, current);
6337
6338 task_event->event_id.tid = perf_event_tid(event, task);
6339 task_event->event_id.ptid = perf_event_tid(event, current);
6340
6341 task_event->event_id.time = perf_event_clock(event);
6342
6343 perf_output_put(&handle, task_event->event_id);
6344
6345 perf_event__output_id_sample(event, &handle, &sample);
6346
6347 perf_output_end(&handle);
6348 out:
6349 task_event->event_id.header.size = size;
6350 }
6351
6352 static void perf_event_task(struct task_struct *task,
6353 struct perf_event_context *task_ctx,
6354 int new)
6355 {
6356 struct perf_task_event task_event;
6357
6358 if (!atomic_read(&nr_comm_events) &&
6359 !atomic_read(&nr_mmap_events) &&
6360 !atomic_read(&nr_task_events))
6361 return;
6362
6363 task_event = (struct perf_task_event){
6364 .task = task,
6365 .task_ctx = task_ctx,
6366 .event_id = {
6367 .header = {
6368 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
6369 .misc = 0,
6370 .size = sizeof(task_event.event_id),
6371 },
6372 /* .pid */
6373 /* .ppid */
6374 /* .tid */
6375 /* .ptid */
6376 /* .time */
6377 },
6378 };
6379
6380 perf_iterate_sb(perf_event_task_output,
6381 &task_event,
6382 task_ctx);
6383 }
6384
6385 void perf_event_fork(struct task_struct *task)
6386 {
6387 perf_event_task(task, NULL, 1);
6388 }
6389
6390 /*
6391 * comm tracking
6392 */
6393
6394 struct perf_comm_event {
6395 struct task_struct *task;
6396 char *comm;
6397 int comm_size;
6398
6399 struct {
6400 struct perf_event_header header;
6401
6402 u32 pid;
6403 u32 tid;
6404 } event_id;
6405 };
6406
6407 static int perf_event_comm_match(struct perf_event *event)
6408 {
6409 return event->attr.comm;
6410 }
6411
6412 static void perf_event_comm_output(struct perf_event *event,
6413 void *data)
6414 {
6415 struct perf_comm_event *comm_event = data;
6416 struct perf_output_handle handle;
6417 struct perf_sample_data sample;
6418 int size = comm_event->event_id.header.size;
6419 int ret;
6420
6421 if (!perf_event_comm_match(event))
6422 return;
6423
6424 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
6425 ret = perf_output_begin(&handle, event,
6426 comm_event->event_id.header.size);
6427
6428 if (ret)
6429 goto out;
6430
6431 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
6432 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
6433
6434 perf_output_put(&handle, comm_event->event_id);
6435 __output_copy(&handle, comm_event->comm,
6436 comm_event->comm_size);
6437
6438 perf_event__output_id_sample(event, &handle, &sample);
6439
6440 perf_output_end(&handle);
6441 out:
6442 comm_event->event_id.header.size = size;
6443 }
6444
6445 static void perf_event_comm_event(struct perf_comm_event *comm_event)
6446 {
6447 char comm[TASK_COMM_LEN];
6448 unsigned int size;
6449
6450 memset(comm, 0, sizeof(comm));
6451 strlcpy(comm, comm_event->task->comm, sizeof(comm));
6452 size = ALIGN(strlen(comm)+1, sizeof(u64));
6453
6454 comm_event->comm = comm;
6455 comm_event->comm_size = size;
6456
6457 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
6458
6459 perf_iterate_sb(perf_event_comm_output,
6460 comm_event,
6461 NULL);
6462 }
6463
6464 void perf_event_comm(struct task_struct *task, bool exec)
6465 {
6466 struct perf_comm_event comm_event;
6467
6468 if (!atomic_read(&nr_comm_events))
6469 return;
6470
6471 comm_event = (struct perf_comm_event){
6472 .task = task,
6473 /* .comm */
6474 /* .comm_size */
6475 .event_id = {
6476 .header = {
6477 .type = PERF_RECORD_COMM,
6478 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
6479 /* .size */
6480 },
6481 /* .pid */
6482 /* .tid */
6483 },
6484 };
6485
6486 perf_event_comm_event(&comm_event);
6487 }
6488
6489 /*
6490 * mmap tracking
6491 */
6492
6493 struct perf_mmap_event {
6494 struct vm_area_struct *vma;
6495
6496 const char *file_name;
6497 int file_size;
6498 int maj, min;
6499 u64 ino;
6500 u64 ino_generation;
6501 u32 prot, flags;
6502
6503 struct {
6504 struct perf_event_header header;
6505
6506 u32 pid;
6507 u32 tid;
6508 u64 start;
6509 u64 len;
6510 u64 pgoff;
6511 } event_id;
6512 };
6513
6514 static int perf_event_mmap_match(struct perf_event *event,
6515 void *data)
6516 {
6517 struct perf_mmap_event *mmap_event = data;
6518 struct vm_area_struct *vma = mmap_event->vma;
6519 int executable = vma->vm_flags & VM_EXEC;
6520
6521 return (!executable && event->attr.mmap_data) ||
6522 (executable && (event->attr.mmap || event->attr.mmap2));
6523 }
6524
6525 static void perf_event_mmap_output(struct perf_event *event,
6526 void *data)
6527 {
6528 struct perf_mmap_event *mmap_event = data;
6529 struct perf_output_handle handle;
6530 struct perf_sample_data sample;
6531 int size = mmap_event->event_id.header.size;
6532 int ret;
6533
6534 if (!perf_event_mmap_match(event, data))
6535 return;
6536
6537 if (event->attr.mmap2) {
6538 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
6539 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
6540 mmap_event->event_id.header.size += sizeof(mmap_event->min);
6541 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
6542 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
6543 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
6544 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
6545 }
6546
6547 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
6548 ret = perf_output_begin(&handle, event,
6549 mmap_event->event_id.header.size);
6550 if (ret)
6551 goto out;
6552
6553 mmap_event->event_id.pid = perf_event_pid(event, current);
6554 mmap_event->event_id.tid = perf_event_tid(event, current);
6555
6556 perf_output_put(&handle, mmap_event->event_id);
6557
6558 if (event->attr.mmap2) {
6559 perf_output_put(&handle, mmap_event->maj);
6560 perf_output_put(&handle, mmap_event->min);
6561 perf_output_put(&handle, mmap_event->ino);
6562 perf_output_put(&handle, mmap_event->ino_generation);
6563 perf_output_put(&handle, mmap_event->prot);
6564 perf_output_put(&handle, mmap_event->flags);
6565 }
6566
6567 __output_copy(&handle, mmap_event->file_name,
6568 mmap_event->file_size);
6569
6570 perf_event__output_id_sample(event, &handle, &sample);
6571
6572 perf_output_end(&handle);
6573 out:
6574 mmap_event->event_id.header.size = size;
6575 }
6576
6577 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
6578 {
6579 struct vm_area_struct *vma = mmap_event->vma;
6580 struct file *file = vma->vm_file;
6581 int maj = 0, min = 0;
6582 u64 ino = 0, gen = 0;
6583 u32 prot = 0, flags = 0;
6584 unsigned int size;
6585 char tmp[16];
6586 char *buf = NULL;
6587 char *name;
6588
6589 if (file) {
6590 struct inode *inode;
6591 dev_t dev;
6592
6593 buf = kmalloc(PATH_MAX, GFP_KERNEL);
6594 if (!buf) {
6595 name = "//enomem";
6596 goto cpy_name;
6597 }
6598 /*
6599 * d_path() works from the end of the buffer backwards, so we
6600 * need to add enough zero bytes after the string to handle
6601 * the 64bit alignment we do later.
6602 */
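/*
 * I.e. reserve sizeof(u64) == 8 trailing bytes in @buf, so the NUL
 * padding added after got_name below cannot run past the end of the
 * buffer.
 */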
6603 name = file_path(file, buf, PATH_MAX - sizeof(u64));
6604 if (IS_ERR(name)) {
6605 name = "//toolong";
6606 goto cpy_name;
6607 }
6608 inode = file_inode(vma->vm_file);
6609 dev = inode->i_sb->s_dev;
6610 ino = inode->i_ino;
6611 gen = inode->i_generation;
6612 maj = MAJOR(dev);
6613 min = MINOR(dev);
6614
6615 if (vma->vm_flags & VM_READ)
6616 prot |= PROT_READ;
6617 if (vma->vm_flags & VM_WRITE)
6618 prot |= PROT_WRITE;
6619 if (vma->vm_flags & VM_EXEC)
6620 prot |= PROT_EXEC;
6621
6622 if (vma->vm_flags & VM_MAYSHARE)
6623 flags = MAP_SHARED;
6624 else
6625 flags = MAP_PRIVATE;
6626
6627 if (vma->vm_flags & VM_DENYWRITE)
6628 flags |= MAP_DENYWRITE;
6629 if (vma->vm_flags & VM_MAYEXEC)
6630 flags |= MAP_EXECUTABLE;
6631 if (vma->vm_flags & VM_LOCKED)
6632 flags |= MAP_LOCKED;
6633 if (vma->vm_flags & VM_HUGETLB)
6634 flags |= MAP_HUGETLB;
6635
6636 goto got_name;
6637 } else {
6638 if (vma->vm_ops && vma->vm_ops->name) {
6639 name = (char *) vma->vm_ops->name(vma);
6640 if (name)
6641 goto cpy_name;
6642 }
6643
6644 name = (char *)arch_vma_name(vma);
6645 if (name)
6646 goto cpy_name;
6647
6648 if (vma->vm_start <= vma->vm_mm->start_brk &&
6649 vma->vm_end >= vma->vm_mm->brk) {
6650 name = "[heap]";
6651 goto cpy_name;
6652 }
6653 if (vma->vm_start <= vma->vm_mm->start_stack &&
6654 vma->vm_end >= vma->vm_mm->start_stack) {
6655 name = "[stack]";
6656 goto cpy_name;
6657 }
6658
6659 name = "//anon";
6660 goto cpy_name;
6661 }
6662
6663 cpy_name:
6664 strlcpy(tmp, name, sizeof(tmp));
6665 name = tmp;
6666 got_name:
6667 /*
6668 * Since our buffer works in 8 byte units we need to align our string
6669 * size to a multiple of 8. However, we must guarantee the tail end is
6670 * zero'd out to avoid leaking random bits to userspace.
6671 */
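/*
 * E.g. a 4-character name plus its NUL byte (5 bytes) is padded out
 * to 8 bytes, with the last 3 bytes explicitly zeroed.
 */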
6672 size = strlen(name)+1;
6673 while (!IS_ALIGNED(size, sizeof(u64)))
6674 name[size++] = '\0';
6675
6676 mmap_event->file_name = name;
6677 mmap_event->file_size = size;
6678 mmap_event->maj = maj;
6679 mmap_event->min = min;
6680 mmap_event->ino = ino;
6681 mmap_event->ino_generation = gen;
6682 mmap_event->prot = prot;
6683 mmap_event->flags = flags;
6684
6685 if (!(vma->vm_flags & VM_EXEC))
6686 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
6687
6688 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
6689
6690 perf_iterate_sb(perf_event_mmap_output,
6691 mmap_event,
6692 NULL);
6693
6694 kfree(buf);
6695 }
6696
6697 /*
6698 * Check whether inode and address range match filter criteria.
6699 */
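/*
 * I.e. the filter matches iff the inodes are identical and the
 * filter's [filter->offset, filter->offset + filter->size] range
 * overlaps the mapped file range [offset, offset + size].
 */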
6700 static bool perf_addr_filter_match(struct perf_addr_filter *filter,
6701 struct file *file, unsigned long offset,
6702 unsigned long size)
6703 {
6704 if (filter->inode != file->f_inode)
6705 return false;
6706
6707 if (filter->offset > offset + size)
6708 return false;
6709
6710 if (filter->offset + filter->size < offset)
6711 return false;
6712
6713 return true;
6714 }
6715
6716 static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
6717 {
6718 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6719 struct vm_area_struct *vma = data;
6720 unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
6721 struct file *file = vma->vm_file;
6722 struct perf_addr_filter *filter;
6723 unsigned int restart = 0, count = 0;
6724
6725 if (!has_addr_filter(event))
6726 return;
6727
6728 if (!file)
6729 return;
6730
6731 raw_spin_lock_irqsave(&ifh->lock, flags);
6732 list_for_each_entry(filter, &ifh->list, entry) {
6733 if (perf_addr_filter_match(filter, file, off,
6734 vma->vm_end - vma->vm_start)) {
6735 event->addr_filters_offs[count] = vma->vm_start;
6736 restart++;
6737 }
6738
6739 count++;
6740 }
6741
6742 if (restart)
6743 event->addr_filters_gen++;
6744 raw_spin_unlock_irqrestore(&ifh->lock, flags);
6745
6746 if (restart)
6747 perf_event_stop(event, 1);
6748 }
6749
6750 /*
6751 * Adjust all task's events' filters to the new vma
6752 */
6753 static void perf_addr_filters_adjust(struct vm_area_struct *vma)
6754 {
6755 struct perf_event_context *ctx;
6756 int ctxn;
6757
6758 /*
6759 * Data tracing isn't supported yet and as such there is no need
6760 * to keep track of anything that isn't related to executable code:
6761 */
6762 if (!(vma->vm_flags & VM_EXEC))
6763 return;
6764
6765 rcu_read_lock();
6766 for_each_task_context_nr(ctxn) {
6767 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
6768 if (!ctx)
6769 continue;
6770
6771 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
6772 }
6773 rcu_read_unlock();
6774 }
6775
6776 void perf_event_mmap(struct vm_area_struct *vma)
6777 {
6778 struct perf_mmap_event mmap_event;
6779
6780 if (!atomic_read(&nr_mmap_events))
6781 return;
6782
6783 mmap_event = (struct perf_mmap_event){
6784 .vma = vma,
6785 /* .file_name */
6786 /* .file_size */
6787 .event_id = {
6788 .header = {
6789 .type = PERF_RECORD_MMAP,
6790 .misc = PERF_RECORD_MISC_USER,
6791 /* .size */
6792 },
6793 /* .pid */
6794 /* .tid */
6795 .start = vma->vm_start,
6796 .len = vma->vm_end - vma->vm_start,
6797 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
6798 },
6799 /* .maj (attr_mmap2 only) */
6800 /* .min (attr_mmap2 only) */
6801 /* .ino (attr_mmap2 only) */
6802 /* .ino_generation (attr_mmap2 only) */
6803 /* .prot (attr_mmap2 only) */
6804 /* .flags (attr_mmap2 only) */
6805 };
6806
6807 perf_addr_filters_adjust(vma);
6808 perf_event_mmap_event(&mmap_event);
6809 }
6810
6811 void perf_event_aux_event(struct perf_event *event, unsigned long head,
6812 unsigned long size, u64 flags)
6813 {
6814 struct perf_output_handle handle;
6815 struct perf_sample_data sample;
6816 struct perf_aux_event {
6817 struct perf_event_header header;
6818 u64 offset;
6819 u64 size;
6820 u64 flags;
6821 } rec = {
6822 .header = {
6823 .type = PERF_RECORD_AUX,
6824 .misc = 0,
6825 .size = sizeof(rec),
6826 },
6827 .offset = head,
6828 .size = size,
6829 .flags = flags,
6830 };
6831 int ret;
6832
6833 perf_event_header__init_id(&rec.header, &sample, event);
6834 ret = perf_output_begin(&handle, event, rec.header.size);
6835
6836 if (ret)
6837 return;
6838
6839 perf_output_put(&handle, rec);
6840 perf_event__output_id_sample(event, &handle, &sample);
6841
6842 perf_output_end(&handle);
6843 }
6844
6845 /*
6846 * Lost/dropped samples logging
6847 */
6848 void perf_log_lost_samples(struct perf_event *event, u64 lost)
6849 {
6850 struct perf_output_handle handle;
6851 struct perf_sample_data sample;
6852 int ret;
6853
6854 struct {
6855 struct perf_event_header header;
6856 u64 lost;
6857 } lost_samples_event = {
6858 .header = {
6859 .type = PERF_RECORD_LOST_SAMPLES,
6860 .misc = 0,
6861 .size = sizeof(lost_samples_event),
6862 },
6863 .lost = lost,
6864 };
6865
6866 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
6867
6868 ret = perf_output_begin(&handle, event,
6869 lost_samples_event.header.size);
6870 if (ret)
6871 return;
6872
6873 perf_output_put(&handle, lost_samples_event);
6874 perf_event__output_id_sample(event, &handle, &sample);
6875 perf_output_end(&handle);
6876 }
6877
6878 /*
6879 * context_switch tracking
6880 */
6881
6882 struct perf_switch_event {
6883 struct task_struct *task;
6884 struct task_struct *next_prev;
6885
6886 struct {
6887 struct perf_event_header header;
6888 u32 next_prev_pid;
6889 u32 next_prev_tid;
6890 } event_id;
6891 };
6892
6893 static int perf_event_switch_match(struct perf_event *event)
6894 {
6895 return event->attr.context_switch;
6896 }
6897
6898 static void perf_event_switch_output(struct perf_event *event, void *data)
6899 {
6900 struct perf_switch_event *se = data;
6901 struct perf_output_handle handle;
6902 struct perf_sample_data sample;
6903 int ret;
6904
6905 if (!perf_event_switch_match(event))
6906 return;
6907
6908 /* Only CPU-wide events are allowed to see next/prev pid/tid */
6909 if (event->ctx->task) {
6910 se->event_id.header.type = PERF_RECORD_SWITCH;
6911 se->event_id.header.size = sizeof(se->event_id.header);
6912 } else {
6913 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
6914 se->event_id.header.size = sizeof(se->event_id);
6915 se->event_id.next_prev_pid =
6916 perf_event_pid(event, se->next_prev);
6917 se->event_id.next_prev_tid =
6918 perf_event_tid(event, se->next_prev);
6919 }
6920
6921 perf_event_header__init_id(&se->event_id.header, &sample, event);
6922
6923 ret = perf_output_begin(&handle, event, se->event_id.header.size);
6924 if (ret)
6925 return;
6926
6927 if (event->ctx->task)
6928 perf_output_put(&handle, se->event_id.header);
6929 else
6930 perf_output_put(&handle, se->event_id);
6931
6932 perf_event__output_id_sample(event, &handle, &sample);
6933
6934 perf_output_end(&handle);
6935 }
6936
6937 static void perf_event_switch(struct task_struct *task,
6938 struct task_struct *next_prev, bool sched_in)
6939 {
6940 struct perf_switch_event switch_event;
6941
6942 /* N.B. caller checks nr_switch_events != 0 */
6943
6944 switch_event = (struct perf_switch_event){
6945 .task = task,
6946 .next_prev = next_prev,
6947 .event_id = {
6948 .header = {
6949 /* .type */
6950 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
6951 /* .size */
6952 },
6953 /* .next_prev_pid */
6954 /* .next_prev_tid */
6955 },
6956 };
6957
6958 perf_iterate_sb(perf_event_switch_output,
6959 &switch_event,
6960 NULL);
6961 }
6962
6963 /*
6964 * IRQ throttle logging
6965 */
6966
6967 static void perf_log_throttle(struct perf_event *event, int enable)
6968 {
6969 struct perf_output_handle handle;
6970 struct perf_sample_data sample;
6971 int ret;
6972
6973 struct {
6974 struct perf_event_header header;
6975 u64 time;
6976 u64 id;
6977 u64 stream_id;
6978 } throttle_event = {
6979 .header = {
6980 .type = PERF_RECORD_THROTTLE,
6981 .misc = 0,
6982 .size = sizeof(throttle_event),
6983 },
6984 .time = perf_event_clock(event),
6985 .id = primary_event_id(event),
6986 .stream_id = event->id,
6987 };
6988
6989 if (enable)
6990 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
6991
6992 perf_event_header__init_id(&throttle_event.header, &sample, event);
6993
6994 ret = perf_output_begin(&handle, event,
6995 throttle_event.header.size);
6996 if (ret)
6997 return;
6998
6999 perf_output_put(&handle, throttle_event);
7000 perf_event__output_id_sample(event, &handle, &sample);
7001 perf_output_end(&handle);
7002 }
7003
7004 static void perf_log_itrace_start(struct perf_event *event)
7005 {
7006 struct perf_output_handle handle;
7007 struct perf_sample_data sample;
7008 struct perf_aux_event {
7009 struct perf_event_header header;
7010 u32 pid;
7011 u32 tid;
7012 } rec;
7013 int ret;
7014
7015 if (event->parent)
7016 event = event->parent;
7017
7018 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
7019 event->hw.itrace_started)
7020 return;
7021
7022 rec.header.type = PERF_RECORD_ITRACE_START;
7023 rec.header.misc = 0;
7024 rec.header.size = sizeof(rec);
7025 rec.pid = perf_event_pid(event, current);
7026 rec.tid = perf_event_tid(event, current);
7027
7028 perf_event_header__init_id(&rec.header, &sample, event);
7029 ret = perf_output_begin(&handle, event, rec.header.size);
7030
7031 if (ret)
7032 return;
7033
7034 perf_output_put(&handle, rec);
7035 perf_event__output_id_sample(event, &handle, &sample);
7036
7037 perf_output_end(&handle);
7038 }
7039
7040 /*
7041 * Generic event overflow handling, sampling.
7042 */
7043
7044 static int __perf_event_overflow(struct perf_event *event,
7045 int throttle, struct perf_sample_data *data,
7046 struct pt_regs *regs)
7047 {
7048 int events = atomic_read(&event->event_limit);
7049 struct hw_perf_event *hwc = &event->hw;
7050 u64 seq;
7051 int ret = 0;
7052
7053 /*
7054 * Non-sampling counters might still use the PMI to fold short
7055 * hardware counters; ignore those.
7056 */
7057 if (unlikely(!is_sampling_event(event)))
7058 return 0;
7059
7060 seq = __this_cpu_read(perf_throttled_seq);
7061 if (seq != hwc->interrupts_seq) {
7062 hwc->interrupts_seq = seq;
7063 hwc->interrupts = 1;
7064 } else {
7065 hwc->interrupts++;
7066 if (unlikely(throttle
7067 && hwc->interrupts >= max_samples_per_tick)) {
7068 __this_cpu_inc(perf_throttled_count);
7069 tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
7070 hwc->interrupts = MAX_INTERRUPTS;
7071 perf_log_throttle(event, 0);
7072 ret = 1;
7073 }
7074 }
7075
7076 if (event->attr.freq) {
7077 u64 now = perf_clock();
7078 s64 delta = now - hwc->freq_time_stamp;
7079
7080 hwc->freq_time_stamp = now;
7081
7082 if (delta > 0 && delta < 2*TICK_NSEC)
7083 perf_adjust_period(event, delta, hwc->last_period, true);
7084 }
7085
7086 /*
7087 * XXX event_limit might not quite work as expected on inherited
7088 * events
7089 */
7090
7091 event->pending_kill = POLL_IN;
7092 if (events && atomic_dec_and_test(&event->event_limit)) {
7093 ret = 1;
7094 event->pending_kill = POLL_HUP;
7095
7096 perf_event_disable_inatomic(event);
7097 }
7098
7099 READ_ONCE(event->overflow_handler)(event, data, regs);
7100
7101 if (*perf_event_fasync(event) && event->pending_kill) {
7102 event->pending_wakeup = 1;
7103 irq_work_queue(&event->pending);
7104 }
7105
7106 return ret;
7107 }
7108
7109 int perf_event_overflow(struct perf_event *event,
7110 struct perf_sample_data *data,
7111 struct pt_regs *regs)
7112 {
7113 return __perf_event_overflow(event, 1, data, regs);
7114 }
7115
7116 /*
7117 * Generic software event infrastructure
7118 */
7119
7120 struct swevent_htable {
7121 struct swevent_hlist *swevent_hlist;
7122 struct mutex hlist_mutex;
7123 int hlist_refcount;
7124
7125 /* Recursion avoidance in each context */
7126 int recursion[PERF_NR_CONTEXTS];
7127 };
7128
7129 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
7130
7131 /*
7132 * We directly increment event->count and keep a second value in
7133 * event->hw.period_left to count intervals. This period value
7134 * is kept in the range [-sample_period, 0] so that we can use the
7135 * sign as trigger.
7136 */
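/*
 * Worked example with illustrative numbers: if period == 100 and
 * period_left has climbed to 250, perf_swevent_set_period() reports
 * nr = (100 + 250) / 100 = 3 overflows and rewinds period_left to
 * 250 - 3 * 100 == -50, i.e. back into the [-sample_period, 0] range.
 */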
7137
7138 u64 perf_swevent_set_period(struct perf_event *event)
7139 {
7140 struct hw_perf_event *hwc = &event->hw;
7141 u64 period = hwc->last_period;
7142 u64 nr, offset;
7143 s64 old, val;
7144
7145 hwc->last_period = hwc->sample_period;
7146
7147 again:
7148 old = val = local64_read(&hwc->period_left);
7149 if (val < 0)
7150 return 0;
7151
7152 nr = div64_u64(period + val, period);
7153 offset = nr * period;
7154 val -= offset;
7155 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7156 goto again;
7157
7158 return nr;
7159 }
7160
7161 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
7162 struct perf_sample_data *data,
7163 struct pt_regs *regs)
7164 {
7165 struct hw_perf_event *hwc = &event->hw;
7166 int throttle = 0;
7167
7168 if (!overflow)
7169 overflow = perf_swevent_set_period(event);
7170
7171 if (hwc->interrupts == MAX_INTERRUPTS)
7172 return;
7173
7174 for (; overflow; overflow--) {
7175 if (__perf_event_overflow(event, throttle,
7176 data, regs)) {
7177 /*
7178 * We inhibit the overflow from happening when
7179 * hwc->interrupts == MAX_INTERRUPTS.
7180 */
7181 break;
7182 }
7183 throttle = 1;
7184 }
7185 }
7186
7187 static void perf_swevent_event(struct perf_event *event, u64 nr,
7188 struct perf_sample_data *data,
7189 struct pt_regs *regs)
7190 {
7191 struct hw_perf_event *hwc = &event->hw;
7192
7193 local64_add(nr, &event->count);
7194
7195 if (!regs)
7196 return;
7197
7198 if (!is_sampling_event(event))
7199 return;
7200
7201 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
7202 data->period = nr;
7203 return perf_swevent_overflow(event, 1, data, regs);
7204 } else
7205 data->period = event->hw.last_period;
7206
7207 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
7208 return perf_swevent_overflow(event, 1, data, regs);
7209
7210 if (local64_add_negative(nr, &hwc->period_left))
7211 return;
7212
7213 perf_swevent_overflow(event, 0, data, regs);
7214 }
7215
7216 static int perf_exclude_event(struct perf_event *event,
7217 struct pt_regs *regs)
7218 {
7219 if (event->hw.state & PERF_HES_STOPPED)
7220 return 1;
7221
7222 if (regs) {
7223 if (event->attr.exclude_user && user_mode(regs))
7224 return 1;
7225
7226 if (event->attr.exclude_kernel && !user_mode(regs))
7227 return 1;
7228 }
7229
7230 return 0;
7231 }
7232
7233 static int perf_swevent_match(struct perf_event *event,
7234 enum perf_type_id type,
7235 u32 event_id,
7236 struct perf_sample_data *data,
7237 struct pt_regs *regs)
7238 {
7239 if (event->attr.type != type)
7240 return 0;
7241
7242 if (event->attr.config != event_id)
7243 return 0;
7244
7245 if (perf_exclude_event(event, regs))
7246 return 0;
7247
7248 return 1;
7249 }
7250
7251 static inline u64 swevent_hash(u64 type, u32 event_id)
7252 {
7253 u64 val = event_id | (type << 32);
7254
7255 return hash_64(val, SWEVENT_HLIST_BITS);
7256 }
7257
7258 static inline struct hlist_head *
7259 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
7260 {
7261 u64 hash = swevent_hash(type, event_id);
7262
7263 return &hlist->heads[hash];
7264 }
7265
7266 /* For the read side: look up events when they trigger */
7267 static inline struct hlist_head *
7268 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
7269 {
7270 struct swevent_hlist *hlist;
7271
7272 hlist = rcu_dereference(swhash->swevent_hlist);
7273 if (!hlist)
7274 return NULL;
7275
7276 return __find_swevent_head(hlist, type, event_id);
7277 }
7278
7279 /* For the event head insertion and removal in the hlist */
7280 static inline struct hlist_head *
7281 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
7282 {
7283 struct swevent_hlist *hlist;
7284 u32 event_id = event->attr.config;
7285 u64 type = event->attr.type;
7286
7287 /*
7288 * Event scheduling is always serialized against hlist allocation
7289 * and release, which makes the protected version suitable here.
7290 * The context lock guarantees that.
7291 */
7292 hlist = rcu_dereference_protected(swhash->swevent_hlist,
7293 lockdep_is_held(&event->ctx->lock));
7294 if (!hlist)
7295 return NULL;
7296
7297 return __find_swevent_head(hlist, type, event_id);
7298 }
7299
7300 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
7301 u64 nr,
7302 struct perf_sample_data *data,
7303 struct pt_regs *regs)
7304 {
7305 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
7306 struct perf_event *event;
7307 struct hlist_head *head;
7308
7309 rcu_read_lock();
7310 head = find_swevent_head_rcu(swhash, type, event_id);
7311 if (!head)
7312 goto end;
7313
7314 hlist_for_each_entry_rcu(event, head, hlist_entry) {
7315 if (perf_swevent_match(event, type, event_id, data, regs))
7316 perf_swevent_event(event, nr, data, regs);
7317 }
7318 end:
7319 rcu_read_unlock();
7320 }
7321
7322 DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
7323
7324 int perf_swevent_get_recursion_context(void)
7325 {
7326 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
7327
7328 return get_recursion_context(swhash->recursion);
7329 }
7330 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
7331
7332 void perf_swevent_put_recursion_context(int rctx)
7333 {
7334 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
7335
7336 put_recursion_context(swhash->recursion, rctx);
7337 }
7338
7339 void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
7340 {
7341 struct perf_sample_data data;
7342
7343 if (WARN_ON_ONCE(!regs))
7344 return;
7345
7346 perf_sample_data_init(&data, addr, 0);
7347 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
7348 }
7349
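/*
 * Typical call site (illustrative): the perf_sw_event() wrapper in
 * <linux/perf_event.h> checks the perf_swevent_enabled static key and,
 * if it is set, calls this function; e.g. the page-fault path does
 * roughly:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 */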
7350 void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
7351 {
7352 int rctx;
7353
7354 preempt_disable_notrace();
7355 rctx = perf_swevent_get_recursion_context();
7356 if (unlikely(rctx < 0))
7357 goto fail;
7358
7359 ___perf_sw_event(event_id, nr, regs, addr);
7360
7361 perf_swevent_put_recursion_context(rctx);
7362 fail:
7363 preempt_enable_notrace();
7364 }
7365
7366 static void perf_swevent_read(struct perf_event *event)
7367 {
7368 }
7369
7370 static int perf_swevent_add(struct perf_event *event, int flags)
7371 {
7372 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
7373 struct hw_perf_event *hwc = &event->hw;
7374 struct hlist_head *head;
7375
7376 if (is_sampling_event(event)) {
7377 hwc->last_period = hwc->sample_period;
7378 perf_swevent_set_period(event);
7379 }
7380
7381 hwc->state = !(flags & PERF_EF_START);
7382
7383 head = find_swevent_head(swhash, event);
7384 if (WARN_ON_ONCE(!head))
7385 return -EINVAL;
7386
7387 hlist_add_head_rcu(&event->hlist_entry, head);
7388 perf_event_update_userpage(event);
7389
7390 return 0;
7391 }
7392
7393 static void perf_swevent_del(struct perf_event *event, int flags)
7394 {
7395 hlist_del_rcu(&event->hlist_entry);
7396 }
7397
7398 static void perf_swevent_start(struct perf_event *event, int flags)
7399 {
7400 event->hw.state = 0;
7401 }
7402
7403 static void perf_swevent_stop(struct perf_event *event, int flags)
7404 {
7405 event->hw.state = PERF_HES_STOPPED;
7406 }
7407
7408 /* Deref the hlist from the update side */
7409 static inline struct swevent_hlist *
7410 swevent_hlist_deref(struct swevent_htable *swhash)
7411 {
7412 return rcu_dereference_protected(swhash->swevent_hlist,
7413 lockdep_is_held(&swhash->hlist_mutex));
7414 }
7415
7416 static void swevent_hlist_release(struct swevent_htable *swhash)
7417 {
7418 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
7419
7420 if (!hlist)
7421 return;
7422
7423 RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
7424 kfree_rcu(hlist, rcu_head);
7425 }
7426
7427 static void swevent_hlist_put_cpu(int cpu)
7428 {
7429 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7430
7431 mutex_lock(&swhash->hlist_mutex);
7432
7433 if (!--swhash->hlist_refcount)
7434 swevent_hlist_release(swhash);
7435
7436 mutex_unlock(&swhash->hlist_mutex);
7437 }
7438
7439 static void swevent_hlist_put(void)
7440 {
7441 int cpu;
7442
7443 for_each_possible_cpu(cpu)
7444 swevent_hlist_put_cpu(cpu);
7445 }
7446
7447 static int swevent_hlist_get_cpu(int cpu)
7448 {
7449 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7450 int err = 0;
7451
7452 mutex_lock(&swhash->hlist_mutex);
7453 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
7454 struct swevent_hlist *hlist;
7455
7456 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
7457 if (!hlist) {
7458 err = -ENOMEM;
7459 goto exit;
7460 }
7461 rcu_assign_pointer(swhash->swevent_hlist, hlist);
7462 }
7463 swhash->hlist_refcount++;
7464 exit:
7465 mutex_unlock(&swhash->hlist_mutex);
7466
7467 return err;
7468 }
7469
7470 static int swevent_hlist_get(void)
7471 {
7472 int err, cpu, failed_cpu;
7473
7474 get_online_cpus();
7475 for_each_possible_cpu(cpu) {
7476 err = swevent_hlist_get_cpu(cpu);
7477 if (err) {
7478 failed_cpu = cpu;
7479 goto fail;
7480 }
7481 }
7482 put_online_cpus();
7483
7484 return 0;
7485 fail:
7486 for_each_possible_cpu(cpu) {
7487 if (cpu == failed_cpu)
7488 break;
7489 swevent_hlist_put_cpu(cpu);
7490 }
7491
7492 put_online_cpus();
7493 return err;
7494 }
7495
7496 struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
7497
7498 static void sw_perf_event_destroy(struct perf_event *event)
7499 {
7500 u64 event_id = event->attr.config;
7501
7502 WARN_ON(event->parent);
7503
7504 static_key_slow_dec(&perf_swevent_enabled[event_id]);
7505 swevent_hlist_put();
7506 }
7507
7508 static int perf_swevent_init(struct perf_event *event)
7509 {
7510 u64 event_id = event->attr.config;
7511
7512 if (event->attr.type != PERF_TYPE_SOFTWARE)
7513 return -ENOENT;
7514
7515 /*
7516 * no branch sampling for software events
7517 */
7518 if (has_branch_stack(event))
7519 return -EOPNOTSUPP;
7520
7521 switch (event_id) {
7522 case PERF_COUNT_SW_CPU_CLOCK:
7523 case PERF_COUNT_SW_TASK_CLOCK:
7524 return -ENOENT;
7525
7526 default:
7527 break;
7528 }
7529
7530 if (event_id >= PERF_COUNT_SW_MAX)
7531 return -ENOENT;
7532
7533 if (!event->parent) {
7534 int err;
7535
7536 err = swevent_hlist_get();
7537 if (err)
7538 return err;
7539
7540 static_key_slow_inc(&perf_swevent_enabled[event_id]);
7541 event->destroy = sw_perf_event_destroy;
7542 }
7543
7544 return 0;
7545 }
7546
7547 static struct pmu perf_swevent = {
7548 .task_ctx_nr = perf_sw_context,
7549
7550 .capabilities = PERF_PMU_CAP_NO_NMI,
7551
7552 .event_init = perf_swevent_init,
7553 .add = perf_swevent_add,
7554 .del = perf_swevent_del,
7555 .start = perf_swevent_start,
7556 .stop = perf_swevent_stop,
7557 .read = perf_swevent_read,
7558 };
7559
7560 #ifdef CONFIG_EVENT_TRACING
7561
7562 static int perf_tp_filter_match(struct perf_event *event,
7563 struct perf_sample_data *data)
7564 {
7565 void *record = data->raw->frag.data;
7566
7567 /* only top level events have filters set */
7568 if (event->parent)
7569 event = event->parent;
7570
7571 if (likely(!event->filter) || filter_match_preds(event->filter, record))
7572 return 1;
7573 return 0;
7574 }
7575
7576 static int perf_tp_event_match(struct perf_event *event,
7577 struct perf_sample_data *data,
7578 struct pt_regs *regs)
7579 {
7580 if (event->hw.state & PERF_HES_STOPPED)
7581 return 0;
7582 /*
7583 * All tracepoints are from kernel-space.
7584 */
7585 if (event->attr.exclude_kernel)
7586 return 0;
7587
7588 if (!perf_tp_filter_match(event, data))
7589 return 0;
7590
7591 return 1;
7592 }
7593
7594 void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
7595 struct trace_event_call *call, u64 count,
7596 struct pt_regs *regs, struct hlist_head *head,
7597 struct task_struct *task)
7598 {
7599 struct bpf_prog *prog = call->prog;
7600
7601 if (prog) {
7602 *(struct pt_regs **)raw_data = regs;
7603 if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) {
7604 perf_swevent_put_recursion_context(rctx);
7605 return;
7606 }
7607 }
7608 perf_tp_event(call->event.type, count, raw_data, size, regs, head,
7609 rctx, task);
7610 }
7611 EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
7612
7613 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
7614 struct pt_regs *regs, struct hlist_head *head, int rctx,
7615 struct task_struct *task)
7616 {
7617 struct perf_sample_data data;
7618 struct perf_event *event;
7619
7620 struct perf_raw_record raw = {
7621 .frag = {
7622 .size = entry_size,
7623 .data = record,
7624 },
7625 };
7626
7627 perf_sample_data_init(&data, 0, 0);
7628 data.raw = &raw;
7629
7630 perf_trace_buf_update(record, event_type);
7631
7632 hlist_for_each_entry_rcu(event, head, hlist_entry) {
7633 if (perf_tp_event_match(event, &data, regs))
7634 perf_swevent_event(event, count, &data, regs);
7635 }
7636
7637 /*
7638 * If we were given a target task, also iterate its context and
7639 * deliver this event there too.
7640 */
7641 if (task && task != current) {
7642 struct perf_event_context *ctx;
7643 struct trace_entry *entry = record;
7644
7645 rcu_read_lock();
7646 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
7647 if (!ctx)
7648 goto unlock;
7649
7650 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
7651 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7652 continue;
7653 if (event->attr.config != entry->type)
7654 continue;
7655 if (perf_tp_event_match(event, &data, regs))
7656 perf_swevent_event(event, count, &data, regs);
7657 }
7658 unlock:
7659 rcu_read_unlock();
7660 }
7661
7662 perf_swevent_put_recursion_context(rctx);
7663 }
7664 EXPORT_SYMBOL_GPL(perf_tp_event);
7665
7666 static void tp_perf_event_destroy(struct perf_event *event)
7667 {
7668 perf_trace_destroy(event);
7669 }
7670
7671 static int perf_tp_event_init(struct perf_event *event)
7672 {
7673 int err;
7674
7675 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7676 return -ENOENT;
7677
7678 /*
7679 * no branch sampling for tracepoint events
7680 */
7681 if (has_branch_stack(event))
7682 return -EOPNOTSUPP;
7683
7684 err = perf_trace_init(event);
7685 if (err)
7686 return err;
7687
7688 event->destroy = tp_perf_event_destroy;
7689
7690 return 0;
7691 }
7692
7693 static struct pmu perf_tracepoint = {
7694 .task_ctx_nr = perf_sw_context,
7695
7696 .event_init = perf_tp_event_init,
7697 .add = perf_trace_add,
7698 .del = perf_trace_del,
7699 .start = perf_swevent_start,
7700 .stop = perf_swevent_stop,
7701 .read = perf_swevent_read,
7702 };
7703
7704 static inline void perf_tp_register(void)
7705 {
7706 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
7707 }
7708
7709 static void perf_event_free_filter(struct perf_event *event)
7710 {
7711 ftrace_profile_free_filter(event);
7712 }
7713
7714 #ifdef CONFIG_BPF_SYSCALL
7715 static void bpf_overflow_handler(struct perf_event *event,
7716 struct perf_sample_data *data,
7717 struct pt_regs *regs)
7718 {
7719 struct bpf_perf_event_data_kern ctx = {
7720 .data = data,
7721 .regs = regs,
7722 };
7723 int ret = 0;
7724
7725 preempt_disable();
7726 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
7727 goto out;
7728 rcu_read_lock();
7729 ret = BPF_PROG_RUN(event->prog, (void *)&ctx);
7730 rcu_read_unlock();
7731 out:
7732 __this_cpu_dec(bpf_prog_active);
7733 preempt_enable();
7734 if (!ret)
7735 return;
7736
7737 event->orig_overflow_handler(event, data, regs);
7738 }
7739
7740 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
7741 {
7742 struct bpf_prog *prog;
7743
7744 if (event->overflow_handler_context)
7745 /* hw breakpoint or kernel counter */
7746 return -EINVAL;
7747
7748 if (event->prog)
7749 return -EEXIST;
7750
7751 prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_PERF_EVENT);
7752 if (IS_ERR(prog))
7753 return PTR_ERR(prog);
7754
7755 event->prog = prog;
7756 event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
7757 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
7758 return 0;
7759 }
7760
7761 static void perf_event_free_bpf_handler(struct perf_event *event)
7762 {
7763 struct bpf_prog *prog = event->prog;
7764
7765 if (!prog)
7766 return;
7767
7768 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
7769 event->prog = NULL;
7770 bpf_prog_put(prog);
7771 }
7772 #else
7773 static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
7774 {
7775 return -EOPNOTSUPP;
7776 }
7777 static void perf_event_free_bpf_handler(struct perf_event *event)
7778 {
7779 }
7780 #endif
7781
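/*
 * Reached via the PERF_EVENT_IOC_SET_BPF ioctl: @prog_fd refers to a
 * loaded BPF program, which is attached either as an overflow handler
 * (for hardware/software events) or to the underlying tracepoint/kprobe.
 */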
7782 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7783 {
7784 bool is_kprobe, is_tracepoint;
7785 struct bpf_prog *prog;
7786
7787 if (event->attr.type == PERF_TYPE_HARDWARE ||
7788 event->attr.type == PERF_TYPE_SOFTWARE)
7789 return perf_event_set_bpf_handler(event, prog_fd);
7790
7791 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7792 return -EINVAL;
7793
7794 if (event->tp_event->prog)
7795 return -EEXIST;
7796
7797 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
7798 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
7799 if (!is_kprobe && !is_tracepoint)
7800 /* bpf programs can only be attached to u/kprobe or tracepoint */
7801 return -EINVAL;
7802
7803 prog = bpf_prog_get(prog_fd);
7804 if (IS_ERR(prog))
7805 return PTR_ERR(prog);
7806
7807 if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
7808 (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
7809 /* valid fd, but invalid bpf program type */
7810 bpf_prog_put(prog);
7811 return -EINVAL;
7812 }
7813
7814 if (is_tracepoint) {
7815 int off = trace_event_get_offsets(event->tp_event);
7816
7817 if (prog->aux->max_ctx_offset > off) {
7818 bpf_prog_put(prog);
7819 return -EACCES;
7820 }
7821 }
7822 event->tp_event->prog = prog;
7823
7824 return 0;
7825 }
7826
7827 static void perf_event_free_bpf_prog(struct perf_event *event)
7828 {
7829 struct bpf_prog *prog;
7830
7831 perf_event_free_bpf_handler(event);
7832
7833 if (!event->tp_event)
7834 return;
7835
7836 prog = event->tp_event->prog;
7837 if (prog) {
7838 event->tp_event->prog = NULL;
7839 bpf_prog_put(prog);
7840 }
7841 }
7842
7843 #else
7844
7845 static inline void perf_tp_register(void)
7846 {
7847 }
7848
7849 static void perf_event_free_filter(struct perf_event *event)
7850 {
7851 }
7852
7853 static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7854 {
7855 return -ENOENT;
7856 }
7857
7858 static void perf_event_free_bpf_prog(struct perf_event *event)
7859 {
7860 }
7861 #endif /* CONFIG_EVENT_TRACING */
7862
7863 #ifdef CONFIG_HAVE_HW_BREAKPOINT
7864 void perf_bp_event(struct perf_event *bp, void *data)
7865 {
7866 struct perf_sample_data sample;
7867 struct pt_regs *regs = data;
7868
7869 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
7870
7871 if (!bp->hw.state && !perf_exclude_event(bp, regs))
7872 perf_swevent_event(bp, 1, &sample, regs);
7873 }
7874 #endif
7875
7876 /*
7877 * Allocate a new address filter
7878 */
7879 static struct perf_addr_filter *
7880 perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
7881 {
7882 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
7883 struct perf_addr_filter *filter;
7884
7885 filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
7886 if (!filter)
7887 return NULL;
7888
7889 INIT_LIST_HEAD(&filter->entry);
7890 list_add_tail(&filter->entry, filters);
7891
7892 return filter;
7893 }
7894
7895 static void free_filters_list(struct list_head *filters)
7896 {
7897 struct perf_addr_filter *filter, *iter;
7898
7899 list_for_each_entry_safe(filter, iter, filters, entry) {
7900 if (filter->inode)
7901 iput(filter->inode);
7902 list_del(&filter->entry);
7903 kfree(filter);
7904 }
7905 }
7906
7907 /*
7908 * Free existing address filters and optionally install new ones
7909 */
7910 static void perf_addr_filters_splice(struct perf_event *event,
7911 struct list_head *head)
7912 {
7913 unsigned long flags;
7914 LIST_HEAD(list);
7915
7916 if (!has_addr_filter(event))
7917 return;
7918
7919 /* don't bother with children; they don't have their own filters */
7920 if (event->parent)
7921 return;
7922
7923 raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
7924
7925 list_splice_init(&event->addr_filters.list, &list);
7926 if (head)
7927 list_splice(head, &event->addr_filters.list);
7928
7929 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
7930
7931 free_filters_list(&list);
7932 }
7933
7934 /*
7935 * Scan through mm's vmas and see if one of them matches the
7936 * @filter; if so, adjust filter's address range.
7937 * Called with mm::mmap_sem down for reading.
7938 */
7939 static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
7940 struct mm_struct *mm)
7941 {
7942 struct vm_area_struct *vma;
7943
7944 for (vma = mm->mmap; vma; vma = vma->vm_next) {
7945 struct file *file = vma->vm_file;
7946 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
7947 unsigned long vma_size = vma->vm_end - vma->vm_start;
7948
7949 if (!file)
7950 continue;
7951
7952 if (!perf_addr_filter_match(filter, file, off, vma_size))
7953 continue;
7954
7955 return vma->vm_start;
7956 }
7957
7958 return 0;
7959 }
7960
7961 /*
7962 * Update event's address range filters based on the
7963 * task's existing mappings, if any.
7964 */
7965 static void perf_event_addr_filters_apply(struct perf_event *event)
7966 {
7967 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
7968 struct task_struct *task = READ_ONCE(event->ctx->task);
7969 struct perf_addr_filter *filter;
7970 struct mm_struct *mm = NULL;
7971 unsigned int count = 0;
7972 unsigned long flags;
7973
7974 /*
7975 * We may observe TASK_TOMBSTONE, which means that the event tear-down
7976 * will stop on the parent's child_mutex that our caller is also holding.
7977 */
7978 if (task == TASK_TOMBSTONE)
7979 return;
7980
7981 mm = get_task_mm(event->ctx->task);
7982 if (!mm)
7983 goto restart;
7984
7985 down_read(&mm->mmap_sem);
7986
7987 raw_spin_lock_irqsave(&ifh->lock, flags);
7988 list_for_each_entry(filter, &ifh->list, entry) {
7989 event->addr_filters_offs[count] = 0;
7990
7991 /*
7992 * Adjust the base offset if the filter is associated with a binary
7993 * that needs to be mapped:
7994 */
7995 if (filter->inode)
7996 event->addr_filters_offs[count] =
7997 perf_addr_filter_apply(filter, mm);
7998
7999 count++;
8000 }
8001
8002 event->addr_filters_gen++;
8003 raw_spin_unlock_irqrestore(&ifh->lock, flags);
8004
8005 up_read(&mm->mmap_sem);
8006
8007 mmput(mm);
8008
8009 restart:
8010 perf_event_stop(event, 1);
8011 }
8012
8013 /*
8014 * Address range filtering: limiting the data to certain
8015 * instruction address ranges. Filters are ioctl()ed to us from
8016 * userspace as ASCII strings.
8017 *
8018 * Filter string format:
8019 *
8020 * ACTION RANGE_SPEC
8021 * where ACTION is one of the
8022 * * "filter": limit the trace to this region
8023 * * "start": start tracing from this address
8024 * * "stop": stop tracing at this address/region;
8025 * RANGE_SPEC is
8026 * * for kernel addresses: <start address>[/<size>]
8027 * * for object files: <start address>[/<size>]@</path/to/object/file>
8028 *
8029 * If <size> is not specified, the range is treated as a single address.
8030 */
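/*
 * Examples (the addresses and object file path below are purely
 * illustrative):
 *
 *	filter 0x42000/0x1000@/usr/lib/libexample.so
 *	start 0xffffffff81000000/0x4000
 *	stop 0xffffffff81004000
 */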
8031 enum {
8032 IF_ACT_NONE = -1,
8033 IF_ACT_FILTER,
8034 IF_ACT_START,
8035 IF_ACT_STOP,
8036 IF_SRC_FILE,
8037 IF_SRC_KERNEL,
8038 IF_SRC_FILEADDR,
8039 IF_SRC_KERNELADDR,
8040 };
8041
8042 enum {
8043 IF_STATE_ACTION = 0,
8044 IF_STATE_SOURCE,
8045 IF_STATE_END,
8046 };
8047
8048 static const match_table_t if_tokens = {
8049 { IF_ACT_FILTER, "filter" },
8050 { IF_ACT_START, "start" },
8051 { IF_ACT_STOP, "stop" },
8052 { IF_SRC_FILE, "%u/%u@%s" },
8053 { IF_SRC_KERNEL, "%u/%u" },
8054 { IF_SRC_FILEADDR, "%u@%s" },
8055 { IF_SRC_KERNELADDR, "%u" },
8056 { IF_ACT_NONE, NULL },
8057 };
8058
8059 /*
8060 * Address filter string parser
8061 */
8062 static int
8063 perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
8064 struct list_head *filters)
8065 {
8066 struct perf_addr_filter *filter = NULL;
8067 char *start, *orig, *filename = NULL;
8068 struct path path;
8069 substring_t args[MAX_OPT_ARGS];
8070 int state = IF_STATE_ACTION, token;
8071 unsigned int kernel = 0;
8072 int ret = -EINVAL;
8073
8074 orig = fstr = kstrdup(fstr, GFP_KERNEL);
8075 if (!fstr)
8076 return -ENOMEM;
8077
8078 while ((start = strsep(&fstr, " ,\n")) != NULL) {
8079 ret = -EINVAL;
8080
8081 if (!*start)
8082 continue;
8083
8084 /* filter definition begins */
8085 if (state == IF_STATE_ACTION) {
8086 filter = perf_addr_filter_new(event, filters);
8087 if (!filter)
8088 goto fail;
8089 }
8090
8091 token = match_token(start, if_tokens, args);
8092 switch (token) {
8093 case IF_ACT_FILTER:
8094 case IF_ACT_START:
8095 filter->filter = 1;
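/* fall through */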
8096
8097 case IF_ACT_STOP:
8098 if (state != IF_STATE_ACTION)
8099 goto fail;
8100
8101 state = IF_STATE_SOURCE;
8102 break;
8103
8104 case IF_SRC_KERNELADDR:
8105 case IF_SRC_KERNEL:
8106 kernel = 1;
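/* fall through */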
8107
8108 case IF_SRC_FILEADDR:
8109 case IF_SRC_FILE:
8110 if (state != IF_STATE_SOURCE)
8111 goto fail;
8112
8113 if (token == IF_SRC_FILE || token == IF_SRC_KERNEL)
8114 filter->range = 1;
8115
8116 *args[0].to = 0;
8117 ret = kstrtoul(args[0].from, 0, &filter->offset);
8118 if (ret)
8119 goto fail;
8120
8121 if (filter->range) {
8122 *args[1].to = 0;
8123 ret = kstrtoul(args[1].from, 0, &filter->size);
8124 if (ret)
8125 goto fail;
8126 }
8127
8128 if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
8129 int fpos = filter->range ? 2 : 1;
8130
8131 filename = match_strdup(&args[fpos]);
8132 if (!filename) {
8133 ret = -ENOMEM;
8134 goto fail;
8135 }
8136 }
8137
8138 state = IF_STATE_END;
8139 break;
8140
8141 default:
8142 goto fail;
8143 }
8144
8145 /*
8146 * The filter definition is fully parsed; validate and install it.
8147 * Make sure that it doesn't contradict itself or the event's
8148 * attribute.
8149 */
8150 if (state == IF_STATE_END) {
8151 if (kernel && event->attr.exclude_kernel)
8152 goto fail;
8153
8154 if (!kernel) {
8155 if (!filename)
8156 goto fail;
8157
8158 /* look up the path and grab its inode */
8159 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
8160 if (ret)
8161 goto fail_free_name;
8162
8163 filter->inode = igrab(d_inode(path.dentry));
8164 path_put(&path);
8165 kfree(filename);
8166 filename = NULL;
8167
8168 ret = -EINVAL;
8169 if (!filter->inode ||
8170 !S_ISREG(filter->inode->i_mode))
8171 /* free_filters_list() will iput() */
8172 goto fail;
8173 }
8174
8175 /* ready to consume more filters */
8176 state = IF_STATE_ACTION;
8177 filter = NULL;
8178 }
8179 }
8180
8181 if (state != IF_STATE_ACTION)
8182 goto fail;
8183
8184 kfree(orig);
8185
8186 return 0;
8187
8188 fail_free_name:
8189 kfree(filename);
8190 fail:
8191 free_filters_list(filters);
8192 kfree(orig);
8193
8194 return ret;
8195 }
8196
8197 static int
8198 perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
8199 {
8200 LIST_HEAD(filters);
8201 int ret;
8202
8203 /*
8204 * Since this is called in the perf_ioctl() path, we're already holding
8205 * ctx::mutex.
8206 */
8207 lockdep_assert_held(&event->ctx->mutex);
8208
8209 if (WARN_ON_ONCE(event->parent))
8210 return -EINVAL;
8211
8212 /*
8213 * For now, we only support filtering in per-task events; doing so
8214 * for CPU-wide events requires additional context switching trickery,
8215 * since the same object code will be mapped at different virtual
8216 * addresses in different processes.
8217 */
8218 if (!event->ctx->task)
8219 return -EOPNOTSUPP;
8220
8221 ret = perf_event_parse_addr_filter(event, filter_str, &filters);
8222 if (ret)
8223 return ret;
8224
8225 ret = event->pmu->addr_filters_validate(&filters);
8226 if (ret) {
8227 free_filters_list(&filters);
8228 return ret;
8229 }
8230
8231 /* remove existing filters, if any */
8232 perf_addr_filters_splice(event, &filters);
8233
8234 /* install new filters */
8235 perf_event_for_each_child(event, perf_event_addr_filters_apply);
8236
8237 return ret;
8238 }
8239
8240 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
8241 {
8242 char *filter_str;
8243 int ret = -EINVAL;
8244
8245 if ((event->attr.type != PERF_TYPE_TRACEPOINT ||
8246 !IS_ENABLED(CONFIG_EVENT_TRACING)) &&
8247 !has_addr_filter(event))
8248 return -EINVAL;
8249
8250 filter_str = strndup_user(arg, PAGE_SIZE);
8251 if (IS_ERR(filter_str))
8252 return PTR_ERR(filter_str);
8253
8254 if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
8255 event->attr.type == PERF_TYPE_TRACEPOINT)
8256 ret = ftrace_profile_set_filter(event, event->attr.config,
8257 filter_str);
8258 else if (has_addr_filter(event))
8259 ret = perf_event_set_addr_filter(event, filter_str);
8260
8261 kfree(filter_str);
8262 return ret;
8263 }
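/*
 * Userspace reaches this path through the PERF_EVENT_IOC_SET_FILTER ioctl,
 * which hands over a filter string of at most PAGE_SIZE bytes. A minimal
 * sketch, assuming an already opened event fd on a tracepoint or an
 * address-filter capable PMU:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/perf_event.h>
 *
 *	static int set_filter(int event_fd, const char *str)
 *	{
 *		return ioctl(event_fd, PERF_EVENT_IOC_SET_FILTER, str);
 *	}
 *
 * For tracepoint events the string goes to ftrace_profile_set_filter(); for
 * events with address filters it is parsed by perf_event_parse_addr_filter()
 * above.
 */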
8264
8265 /*
8266 * hrtimer based swevent callback
8267 */
8268
8269 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
8270 {
8271 enum hrtimer_restart ret = HRTIMER_RESTART;
8272 struct perf_sample_data data;
8273 struct pt_regs *regs;
8274 struct perf_event *event;
8275 u64 period;
8276
8277 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
8278
8279 if (event->state != PERF_EVENT_STATE_ACTIVE)
8280 return HRTIMER_NORESTART;
8281
8282 event->pmu->read(event);
8283
8284 perf_sample_data_init(&data, 0, event->hw.last_period);
8285 regs = get_irq_regs();
8286
8287 if (regs && !perf_exclude_event(event, regs)) {
8288 if (!(event->attr.exclude_idle && is_idle_task(current)))
8289 if (__perf_event_overflow(event, 1, &data, regs))
8290 ret = HRTIMER_NORESTART;
8291 }
8292
8293 period = max_t(u64, 10000, event->hw.sample_period);
8294 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
8295
8296 return ret;
8297 }
8298
8299 static void perf_swevent_start_hrtimer(struct perf_event *event)
8300 {
8301 struct hw_perf_event *hwc = &event->hw;
8302 s64 period;
8303
8304 if (!is_sampling_event(event))
8305 return;
8306
8307 period = local64_read(&hwc->period_left);
8308 if (period) {
8309 if (period < 0)
8310 period = 10000;
8311
8312 local64_set(&hwc->period_left, 0);
8313 } else {
8314 period = max_t(u64, 10000, hwc->sample_period);
8315 }
8316 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
8317 HRTIMER_MODE_REL_PINNED);
8318 }
8319
8320 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
8321 {
8322 struct hw_perf_event *hwc = &event->hw;
8323
8324 if (is_sampling_event(event)) {
8325 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
8326 local64_set(&hwc->period_left, ktime_to_ns(remaining));
8327
8328 hrtimer_cancel(&hwc->hrtimer);
8329 }
8330 }
8331
8332 static void perf_swevent_init_hrtimer(struct perf_event *event)
8333 {
8334 struct hw_perf_event *hwc = &event->hw;
8335
8336 if (!is_sampling_event(event))
8337 return;
8338
8339 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
8340 hwc->hrtimer.function = perf_swevent_hrtimer;
8341
8342 /*
8343 * Since hrtimers have a fixed rate, we can do a static freq->period
8344 * mapping and avoid the whole period adjust feedback stuff.
8345 */
8346 if (event->attr.freq) {
8347 long freq = event->attr.sample_freq;
8348
8349 event->attr.sample_period = NSEC_PER_SEC / freq;
8350 hwc->sample_period = event->attr.sample_period;
8351 local64_set(&hwc->period_left, hwc->sample_period);
8352 hwc->last_period = hwc->sample_period;
8353 event->attr.freq = 0;
8354 }
8355 }
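/*
 * Worked example of the static freq->period mapping above: with
 * attr.sample_freq = 4000 (4 kHz), sample_period becomes
 * NSEC_PER_SEC / 4000 = 250000 ns, so the hrtimer fires every 250 us, and
 * attr.freq is cleared so the generic frequency-adjustment feedback is
 * bypassed for these software timers.
 */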
8356
8357 /*
8358 * Software event: cpu wall time clock
8359 */
8360
8361 static void cpu_clock_event_update(struct perf_event *event)
8362 {
8363 s64 prev;
8364 u64 now;
8365
8366 now = local_clock();
8367 prev = local64_xchg(&event->hw.prev_count, now);
8368 local64_add(now - prev, &event->count);
8369 }
8370
8371 static void cpu_clock_event_start(struct perf_event *event, int flags)
8372 {
8373 local64_set(&event->hw.prev_count, local_clock());
8374 perf_swevent_start_hrtimer(event);
8375 }
8376
8377 static void cpu_clock_event_stop(struct perf_event *event, int flags)
8378 {
8379 perf_swevent_cancel_hrtimer(event);
8380 cpu_clock_event_update(event);
8381 }
8382
8383 static int cpu_clock_event_add(struct perf_event *event, int flags)
8384 {
8385 if (flags & PERF_EF_START)
8386 cpu_clock_event_start(event, flags);
8387 perf_event_update_userpage(event);
8388
8389 return 0;
8390 }
8391
8392 static void cpu_clock_event_del(struct perf_event *event, int flags)
8393 {
8394 cpu_clock_event_stop(event, flags);
8395 }
8396
8397 static void cpu_clock_event_read(struct perf_event *event)
8398 {
8399 cpu_clock_event_update(event);
8400 }
8401
8402 static int cpu_clock_event_init(struct perf_event *event)
8403 {
8404 if (event->attr.type != PERF_TYPE_SOFTWARE)
8405 return -ENOENT;
8406
8407 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
8408 return -ENOENT;
8409
8410 /*
8411 * no branch sampling for software events
8412 */
8413 if (has_branch_stack(event))
8414 return -EOPNOTSUPP;
8415
8416 perf_swevent_init_hrtimer(event);
8417
8418 return 0;
8419 }
8420
8421 static struct pmu perf_cpu_clock = {
8422 .task_ctx_nr = perf_sw_context,
8423
8424 .capabilities = PERF_PMU_CAP_NO_NMI,
8425
8426 .event_init = cpu_clock_event_init,
8427 .add = cpu_clock_event_add,
8428 .del = cpu_clock_event_del,
8429 .start = cpu_clock_event_start,
8430 .stop = cpu_clock_event_stop,
8431 .read = cpu_clock_event_read,
8432 };
8433
8434 /*
8435 * Software event: task time clock
8436 */
8437
8438 static void task_clock_event_update(struct perf_event *event, u64 now)
8439 {
8440 u64 prev;
8441 s64 delta;
8442
8443 prev = local64_xchg(&event->hw.prev_count, now);
8444 delta = now - prev;
8445 local64_add(delta, &event->count);
8446 }
8447
8448 static void task_clock_event_start(struct perf_event *event, int flags)
8449 {
8450 local64_set(&event->hw.prev_count, event->ctx->time);
8451 perf_swevent_start_hrtimer(event);
8452 }
8453
8454 static void task_clock_event_stop(struct perf_event *event, int flags)
8455 {
8456 perf_swevent_cancel_hrtimer(event);
8457 task_clock_event_update(event, event->ctx->time);
8458 }
8459
8460 static int task_clock_event_add(struct perf_event *event, int flags)
8461 {
8462 if (flags & PERF_EF_START)
8463 task_clock_event_start(event, flags);
8464 perf_event_update_userpage(event);
8465
8466 return 0;
8467 }
8468
8469 static void task_clock_event_del(struct perf_event *event, int flags)
8470 {
8471 task_clock_event_stop(event, PERF_EF_UPDATE);
8472 }
8473
8474 static void task_clock_event_read(struct perf_event *event)
8475 {
8476 u64 now = perf_clock();
8477 u64 delta = now - event->ctx->timestamp;
8478 u64 time = event->ctx->time + delta;
8479
8480 task_clock_event_update(event, time);
8481 }
8482
8483 static int task_clock_event_init(struct perf_event *event)
8484 {
8485 if (event->attr.type != PERF_TYPE_SOFTWARE)
8486 return -ENOENT;
8487
8488 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
8489 return -ENOENT;
8490
8491 /*
8492 * no branch sampling for software events
8493 */
8494 if (has_branch_stack(event))
8495 return -EOPNOTSUPP;
8496
8497 perf_swevent_init_hrtimer(event);
8498
8499 return 0;
8500 }
8501
8502 static struct pmu perf_task_clock = {
8503 .task_ctx_nr = perf_sw_context,
8504
8505 .capabilities = PERF_PMU_CAP_NO_NMI,
8506
8507 .event_init = task_clock_event_init,
8508 .add = task_clock_event_add,
8509 .del = task_clock_event_del,
8510 .start = task_clock_event_start,
8511 .stop = task_clock_event_stop,
8512 .read = task_clock_event_read,
8513 };
8514
8515 static void perf_pmu_nop_void(struct pmu *pmu)
8516 {
8517 }
8518
8519 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
8520 {
8521 }
8522
8523 static int perf_pmu_nop_int(struct pmu *pmu)
8524 {
8525 return 0;
8526 }
8527
8528 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
8529
8530 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
8531 {
8532 __this_cpu_write(nop_txn_flags, flags);
8533
8534 if (flags & ~PERF_PMU_TXN_ADD)
8535 return;
8536
8537 perf_pmu_disable(pmu);
8538 }
8539
8540 static int perf_pmu_commit_txn(struct pmu *pmu)
8541 {
8542 unsigned int flags = __this_cpu_read(nop_txn_flags);
8543
8544 __this_cpu_write(nop_txn_flags, 0);
8545
8546 if (flags & ~PERF_PMU_TXN_ADD)
8547 return 0;
8548
8549 perf_pmu_enable(pmu);
8550 return 0;
8551 }
8552
8553 static void perf_pmu_cancel_txn(struct pmu *pmu)
8554 {
8555 unsigned int flags = __this_cpu_read(nop_txn_flags);
8556
8557 __this_cpu_write(nop_txn_flags, 0);
8558
8559 if (flags & ~PERF_PMU_TXN_ADD)
8560 return;
8561
8562 perf_pmu_enable(pmu);
8563 }
8564
8565 static int perf_event_idx_default(struct perf_event *event)
8566 {
8567 return 0;
8568 }
8569
8570 /*
8571 * Ensures all contexts with the same task_ctx_nr have the same
8572 * pmu_cpu_context too.
8573 */
8574 static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
8575 {
8576 struct pmu *pmu;
8577
8578 if (ctxn < 0)
8579 return NULL;
8580
8581 list_for_each_entry(pmu, &pmus, entry) {
8582 if (pmu->task_ctx_nr == ctxn)
8583 return pmu->pmu_cpu_context;
8584 }
8585
8586 return NULL;
8587 }
8588
8589 static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
8590 {
8591 int cpu;
8592
8593 for_each_possible_cpu(cpu) {
8594 struct perf_cpu_context *cpuctx;
8595
8596 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8597
8598 if (cpuctx->unique_pmu == old_pmu)
8599 cpuctx->unique_pmu = pmu;
8600 }
8601 }
8602
8603 static void free_pmu_context(struct pmu *pmu)
8604 {
8605 struct pmu *i;
8606
8607 mutex_lock(&pmus_lock);
8608 /*
8609 * Like a real lame refcount.
8610 */
8611 list_for_each_entry(i, &pmus, entry) {
8612 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
8613 update_pmu_context(i, pmu);
8614 goto out;
8615 }
8616 }
8617
8618 free_percpu(pmu->pmu_cpu_context);
8619 out:
8620 mutex_unlock(&pmus_lock);
8621 }
8622
8623 /*
8624 * Let userspace know that this PMU supports address range filtering:
8625 */
8626 static ssize_t nr_addr_filters_show(struct device *dev,
8627 struct device_attribute *attr,
8628 char *page)
8629 {
8630 struct pmu *pmu = dev_get_drvdata(dev);
8631
8632 return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
8633 }
8634 DEVICE_ATTR_RO(nr_addr_filters);
8635
8636 static struct idr pmu_idr;
8637
8638 static ssize_t
8639 type_show(struct device *dev, struct device_attribute *attr, char *page)
8640 {
8641 struct pmu *pmu = dev_get_drvdata(dev);
8642
8643 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
8644 }
8645 static DEVICE_ATTR_RO(type);
8646
8647 static ssize_t
8648 perf_event_mux_interval_ms_show(struct device *dev,
8649 struct device_attribute *attr,
8650 char *page)
8651 {
8652 struct pmu *pmu = dev_get_drvdata(dev);
8653
8654 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
8655 }
8656
8657 static DEFINE_MUTEX(mux_interval_mutex);
8658
8659 static ssize_t
8660 perf_event_mux_interval_ms_store(struct device *dev,
8661 struct device_attribute *attr,
8662 const char *buf, size_t count)
8663 {
8664 struct pmu *pmu = dev_get_drvdata(dev);
8665 int timer, cpu, ret;
8666
8667 ret = kstrtoint(buf, 0, &timer);
8668 if (ret)
8669 return ret;
8670
8671 if (timer < 1)
8672 return -EINVAL;
8673
8674 /* same value, nothing to do */
8675 if (timer == pmu->hrtimer_interval_ms)
8676 return count;
8677
8678 mutex_lock(&mux_interval_mutex);
8679 pmu->hrtimer_interval_ms = timer;
8680
8681 /* update all cpuctx for this PMU */
8682 get_online_cpus();
8683 for_each_online_cpu(cpu) {
8684 struct perf_cpu_context *cpuctx;
8685 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8686 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
8687
8688 cpu_function_call(cpu,
8689 (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
8690 }
8691 put_online_cpus();
8692 mutex_unlock(&mux_interval_mutex);
8693
8694 return count;
8695 }
8696 static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
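/*
 * These attributes end up on the "event_source" bus defined below, so
 * (assuming sysfs is mounted at /sys) each registered PMU exposes e.g.:
 *
 *	/sys/bus/event_source/devices/<pmu>/type
 *	/sys/bus/event_source/devices/<pmu>/perf_event_mux_interval_ms
 *
 * A sketch of tweaking the multiplexing interval from userspace, assuming a
 * PMU named "cpu"; the value is the hrtimer interval in milliseconds and
 * must be >= 1:
 *
 *	FILE *f = fopen("/sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms", "w");
 *	if (f) {
 *		fputs("2", f);
 *		fclose(f);
 *	}
 */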
8697
8698 static struct attribute *pmu_dev_attrs[] = {
8699 &dev_attr_type.attr,
8700 &dev_attr_perf_event_mux_interval_ms.attr,
8701 NULL,
8702 };
8703 ATTRIBUTE_GROUPS(pmu_dev);
8704
8705 static int pmu_bus_running;
8706 static struct bus_type pmu_bus = {
8707 .name = "event_source",
8708 .dev_groups = pmu_dev_groups,
8709 };
8710
8711 static void pmu_dev_release(struct device *dev)
8712 {
8713 kfree(dev);
8714 }
8715
8716 static int pmu_dev_alloc(struct pmu *pmu)
8717 {
8718 int ret = -ENOMEM;
8719
8720 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
8721 if (!pmu->dev)
8722 goto out;
8723
8724 pmu->dev->groups = pmu->attr_groups;
8725 device_initialize(pmu->dev);
8726 ret = dev_set_name(pmu->dev, "%s", pmu->name);
8727 if (ret)
8728 goto free_dev;
8729
8730 dev_set_drvdata(pmu->dev, pmu);
8731 pmu->dev->bus = &pmu_bus;
8732 pmu->dev->release = pmu_dev_release;
8733 ret = device_add(pmu->dev);
8734 if (ret)
8735 goto free_dev;
8736
8737 /* For PMUs with address filters, throw in an extra attribute: */
8738 if (pmu->nr_addr_filters)
8739 ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
8740
8741 if (ret)
8742 goto del_dev;
8743
8744 out:
8745 return ret;
8746
8747 del_dev:
8748 device_del(pmu->dev);
8749
8750 free_dev:
8751 put_device(pmu->dev);
8752 goto out;
8753 }
8754
8755 static struct lock_class_key cpuctx_mutex;
8756 static struct lock_class_key cpuctx_lock;
8757
8758 int perf_pmu_register(struct pmu *pmu, const char *name, int type)
8759 {
8760 int cpu, ret;
8761
8762 mutex_lock(&pmus_lock);
8763 ret = -ENOMEM;
8764 pmu->pmu_disable_count = alloc_percpu(int);
8765 if (!pmu->pmu_disable_count)
8766 goto unlock;
8767
8768 pmu->type = -1;
8769 if (!name)
8770 goto skip_type;
8771 pmu->name = name;
8772
8773 if (type < 0) {
8774 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
8775 if (type < 0) {
8776 ret = type;
8777 goto free_pdc;
8778 }
8779 }
8780 pmu->type = type;
8781
8782 if (pmu_bus_running) {
8783 ret = pmu_dev_alloc(pmu);
8784 if (ret)
8785 goto free_idr;
8786 }
8787
8788 skip_type:
8789 if (pmu->task_ctx_nr == perf_hw_context) {
8790 static int hw_context_taken = 0;
8791
8792 /*
8793 * Other than systems with heterogeneous CPUs, it never makes
8794 * sense for two PMUs to share perf_hw_context. PMUs which are
8795 * uncore must use perf_invalid_context.
8796 */
8797 if (WARN_ON_ONCE(hw_context_taken &&
8798 !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS)))
8799 pmu->task_ctx_nr = perf_invalid_context;
8800
8801 hw_context_taken = 1;
8802 }
8803
8804 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
8805 if (pmu->pmu_cpu_context)
8806 goto got_cpu_context;
8807
8808 ret = -ENOMEM;
8809 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
8810 if (!pmu->pmu_cpu_context)
8811 goto free_dev;
8812
8813 for_each_possible_cpu(cpu) {
8814 struct perf_cpu_context *cpuctx;
8815
8816 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8817 __perf_event_init_context(&cpuctx->ctx);
8818 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
8819 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
8820 cpuctx->ctx.pmu = pmu;
8821
8822 __perf_mux_hrtimer_init(cpuctx, cpu);
8823
8824 cpuctx->unique_pmu = pmu;
8825 }
8826
8827 got_cpu_context:
8828 if (!pmu->start_txn) {
8829 if (pmu->pmu_enable) {
8830 /*
8831 * If we have pmu_enable/pmu_disable calls, install
8832 * transaction stubs that use that to try and batch
8833 * hardware accesses.
8834 */
8835 pmu->start_txn = perf_pmu_start_txn;
8836 pmu->commit_txn = perf_pmu_commit_txn;
8837 pmu->cancel_txn = perf_pmu_cancel_txn;
8838 } else {
8839 pmu->start_txn = perf_pmu_nop_txn;
8840 pmu->commit_txn = perf_pmu_nop_int;
8841 pmu->cancel_txn = perf_pmu_nop_void;
8842 }
8843 }
8844
8845 if (!pmu->pmu_enable) {
8846 pmu->pmu_enable = perf_pmu_nop_void;
8847 pmu->pmu_disable = perf_pmu_nop_void;
8848 }
8849
8850 if (!pmu->event_idx)
8851 pmu->event_idx = perf_event_idx_default;
8852
8853 list_add_rcu(&pmu->entry, &pmus);
8854 atomic_set(&pmu->exclusive_cnt, 0);
8855 ret = 0;
8856 unlock:
8857 mutex_unlock(&pmus_lock);
8858
8859 return ret;
8860
8861 free_dev:
8862 device_del(pmu->dev);
8863 put_device(pmu->dev);
8864
8865 free_idr:
8866 if (pmu->type >= PERF_TYPE_MAX)
8867 idr_remove(&pmu_idr, pmu->type);
8868
8869 free_pdc:
8870 free_percpu(pmu->pmu_disable_count);
8871 goto unlock;
8872 }
8873 EXPORT_SYMBOL_GPL(perf_pmu_register);
8874
8875 void perf_pmu_unregister(struct pmu *pmu)
8876 {
8877 int remove_device;
8878
8879 mutex_lock(&pmus_lock);
8880 remove_device = pmu_bus_running;
8881 list_del_rcu(&pmu->entry);
8882 mutex_unlock(&pmus_lock);
8883
8884 /*
8885 * We dereference the pmu list under both SRCU and regular RCU, so
8886 * synchronize against both of those.
8887 */
8888 synchronize_srcu(&pmus_srcu);
8889 synchronize_rcu();
8890
8891 free_percpu(pmu->pmu_disable_count);
8892 if (pmu->type >= PERF_TYPE_MAX)
8893 idr_remove(&pmu_idr, pmu->type);
8894 if (remove_device) {
8895 if (pmu->nr_addr_filters)
8896 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
8897 device_del(pmu->dev);
8898 put_device(pmu->dev);
8899 }
8900 free_pmu_context(pmu);
8901 }
8902 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
8903
8904 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
8905 {
8906 struct perf_event_context *ctx = NULL;
8907 int ret;
8908
8909 if (!try_module_get(pmu->module))
8910 return -ENODEV;
8911
8912 if (event->group_leader != event) {
8913 /*
8914 * This ctx->mutex can nest when we're called through
8915 * inheritance. See the perf_event_ctx_lock_nested() comment.
8916 */
8917 ctx = perf_event_ctx_lock_nested(event->group_leader,
8918 SINGLE_DEPTH_NESTING);
8919 BUG_ON(!ctx);
8920 }
8921
8922 event->pmu = pmu;
8923 ret = pmu->event_init(event);
8924
8925 if (ctx)
8926 perf_event_ctx_unlock(event->group_leader, ctx);
8927
8928 if (ret)
8929 module_put(pmu->module);
8930
8931 return ret;
8932 }
8933
8934 static struct pmu *perf_init_event(struct perf_event *event)
8935 {
8936 struct pmu *pmu = NULL;
8937 int idx;
8938 int ret;
8939
8940 idx = srcu_read_lock(&pmus_srcu);
8941
8942 rcu_read_lock();
8943 pmu = idr_find(&pmu_idr, event->attr.type);
8944 rcu_read_unlock();
8945 if (pmu) {
8946 ret = perf_try_init_event(pmu, event);
8947 if (ret)
8948 pmu = ERR_PTR(ret);
8949 goto unlock;
8950 }
8951
8952 list_for_each_entry_rcu(pmu, &pmus, entry) {
8953 ret = perf_try_init_event(pmu, event);
8954 if (!ret)
8955 goto unlock;
8956
8957 if (ret != -ENOENT) {
8958 pmu = ERR_PTR(ret);
8959 goto unlock;
8960 }
8961 }
8962 pmu = ERR_PTR(-ENOENT);
8963 unlock:
8964 srcu_read_unlock(&pmus_srcu, idx);
8965
8966 return pmu;
8967 }
8968
8969 static void attach_sb_event(struct perf_event *event)
8970 {
8971 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
8972
8973 raw_spin_lock(&pel->lock);
8974 list_add_rcu(&event->sb_list, &pel->list);
8975 raw_spin_unlock(&pel->lock);
8976 }
8977
8978 /*
8979 * We keep a list of all !task (and therefore per-cpu) events
8980 * that need to receive side-band records.
8981 *
8982 * This avoids having to scan all the various PMU per-cpu contexts
8983 * looking for them.
8984 */
8985 static void account_pmu_sb_event(struct perf_event *event)
8986 {
8987 if (is_sb_event(event))
8988 attach_sb_event(event);
8989 }
8990
8991 static void account_event_cpu(struct perf_event *event, int cpu)
8992 {
8993 if (event->parent)
8994 return;
8995
8996 if (is_cgroup_event(event))
8997 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
8998 }
8999
9000 /* Freq events need the tick to stay alive (see perf_event_task_tick). */
9001 static void account_freq_event_nohz(void)
9002 {
9003 #ifdef CONFIG_NO_HZ_FULL
9004 /* Lock so we don't race with concurrent unaccount */
9005 spin_lock(&nr_freq_lock);
9006 if (atomic_inc_return(&nr_freq_events) == 1)
9007 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
9008 spin_unlock(&nr_freq_lock);
9009 #endif
9010 }
9011
9012 static void account_freq_event(void)
9013 {
9014 if (tick_nohz_full_enabled())
9015 account_freq_event_nohz();
9016 else
9017 atomic_inc(&nr_freq_events);
9018 }
9019
9020
9021 static void account_event(struct perf_event *event)
9022 {
9023 bool inc = false;
9024
9025 if (event->parent)
9026 return;
9027
9028 if (event->attach_state & PERF_ATTACH_TASK)
9029 inc = true;
9030 if (event->attr.mmap || event->attr.mmap_data)
9031 atomic_inc(&nr_mmap_events);
9032 if (event->attr.comm)
9033 atomic_inc(&nr_comm_events);
9034 if (event->attr.task)
9035 atomic_inc(&nr_task_events);
9036 if (event->attr.freq)
9037 account_freq_event();
9038 if (event->attr.context_switch) {
9039 atomic_inc(&nr_switch_events);
9040 inc = true;
9041 }
9042 if (has_branch_stack(event))
9043 inc = true;
9044 if (is_cgroup_event(event))
9045 inc = true;
9046
9047 if (inc) {
9048 if (atomic_inc_not_zero(&perf_sched_count))
9049 goto enabled;
9050
9051 mutex_lock(&perf_sched_mutex);
9052 if (!atomic_read(&perf_sched_count)) {
9053 static_branch_enable(&perf_sched_events);
9054 /*
9055 * Guarantee that all CPUs observe the key change and
9056 * call the perf scheduling hooks before proceeding to
9057 * install events that need them.
9058 */
9059 synchronize_sched();
9060 }
9061 /*
9062 * Now that we have waited for the sync_sched(), allow further
9063 * increments to by-pass the mutex.
9064 */
9065 atomic_inc(&perf_sched_count);
9066 mutex_unlock(&perf_sched_mutex);
9067 }
9068 enabled:
9069
9070 account_event_cpu(event, event->cpu);
9071
9072 account_pmu_sb_event(event);
9073 }
9074
9075 /*
9076 * Allocate and initialize an event structure
9077 */
9078 static struct perf_event *
9079 perf_event_alloc(struct perf_event_attr *attr, int cpu,
9080 struct task_struct *task,
9081 struct perf_event *group_leader,
9082 struct perf_event *parent_event,
9083 perf_overflow_handler_t overflow_handler,
9084 void *context, int cgroup_fd)
9085 {
9086 struct pmu *pmu;
9087 struct perf_event *event;
9088 struct hw_perf_event *hwc;
9089 long err = -EINVAL;
9090
9091 if ((unsigned)cpu >= nr_cpu_ids) {
9092 if (!task || cpu != -1)
9093 return ERR_PTR(-EINVAL);
9094 }
9095
9096 event = kzalloc(sizeof(*event), GFP_KERNEL);
9097 if (!event)
9098 return ERR_PTR(-ENOMEM);
9099
9100 /*
9101 * Single events are their own group leaders, with an
9102 * empty sibling list:
9103 */
9104 if (!group_leader)
9105 group_leader = event;
9106
9107 mutex_init(&event->child_mutex);
9108 INIT_LIST_HEAD(&event->child_list);
9109
9110 INIT_LIST_HEAD(&event->group_entry);
9111 INIT_LIST_HEAD(&event->event_entry);
9112 INIT_LIST_HEAD(&event->sibling_list);
9113 INIT_LIST_HEAD(&event->rb_entry);
9114 INIT_LIST_HEAD(&event->active_entry);
9115 INIT_LIST_HEAD(&event->addr_filters.list);
9116 INIT_HLIST_NODE(&event->hlist_entry);
9117
9118
9119 init_waitqueue_head(&event->waitq);
9120 init_irq_work(&event->pending, perf_pending_event);
9121
9122 mutex_init(&event->mmap_mutex);
9123 raw_spin_lock_init(&event->addr_filters.lock);
9124
9125 atomic_long_set(&event->refcount, 1);
9126 event->cpu = cpu;
9127 event->attr = *attr;
9128 event->group_leader = group_leader;
9129 event->pmu = NULL;
9130 event->oncpu = -1;
9131
9132 event->parent = parent_event;
9133
9134 event->ns = get_pid_ns(task_active_pid_ns(current));
9135 event->id = atomic64_inc_return(&perf_event_id);
9136
9137 event->state = PERF_EVENT_STATE_INACTIVE;
9138
9139 if (task) {
9140 event->attach_state = PERF_ATTACH_TASK;
9141 /*
9142 * XXX pmu::event_init needs to know what task to account to
9143 * and we cannot use the ctx information because we need the
9144 * pmu before we get a ctx.
9145 */
9146 event->hw.target = task;
9147 }
9148
9149 event->clock = &local_clock;
9150 if (parent_event)
9151 event->clock = parent_event->clock;
9152
9153 if (!overflow_handler && parent_event) {
9154 overflow_handler = parent_event->overflow_handler;
9155 context = parent_event->overflow_handler_context;
9156 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
9157 if (overflow_handler == bpf_overflow_handler) {
9158 struct bpf_prog *prog = bpf_prog_inc(parent_event->prog);
9159
9160 if (IS_ERR(prog)) {
9161 err = PTR_ERR(prog);
9162 goto err_ns;
9163 }
9164 event->prog = prog;
9165 event->orig_overflow_handler =
9166 parent_event->orig_overflow_handler;
9167 }
9168 #endif
9169 }
9170
9171 if (overflow_handler) {
9172 event->overflow_handler = overflow_handler;
9173 event->overflow_handler_context = context;
9174 } else if (is_write_backward(event)) {
9175 event->overflow_handler = perf_event_output_backward;
9176 event->overflow_handler_context = NULL;
9177 } else {
9178 event->overflow_handler = perf_event_output_forward;
9179 event->overflow_handler_context = NULL;
9180 }
9181
9182 perf_event__state_init(event);
9183
9184 pmu = NULL;
9185
9186 hwc = &event->hw;
9187 hwc->sample_period = attr->sample_period;
9188 if (attr->freq && attr->sample_freq)
9189 hwc->sample_period = 1;
9190 hwc->last_period = hwc->sample_period;
9191
9192 local64_set(&hwc->period_left, hwc->sample_period);
9193
9194 /*
9195 * we currently do not support PERF_FORMAT_GROUP on inherited events
9196 */
9197 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
9198 goto err_ns;
9199
9200 if (!has_branch_stack(event))
9201 event->attr.branch_sample_type = 0;
9202
9203 if (cgroup_fd != -1) {
9204 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
9205 if (err)
9206 goto err_ns;
9207 }
9208
9209 pmu = perf_init_event(event);
9210 if (!pmu)
9211 goto err_ns;
9212 else if (IS_ERR(pmu)) {
9213 err = PTR_ERR(pmu);
9214 goto err_ns;
9215 }
9216
9217 err = exclusive_event_init(event);
9218 if (err)
9219 goto err_pmu;
9220
9221 if (has_addr_filter(event)) {
9222 event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
9223 sizeof(unsigned long),
9224 GFP_KERNEL);
9225 if (!event->addr_filters_offs)
9226 goto err_per_task;
9227
9228 /* force hw sync on the address filters */
9229 event->addr_filters_gen = 1;
9230 }
9231
9232 if (!event->parent) {
9233 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
9234 err = get_callchain_buffers(attr->sample_max_stack);
9235 if (err)
9236 goto err_addr_filters;
9237 }
9238 }
9239
9240 /* symmetric to unaccount_event() in _free_event() */
9241 account_event(event);
9242
9243 return event;
9244
9245 err_addr_filters:
9246 kfree(event->addr_filters_offs);
9247
9248 err_per_task:
9249 exclusive_event_destroy(event);
9250
9251 err_pmu:
9252 if (event->destroy)
9253 event->destroy(event);
9254 module_put(pmu->module);
9255 err_ns:
9256 if (is_cgroup_event(event))
9257 perf_detach_cgroup(event);
9258 if (event->ns)
9259 put_pid_ns(event->ns);
9260 kfree(event);
9261
9262 return ERR_PTR(err);
9263 }
9264
9265 static int perf_copy_attr(struct perf_event_attr __user *uattr,
9266 struct perf_event_attr *attr)
9267 {
9268 u32 size;
9269 int ret;
9270
9271 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
9272 return -EFAULT;
9273
9274 /*
9275 * zero the full structure, so that a short copy will be nice.
9276 */
9277 memset(attr, 0, sizeof(*attr));
9278
9279 ret = get_user(size, &uattr->size);
9280 if (ret)
9281 return ret;
9282
9283 if (size > PAGE_SIZE) /* silly large */
9284 goto err_size;
9285
9286 if (!size) /* abi compat */
9287 size = PERF_ATTR_SIZE_VER0;
9288
9289 if (size < PERF_ATTR_SIZE_VER0)
9290 goto err_size;
9291
9292 /*
9293 * If we're handed a bigger struct than we know of,
9294 * ensure all the unknown bits are 0 - i.e. new
9295 * user-space does not rely on any kernel feature
9296 * extensions we dont know about yet.
9297 * extensions we don't know about yet.
9298 if (size > sizeof(*attr)) {
9299 unsigned char __user *addr;
9300 unsigned char __user *end;
9301 unsigned char val;
9302
9303 addr = (void __user *)uattr + sizeof(*attr);
9304 end = (void __user *)uattr + size;
9305
9306 for (; addr < end; addr++) {
9307 ret = get_user(val, addr);
9308 if (ret)
9309 return ret;
9310 if (val)
9311 goto err_size;
9312 }
9313 size = sizeof(*attr);
9314 }
9315
9316 ret = copy_from_user(attr, uattr, size);
9317 if (ret)
9318 return -EFAULT;
9319
9320 if (attr->__reserved_1)
9321 return -EINVAL;
9322
9323 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
9324 return -EINVAL;
9325
9326 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
9327 return -EINVAL;
9328
9329 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
9330 u64 mask = attr->branch_sample_type;
9331
9332 /* only using defined bits */
9333 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
9334 return -EINVAL;
9335
9336 /* at least one branch bit must be set */
9337 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
9338 return -EINVAL;
9339
9340 /* propagate priv level, when not set for branch */
9341 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
9342
9343 /* exclude_kernel checked on syscall entry */
9344 if (!attr->exclude_kernel)
9345 mask |= PERF_SAMPLE_BRANCH_KERNEL;
9346
9347 if (!attr->exclude_user)
9348 mask |= PERF_SAMPLE_BRANCH_USER;
9349
9350 if (!attr->exclude_hv)
9351 mask |= PERF_SAMPLE_BRANCH_HV;
9352 /*
9353 * adjust user setting (for HW filter setup)
9354 */
9355 attr->branch_sample_type = mask;
9356 }
9357 /* privileged levels capture (kernel, hv): check permissions */
9358 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
9359 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9360 return -EACCES;
9361 }
9362
9363 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
9364 ret = perf_reg_validate(attr->sample_regs_user);
9365 if (ret)
9366 return ret;
9367 }
9368
9369 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
9370 if (!arch_perf_have_user_stack_dump())
9371 return -ENOSYS;
9372
9373 /*
9374 * We have __u32 type for the size, but so far
9375 * we can only use __u16 as maximum due to the
9376 * __u16 sample size limit.
9377 */
9378 if (attr->sample_stack_user >= USHRT_MAX)
9379 ret = -EINVAL;
9380 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
9381 ret = -EINVAL;
9382 }
9383
9384 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
9385 ret = perf_reg_validate(attr->sample_regs_intr);
9386 out:
9387 return ret;
9388
9389 err_size:
9390 put_user(sizeof(*attr), &uattr->size);
9391 ret = -E2BIG;
9392 goto out;
9393 }
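/*
 * Example of the size handshake above: an old binary compiled against
 * PERF_ATTR_SIZE_VER0 passes a smaller uattr->size, and the short copy plus
 * the memset() leaves the newer fields zeroed. A newer binary on an older
 * kernel passes a larger size; the loop verifies that every byte beyond
 * sizeof(*attr) is zero and otherwise fails with -E2BIG, writing the size
 * this kernel understands back into uattr->size so userspace can retry.
 */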
9394
9395 static int
9396 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
9397 {
9398 struct ring_buffer *rb = NULL;
9399 int ret = -EINVAL;
9400
9401 if (!output_event)
9402 goto set;
9403
9404 /* don't allow circular references */
9405 if (event == output_event)
9406 goto out;
9407
9408 /*
9409 * Don't allow cross-cpu buffers
9410 */
9411 if (output_event->cpu != event->cpu)
9412 goto out;
9413
9414 /*
9415 * If it's not a per-cpu rb, it must be the same task.
9416 */
9417 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
9418 goto out;
9419
9420 /*
9421 * Mixing clocks in the same buffer is trouble you don't need.
9422 */
9423 if (output_event->clock != event->clock)
9424 goto out;
9425
9426 /*
9427 * Either writing ring buffer from beginning or from end.
9428 * Mixing is not allowed.
9429 */
9430 if (is_write_backward(output_event) != is_write_backward(event))
9431 goto out;
9432
9433 /*
9434 * If both events generate aux data, they must be on the same PMU
9435 */
9436 if (has_aux(event) && has_aux(output_event) &&
9437 event->pmu != output_event->pmu)
9438 goto out;
9439
9440 set:
9441 mutex_lock(&event->mmap_mutex);
9442 /* Can't redirect output if we've got an active mmap() */
9443 if (atomic_read(&event->mmap_count))
9444 goto unlock;
9445
9446 if (output_event) {
9447 /* get the rb we want to redirect to */
9448 rb = ring_buffer_get(output_event);
9449 if (!rb)
9450 goto unlock;
9451 }
9452
9453 ring_buffer_attach(event, rb);
9454
9455 ret = 0;
9456 unlock:
9457 mutex_unlock(&event->mmap_mutex);
9458
9459 out:
9460 return ret;
9461 }
9462
9463 static void mutex_lock_double(struct mutex *a, struct mutex *b)
9464 {
9465 if (b < a)
9466 swap(a, b);
9467
9468 mutex_lock(a);
9469 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
9470 }
9471
9472 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
9473 {
9474 bool nmi_safe = false;
9475
9476 switch (clk_id) {
9477 case CLOCK_MONOTONIC:
9478 event->clock = &ktime_get_mono_fast_ns;
9479 nmi_safe = true;
9480 break;
9481
9482 case CLOCK_MONOTONIC_RAW:
9483 event->clock = &ktime_get_raw_fast_ns;
9484 nmi_safe = true;
9485 break;
9486
9487 case CLOCK_REALTIME:
9488 event->clock = &ktime_get_real_ns;
9489 break;
9490
9491 case CLOCK_BOOTTIME:
9492 event->clock = &ktime_get_boot_ns;
9493 break;
9494
9495 case CLOCK_TAI:
9496 event->clock = &ktime_get_tai_ns;
9497 break;
9498
9499 default:
9500 return -EINVAL;
9501 }
9502
9503 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
9504 return -EINVAL;
9505
9506 return 0;
9507 }
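/*
 * A sketch of requesting a specific clock from userspace (see the
 * attr.use_clockid handling in sys_perf_event_open() below):
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.config		= PERF_COUNT_SW_TASK_CLOCK,
 *		.size		= sizeof(attr),
 *		.use_clockid	= 1,
 *		.clockid	= CLOCK_MONOTONIC_RAW,
 *	};
 *
 * Clocks that are not NMI safe are only accepted for PMUs advertising
 * PERF_PMU_CAP_NO_NMI, such as the software PMUs above.
 */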
9508
9509 /**
9510 * sys_perf_event_open - open a performance event, associate it to a task/cpu
9511 *
9512 * @attr_uptr: event_id type attributes for monitoring/sampling
9513 * @pid: target pid
9514 * @cpu: target cpu
9515 * @group_fd: group leader event fd
9516 */
9517 SYSCALL_DEFINE5(perf_event_open,
9518 struct perf_event_attr __user *, attr_uptr,
9519 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
9520 {
9521 struct perf_event *group_leader = NULL, *output_event = NULL;
9522 struct perf_event *event, *sibling;
9523 struct perf_event_attr attr;
9524 struct perf_event_context *ctx, *uninitialized_var(gctx);
9525 struct file *event_file = NULL;
9526 struct fd group = {NULL, 0};
9527 struct task_struct *task = NULL;
9528 struct pmu *pmu;
9529 int event_fd;
9530 int move_group = 0;
9531 int err;
9532 int f_flags = O_RDWR;
9533 int cgroup_fd = -1;
9534
9535 /* for future expandability... */
9536 if (flags & ~PERF_FLAG_ALL)
9537 return -EINVAL;
9538
9539 err = perf_copy_attr(attr_uptr, &attr);
9540 if (err)
9541 return err;
9542
9543 if (!attr.exclude_kernel) {
9544 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9545 return -EACCES;
9546 }
9547
9548 if (attr.freq) {
9549 if (attr.sample_freq > sysctl_perf_event_sample_rate)
9550 return -EINVAL;
9551 } else {
9552 if (attr.sample_period & (1ULL << 63))
9553 return -EINVAL;
9554 }
9555
9556 if (!attr.sample_max_stack)
9557 attr.sample_max_stack = sysctl_perf_event_max_stack;
9558
9559 /*
9560 * In cgroup mode, the pid argument is used to pass the fd
9561 * opened to the cgroup directory in cgroupfs. The cpu argument
9562 * designates the cpu on which to monitor threads from that
9563 * cgroup.
9564 */
9565 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
9566 return -EINVAL;
9567
9568 if (flags & PERF_FLAG_FD_CLOEXEC)
9569 f_flags |= O_CLOEXEC;
9570
9571 event_fd = get_unused_fd_flags(f_flags);
9572 if (event_fd < 0)
9573 return event_fd;
9574
9575 if (group_fd != -1) {
9576 err = perf_fget_light(group_fd, &group);
9577 if (err)
9578 goto err_fd;
9579 group_leader = group.file->private_data;
9580 if (flags & PERF_FLAG_FD_OUTPUT)
9581 output_event = group_leader;
9582 if (flags & PERF_FLAG_FD_NO_GROUP)
9583 group_leader = NULL;
9584 }
9585
9586 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
9587 task = find_lively_task_by_vpid(pid);
9588 if (IS_ERR(task)) {
9589 err = PTR_ERR(task);
9590 goto err_group_fd;
9591 }
9592 }
9593
9594 if (task && group_leader &&
9595 group_leader->attr.inherit != attr.inherit) {
9596 err = -EINVAL;
9597 goto err_task;
9598 }
9599
9600 get_online_cpus();
9601
9602 if (task) {
9603 err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
9604 if (err)
9605 goto err_cpus;
9606
9607 /*
9608 * Reuse ptrace permission checks for now.
9609 *
9610 * We must hold cred_guard_mutex across this and any potential
9611 * perf_install_in_context() call for this new event to
9612 * serialize against exec() altering our credentials (and the
9613 * perf_event_exit_task() that could imply).
9614 */
9615 err = -EACCES;
9616 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
9617 goto err_cred;
9618 }
9619
9620 if (flags & PERF_FLAG_PID_CGROUP)
9621 cgroup_fd = pid;
9622
9623 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
9624 NULL, NULL, cgroup_fd);
9625 if (IS_ERR(event)) {
9626 err = PTR_ERR(event);
9627 goto err_cred;
9628 }
9629
9630 if (is_sampling_event(event)) {
9631 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
9632 err = -EOPNOTSUPP;
9633 goto err_alloc;
9634 }
9635 }
9636
9637 /*
9638 * Special case software events and allow them to be part of
9639 * any hardware group.
9640 */
9641 pmu = event->pmu;
9642
9643 if (attr.use_clockid) {
9644 err = perf_event_set_clock(event, attr.clockid);
9645 if (err)
9646 goto err_alloc;
9647 }
9648
9649 if (pmu->task_ctx_nr == perf_sw_context)
9650 event->event_caps |= PERF_EV_CAP_SOFTWARE;
9651
9652 if (group_leader &&
9653 (is_software_event(event) != is_software_event(group_leader))) {
9654 if (is_software_event(event)) {
9655 /*
9656 * If event and group_leader are not both a software
9657 * event, and event is, then group leader is not.
9658 *
9659 * Allow the addition of software events to !software
9660 * groups, this is safe because software events never
9661 * fail to schedule.
9662 */
9663 pmu = group_leader->pmu;
9664 } else if (is_software_event(group_leader) &&
9665 (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
9666 /*
9667 * In case the group is a pure software group, and we
9668 * try to add a hardware event, move the whole group to
9669 * the hardware context.
9670 */
9671 move_group = 1;
9672 }
9673 }
9674
9675 /*
9676 * Get the target context (task or percpu):
9677 */
9678 ctx = find_get_context(pmu, task, event);
9679 if (IS_ERR(ctx)) {
9680 err = PTR_ERR(ctx);
9681 goto err_alloc;
9682 }
9683
9684 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
9685 err = -EBUSY;
9686 goto err_context;
9687 }
9688
9689 /*
9690 * Look up the group leader (we will attach this event to it):
9691 */
9692 if (group_leader) {
9693 err = -EINVAL;
9694
9695 /*
9696 * Do not allow a recursive hierarchy (this new sibling
9697 * becoming part of another group-sibling):
9698 */
9699 if (group_leader->group_leader != group_leader)
9700 goto err_context;
9701
9702 /* All events in a group should have the same clock */
9703 if (group_leader->clock != event->clock)
9704 goto err_context;
9705
9706 /*
9707 * Do not allow to attach to a group in a different
9708 * task or CPU context:
9709 */
9710 if (move_group) {
9711 /*
9712 * Make sure we're both on the same task, or both
9713 * per-cpu events.
9714 */
9715 if (group_leader->ctx->task != ctx->task)
9716 goto err_context;
9717
9718 /*
9719 * Make sure we're both events for the same CPU;
9720 * grouping events for different CPUs is broken; since
9721 * you can never concurrently schedule them anyhow.
9722 */
9723 if (group_leader->cpu != event->cpu)
9724 goto err_context;
9725 } else {
9726 if (group_leader->ctx != ctx)
9727 goto err_context;
9728 }
9729
9730 /*
9731 * Only a group leader can be exclusive or pinned
9732 */
9733 if (attr.exclusive || attr.pinned)
9734 goto err_context;
9735 }
9736
9737 if (output_event) {
9738 err = perf_event_set_output(event, output_event);
9739 if (err)
9740 goto err_context;
9741 }
9742
9743 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
9744 f_flags);
9745 if (IS_ERR(event_file)) {
9746 err = PTR_ERR(event_file);
9747 event_file = NULL;
9748 goto err_context;
9749 }
9750
9751 if (move_group) {
9752 gctx = group_leader->ctx;
9753 mutex_lock_double(&gctx->mutex, &ctx->mutex);
9754 if (gctx->task == TASK_TOMBSTONE) {
9755 err = -ESRCH;
9756 goto err_locked;
9757 }
9758 } else {
9759 mutex_lock(&ctx->mutex);
9760 }
9761
9762 if (ctx->task == TASK_TOMBSTONE) {
9763 err = -ESRCH;
9764 goto err_locked;
9765 }
9766
9767 if (!perf_event_validate_size(event)) {
9768 err = -E2BIG;
9769 goto err_locked;
9770 }
9771
9772 /*
9773 * Must be under the same ctx::mutex as perf_install_in_context(),
9774 * because we need to serialize with concurrent event creation.
9775 */
9776 if (!exclusive_event_installable(event, ctx)) {
9777 /* exclusive and group stuff are assumed mutually exclusive */
9778 WARN_ON_ONCE(move_group);
9779
9780 err = -EBUSY;
9781 goto err_locked;
9782 }
9783
9784 WARN_ON_ONCE(ctx->parent_ctx);
9785
9786 /*
9787 * This is the point of no return; we cannot fail hereafter. This is
9788 * where we start modifying current state.
9789 */
9790
9791 if (move_group) {
9792 /*
9793 * See perf_event_ctx_lock() for comments on the details
9794 * of swizzling perf_event::ctx.
9795 */
9796 perf_remove_from_context(group_leader, 0);
9797
9798 list_for_each_entry(sibling, &group_leader->sibling_list,
9799 group_entry) {
9800 perf_remove_from_context(sibling, 0);
9801 put_ctx(gctx);
9802 }
9803
9804 /*
9805 * Wait for everybody to stop referencing the events through
9806 * the old lists, before installing it on new lists.
9807 */
9808 synchronize_rcu();
9809
9810 /*
9811 * Install the group siblings before the group leader.
9812 *
9813 * Because a group leader will try and install the entire group
9814 * (through the sibling list, which is still intact), we can
9815 * end up with siblings installed in the wrong context.
9816 *
9817 * By installing siblings first we NO-OP because they're not
9818 * reachable through the group lists.
9819 */
9820 list_for_each_entry(sibling, &group_leader->sibling_list,
9821 group_entry) {
9822 perf_event__state_init(sibling);
9823 perf_install_in_context(ctx, sibling, sibling->cpu);
9824 get_ctx(ctx);
9825 }
9826
9827 /*
9828 * Removing from the context ends up with a disabled
9829 * event. What we want here is the event in its initial
9830 * startup state, ready to be added into the new context.
9831 */
9832 perf_event__state_init(group_leader);
9833 perf_install_in_context(ctx, group_leader, group_leader->cpu);
9834 get_ctx(ctx);
9835
9836 /*
9837 * Now that all events are installed in @ctx, nothing
9838 * references @gctx anymore, so drop the last reference we have
9839 * on it.
9840 */
9841 put_ctx(gctx);
9842 }
9843
9844 /*
9845 * Precalculate sample_data sizes; do while holding ctx::mutex such
9846 * that we're serialized against further additions and before
9847 * perf_install_in_context() which is the point the event is active and
9848 * can use these values.
9849 */
9850 perf_event__header_size(event);
9851 perf_event__id_header_size(event);
9852
9853 event->owner = current;
9854
9855 perf_install_in_context(ctx, event, event->cpu);
9856 perf_unpin_context(ctx);
9857
9858 if (move_group)
9859 mutex_unlock(&gctx->mutex);
9860 mutex_unlock(&ctx->mutex);
9861
9862 if (task) {
9863 mutex_unlock(&task->signal->cred_guard_mutex);
9864 put_task_struct(task);
9865 }
9866
9867 put_online_cpus();
9868
9869 mutex_lock(&current->perf_event_mutex);
9870 list_add_tail(&event->owner_entry, &current->perf_event_list);
9871 mutex_unlock(&current->perf_event_mutex);
9872
9873 /*
9874 * Drop the reference on the group_event after placing the
9875 * new event on the sibling_list. This ensures destruction
9876 * of the group leader will find the pointer to itself in
9877 * perf_group_detach().
9878 */
9879 fdput(group);
9880 fd_install(event_fd, event_file);
9881 return event_fd;
9882
9883 err_locked:
9884 if (move_group)
9885 mutex_unlock(&gctx->mutex);
9886 mutex_unlock(&ctx->mutex);
9887 /* err_file: */
9888 fput(event_file);
9889 err_context:
9890 perf_unpin_context(ctx);
9891 put_ctx(ctx);
9892 err_alloc:
9893 /*
9894 * If event_file is set, the fput() above will have called ->release()
9895 * and that will take care of freeing the event.
9896 */
9897 if (!event_file)
9898 free_event(event);
9899 err_cred:
9900 if (task)
9901 mutex_unlock(&task->signal->cred_guard_mutex);
9902 err_cpus:
9903 put_online_cpus();
9904 err_task:
9905 if (task)
9906 put_task_struct(task);
9907 err_group_fd:
9908 fdput(group);
9909 err_fd:
9910 put_unused_fd(event_fd);
9911 return err;
9912 }
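/*
 * There is no glibc wrapper for this syscall; a minimal userspace sketch
 * (counting task clock for the calling thread on any CPU, pid = 0 for self,
 * cpu = -1 for any, group_fd = -1, flags = 0):
 *
 *	#include <unistd.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <linux/perf_event.h>
 *
 *	int open_task_clock(void)
 *	{
 *		struct perf_event_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.type = PERF_TYPE_SOFTWARE;
 *		attr.size = sizeof(attr);
 *		attr.config = PERF_COUNT_SW_TASK_CLOCK;
 *
 *		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	}
 *
 * The returned fd supports read(), mmap() for the ring buffer, and the
 * ioctls handled by perf_ioctl().
 */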
9913
9914 /**
9915 * perf_event_create_kernel_counter
9916 *
9917 * @attr: attributes of the counter to create
9918 * @cpu: cpu on which the counter is bound
9919 * @task: task to profile (NULL for percpu)
9920 */
9921 struct perf_event *
9922 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
9923 struct task_struct *task,
9924 perf_overflow_handler_t overflow_handler,
9925 void *context)
9926 {
9927 struct perf_event_context *ctx;
9928 struct perf_event *event;
9929 int err;
9930
9931 /*
9932 * Get the target context (task or percpu):
9933 */
9934
9935 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
9936 overflow_handler, context, -1);
9937 if (IS_ERR(event)) {
9938 err = PTR_ERR(event);
9939 goto err;
9940 }
9941
9942 /* Mark owner so we could distinguish it from user events. */
9943 event->owner = TASK_TOMBSTONE;
9944
9945 ctx = find_get_context(event->pmu, task, event);
9946 if (IS_ERR(ctx)) {
9947 err = PTR_ERR(ctx);
9948 goto err_free;
9949 }
9950
9951 WARN_ON_ONCE(ctx->parent_ctx);
9952 mutex_lock(&ctx->mutex);
9953 if (ctx->task == TASK_TOMBSTONE) {
9954 err = -ESRCH;
9955 goto err_unlock;
9956 }
9957
9958 if (!exclusive_event_installable(event, ctx)) {
9959 err = -EBUSY;
9960 goto err_unlock;
9961 }
9962
9963 perf_install_in_context(ctx, event, cpu);
9964 perf_unpin_context(ctx);
9965 mutex_unlock(&ctx->mutex);
9966
9967 return event;
9968
9969 err_unlock:
9970 mutex_unlock(&ctx->mutex);
9971 perf_unpin_context(ctx);
9972 put_ctx(ctx);
9973 err_free:
9974 free_event(event);
9975 err:
9976 return ERR_PTR(err);
9977 }
9978 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
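/*
 * A sketch of in-kernel usage; the overflow handler name is illustrative and
 * the attr values are only an example:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.config		= PERF_COUNT_SW_CPU_CLOCK,
 *		.size		= sizeof(attr),
 *		.sample_period	= NSEC_PER_SEC,
 *	};
 *	struct perf_event *ev;
 *
 *	ev = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *					      my_overflow_handler, NULL);
 *	if (IS_ERR(ev))
 *		return PTR_ERR(ev);
 *
 * The event is owned by the kernel (owner == TASK_TOMBSTONE) and is torn
 * down with perf_event_release_kernel() rather than through an fd.
 */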
9979
9980 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
9981 {
9982 struct perf_event_context *src_ctx;
9983 struct perf_event_context *dst_ctx;
9984 struct perf_event *event, *tmp;
9985 LIST_HEAD(events);
9986
9987 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
9988 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
9989
9990 /*
9991 * See perf_event_ctx_lock() for comments on the details
9992 * of swizzling perf_event::ctx.
9993 */
9994 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
9995 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
9996 event_entry) {
9997 perf_remove_from_context(event, 0);
9998 unaccount_event_cpu(event, src_cpu);
9999 put_ctx(src_ctx);
10000 list_add(&event->migrate_entry, &events);
10001 }
10002
10003 /*
10004 * Wait for the events to quiesce before re-instating them.
10005 */
10006 synchronize_rcu();
10007
10008 /*
10009 * Re-instate events in 2 passes.
10010 *
10011 * Skip over group leaders and only install siblings on this first
10012 * pass; siblings will not get enabled without a leader, but a
10013 * leader will enable its siblings, even if those are still on the old
10014 * context.
10015 */
10016 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
10017 if (event->group_leader == event)
10018 continue;
10019
10020 list_del(&event->migrate_entry);
10021 if (event->state >= PERF_EVENT_STATE_OFF)
10022 event->state = PERF_EVENT_STATE_INACTIVE;
10023 account_event_cpu(event, dst_cpu);
10024 perf_install_in_context(dst_ctx, event, dst_cpu);
10025 get_ctx(dst_ctx);
10026 }
10027
10028 /*
10029 * Once all the siblings are setup properly, install the group leaders
10030 * to make it go.
10031 */
10032 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
10033 list_del(&event->migrate_entry);
10034 if (event->state >= PERF_EVENT_STATE_OFF)
10035 event->state = PERF_EVENT_STATE_INACTIVE;
10036 account_event_cpu(event, dst_cpu);
10037 perf_install_in_context(dst_ctx, event, dst_cpu);
10038 get_ctx(dst_ctx);
10039 }
10040 mutex_unlock(&dst_ctx->mutex);
10041 mutex_unlock(&src_ctx->mutex);
10042 }
10043 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
10044
10045 static void sync_child_event(struct perf_event *child_event,
10046 struct task_struct *child)
10047 {
10048 struct perf_event *parent_event = child_event->parent;
10049 u64 child_val;
10050
10051 if (child_event->attr.inherit_stat)
10052 perf_event_read_event(child_event, child);
10053
10054 child_val = perf_event_count(child_event);
10055
10056 /*
10057 * Add back the child's count to the parent's count:
10058 */
10059 atomic64_add(child_val, &parent_event->child_count);
10060 atomic64_add(child_event->total_time_enabled,
10061 &parent_event->child_total_time_enabled);
10062 atomic64_add(child_event->total_time_running,
10063 &parent_event->child_total_time_running);
10064 }
10065
10066 static void
10067 perf_event_exit_event(struct perf_event *child_event,
10068 struct perf_event_context *child_ctx,
10069 struct task_struct *child)
10070 {
10071 struct perf_event *parent_event = child_event->parent;
10072
10073 /*
10074 * Do not destroy the 'original' grouping; because of the context
10075 * switch optimization the original events could've ended up in a
10076 * random child task.
10077 *
10078 * If we were to destroy the original group, all group related
10079 * operations would cease to function properly after this random
10080 * child dies.
10081 *
10082 * Do destroy all inherited groups, we don't care about those
10083 * and being thorough is better.
10084 */
10085 raw_spin_lock_irq(&child_ctx->lock);
10086 WARN_ON_ONCE(child_ctx->is_active);
10087
10088 if (parent_event)
10089 perf_group_detach(child_event);
10090 list_del_event(child_event, child_ctx);
10091 child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
10092 raw_spin_unlock_irq(&child_ctx->lock);
10093
10094 /*
10095 * Parent events are governed by their filedesc, retain them.
10096 */
10097 if (!parent_event) {
10098 perf_event_wakeup(child_event);
10099 return;
10100 }
10101 /*
10102 * Child events can be cleaned up.
10103 */
10104
10105 sync_child_event(child_event, child);
10106
10107 /*
10108 * Remove this event from the parent's list
10109 */
10110 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
10111 mutex_lock(&parent_event->child_mutex);
10112 list_del_init(&child_event->child_list);
10113 mutex_unlock(&parent_event->child_mutex);
10114
10115 /*
10116 * Kick perf_poll() for is_event_hup().
10117 */
10118 perf_event_wakeup(parent_event);
10119 free_event(child_event);
10120 put_event(parent_event);
10121 }
10122
10123 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
10124 {
10125 struct perf_event_context *child_ctx, *clone_ctx = NULL;
10126 struct perf_event *child_event, *next;
10127
10128 WARN_ON_ONCE(child != current);
10129
10130 child_ctx = perf_pin_task_context(child, ctxn);
10131 if (!child_ctx)
10132 return;
10133
10134 /*
10135 * In order to reduce the amount of trickery in ctx tear-down, we hold
10136 * ctx::mutex over the entire thing. This serializes against almost
10137 * everything that wants to access the ctx.
10138 *
10139 * The exception is sys_perf_event_open() /
10140 * perf_event_create_kernel_counter() which does find_get_context()
10141 * without ctx::mutex (it cannot because of the move_group double mutex
10142 * lock thing). See the comments in perf_install_in_context().
10143 */
10144 mutex_lock(&child_ctx->mutex);
10145
10146 /*
10147 * In a single ctx::lock section, de-schedule the events and detach the
10148 * context from the task such that we cannot ever get it scheduled back
10149 * in.
10150 */
10151 raw_spin_lock_irq(&child_ctx->lock);
10152 task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx);
10153
10154 /*
10155 * Now that the context is inactive, destroy the task <-> ctx relation
10156 * and mark the context dead.
10157 */
10158 RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
10159 put_ctx(child_ctx); /* cannot be last */
10160 WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
10161 put_task_struct(current); /* cannot be last */
10162
10163 clone_ctx = unclone_ctx(child_ctx);
10164 raw_spin_unlock_irq(&child_ctx->lock);
10165
10166 if (clone_ctx)
10167 put_ctx(clone_ctx);
10168
10169 /*
10170 * Report the task dead after unscheduling the events so that we
10171 * won't get any samples after PERF_RECORD_EXIT. We can however still
10172 * get a few PERF_RECORD_READ events.
10173 */
10174 perf_event_task(child, child_ctx, 0);
10175
10176 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
10177 perf_event_exit_event(child_event, child_ctx, child);
10178
10179 mutex_unlock(&child_ctx->mutex);
10180
10181 put_ctx(child_ctx);
10182 }
10183
10184 /*
10185 * When a child task exits, feed back event values to parent events.
10186 *
10187 * Can be called with cred_guard_mutex held when called from
10188 * install_exec_creds().
10189 */
10190 void perf_event_exit_task(struct task_struct *child)
10191 {
10192 struct perf_event *event, *tmp;
10193 int ctxn;
10194
10195 mutex_lock(&child->perf_event_mutex);
10196 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
10197 owner_entry) {
10198 list_del_init(&event->owner_entry);
10199
10200 /*
10201 * Ensure the list deletion is visible before we clear
10202 * the owner, closes a race against perf_release() where
10203 * we need to serialize on the owner->perf_event_mutex.
10204 */
10205 smp_store_release(&event->owner, NULL);
10206 }
10207 mutex_unlock(&child->perf_event_mutex);
10208
10209 for_each_task_context_nr(ctxn)
10210 perf_event_exit_task_context(child, ctxn);
10211
10212 /*
10213 * The perf_event_exit_task_context calls perf_event_task
10214 * with child's task_ctx, which generates EXIT events for
10215 * child contexts and sets child->perf_event_ctxp[] to NULL.
10216 * At this point we need to send EXIT events to cpu contexts.
10217 */
10218 perf_event_task(child, NULL, 0);
10219 }
10220
10221 static void perf_free_event(struct perf_event *event,
10222 struct perf_event_context *ctx)
10223 {
10224 struct perf_event *parent = event->parent;
10225
10226 if (WARN_ON_ONCE(!parent))
10227 return;
10228
10229 mutex_lock(&parent->child_mutex);
10230 list_del_init(&event->child_list);
10231 mutex_unlock(&parent->child_mutex);
10232
10233 put_event(parent);
10234
10235 raw_spin_lock_irq(&ctx->lock);
10236 perf_group_detach(event);
10237 list_del_event(event, ctx);
10238 raw_spin_unlock_irq(&ctx->lock);
10239 free_event(event);
10240 }
10241
10242 /*
10243 * Free an unexposed, unused context as created by inheritance by
10244 * perf_event_init_task below, used by fork() in case of fail.
10245 *
10246 * Not all locks are strictly required, but take them anyway to be nice and
10247 * help out with the lockdep assertions.
10248 */
10249 void perf_event_free_task(struct task_struct *task)
10250 {
10251 struct perf_event_context *ctx;
10252 struct perf_event *event, *tmp;
10253 int ctxn;
10254
10255 for_each_task_context_nr(ctxn) {
10256 ctx = task->perf_event_ctxp[ctxn];
10257 if (!ctx)
10258 continue;
10259
10260 mutex_lock(&ctx->mutex);
10261 again:
10262 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
10263 group_entry)
10264 perf_free_event(event, ctx);
10265
10266 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
10267 group_entry)
10268 perf_free_event(event, ctx);
10269
10270 if (!list_empty(&ctx->pinned_groups) ||
10271 !list_empty(&ctx->flexible_groups))
10272 goto again;
10273
10274 mutex_unlock(&ctx->mutex);
10275
10276 put_ctx(ctx);
10277 }
10278 }
10279
10280 void perf_event_delayed_put(struct task_struct *task)
10281 {
10282 int ctxn;
10283
10284 for_each_task_context_nr(ctxn)
10285 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
10286 }
10287
10288 struct file *perf_event_get(unsigned int fd)
10289 {
10290 struct file *file;
10291
10292 file = fget_raw(fd);
10293 if (!file)
10294 return ERR_PTR(-EBADF);
10295
10296 if (file->f_op != &perf_fops) {
10297 fput(file);
10298 return ERR_PTR(-EBADF);
10299 }
10300
10301 return file;
10302 }
10303
10304 const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
10305 {
10306 if (!event)
10307 return ERR_PTR(-EINVAL);
10308
10309 return &event->attr;
10310 }
10311
10312 /*
10313 * inherit an event from parent task to child task:
10314 */
10315 static struct perf_event *
10316 inherit_event(struct perf_event *parent_event,
10317 struct task_struct *parent,
10318 struct perf_event_context *parent_ctx,
10319 struct task_struct *child,
10320 struct perf_event *group_leader,
10321 struct perf_event_context *child_ctx)
10322 {
10323 enum perf_event_active_state parent_state = parent_event->state;
10324 struct perf_event *child_event;
10325 unsigned long flags;
10326
10327 /*
10328 * Instead of creating recursive hierarchies of events,
10329 * we link inherited events back to the original parent,
10330 * which is guaranteed to have a filp that we use as the
10331 * reference count:
10332 */
10333 if (parent_event->parent)
10334 parent_event = parent_event->parent;
10335
10336 child_event = perf_event_alloc(&parent_event->attr,
10337 parent_event->cpu,
10338 child,
10339 group_leader, parent_event,
10340 NULL, NULL, -1);
10341 if (IS_ERR(child_event))
10342 return child_event;
10343
10344 /*
10345 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
10346 * must be under the same lock in order to serialize against
10347 * perf_event_release_kernel(), such that either we must observe
10348 * is_orphaned_event() or they will observe us on the child_list.
10349 */
10350 mutex_lock(&parent_event->child_mutex);
10351 if (is_orphaned_event(parent_event) ||
10352 !atomic_long_inc_not_zero(&parent_event->refcount)) {
10353 mutex_unlock(&parent_event->child_mutex);
10354 free_event(child_event);
10355 return NULL;
10356 }
10357
10358 get_ctx(child_ctx);
10359
10360 /*
10361 * Make the child state follow the state of the parent event,
10362 * not its attr.disabled bit. We hold the parent's mutex,
10363 * so we won't race with perf_event_{en, dis}able_family.
10364 */
10365 if (parent_state >= PERF_EVENT_STATE_INACTIVE)
10366 child_event->state = PERF_EVENT_STATE_INACTIVE;
10367 else
10368 child_event->state = PERF_EVENT_STATE_OFF;
10369
10370 if (parent_event->attr.freq) {
10371 u64 sample_period = parent_event->hw.sample_period;
10372 struct hw_perf_event *hwc = &child_event->hw;
10373
10374 hwc->sample_period = sample_period;
10375 hwc->last_period = sample_period;
10376
10377 local64_set(&hwc->period_left, sample_period);
10378 }
10379
10380 child_event->ctx = child_ctx;
10381 child_event->overflow_handler = parent_event->overflow_handler;
10382 child_event->overflow_handler_context
10383 = parent_event->overflow_handler_context;
10384
10385 /*
10386 * Precalculate sample_data sizes
10387 */
10388 perf_event__header_size(child_event);
10389 perf_event__id_header_size(child_event);
10390
10391 /*
10392 * Link it up in the child's context:
10393 */
10394 raw_spin_lock_irqsave(&child_ctx->lock, flags);
10395 add_event_to_ctx(child_event, child_ctx);
10396 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
10397
10398 /*
10399 * Link this into the parent event's child list
10400 */
10401 list_add_tail(&child_event->child_list, &parent_event->child_list);
10402 mutex_unlock(&parent_event->child_mutex);
10403
10404 return child_event;
10405 }
10406
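/*
 * Inherit an entire group: clone the group leader first, then clone each
 * of its siblings into the child context with the new leader as their
 * group leader.
 */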
10407 static int inherit_group(struct perf_event *parent_event,
10408 struct task_struct *parent,
10409 struct perf_event_context *parent_ctx,
10410 struct task_struct *child,
10411 struct perf_event_context *child_ctx)
10412 {
10413 struct perf_event *leader;
10414 struct perf_event *sub;
10415 struct perf_event *child_ctr;
10416
10417 leader = inherit_event(parent_event, parent, parent_ctx,
10418 child, NULL, child_ctx);
10419 if (IS_ERR(leader))
10420 return PTR_ERR(leader);
10421 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
10422 child_ctr = inherit_event(sub, parent, parent_ctx,
10423 child, leader, child_ctx);
10424 if (IS_ERR(child_ctr))
10425 return PTR_ERR(child_ctr);
10426 }
10427 return 0;
10428 }
10429
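/*
 * Inherit one (group of) event(s) into the child, allocating the child's
 * context for this ctxn on first use. Events without attr.inherit set are
 * skipped; in that case (or on error) *inherited_all is cleared so the
 * caller knows the child context is not an exact clone of the parent's.
 */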
10430 static int
10431 inherit_task_group(struct perf_event *event, struct task_struct *parent,
10432 struct perf_event_context *parent_ctx,
10433 struct task_struct *child, int ctxn,
10434 int *inherited_all)
10435 {
10436 int ret;
10437 struct perf_event_context *child_ctx;
10438
10439 if (!event->attr.inherit) {
10440 *inherited_all = 0;
10441 return 0;
10442 }
10443
10444 child_ctx = child->perf_event_ctxp[ctxn];
10445 if (!child_ctx) {
10446 /*
10447 * This is executed from the parent task context, so
10448 * inherit events that have been marked for cloning.
10449 * First allocate and initialize a context for the
10450 * child.
10451 */
10452
10453 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
10454 if (!child_ctx)
10455 return -ENOMEM;
10456
10457 child->perf_event_ctxp[ctxn] = child_ctx;
10458 }
10459
10460 ret = inherit_group(event, parent, parent_ctx,
10461 child, child_ctx);
10462
10463 if (ret)
10464 *inherited_all = 0;
10465
10466 return ret;
10467 }
10468
10469 /*
10470 * Initialize the perf_event context in task_struct
10471 */
10472 static int perf_event_init_context(struct task_struct *child, int ctxn)
10473 {
10474 struct perf_event_context *child_ctx, *parent_ctx;
10475 struct perf_event_context *cloned_ctx;
10476 struct perf_event *event;
10477 struct task_struct *parent = current;
10478 int inherited_all = 1;
10479 unsigned long flags;
10480 int ret = 0;
10481
10482 if (likely(!parent->perf_event_ctxp[ctxn]))
10483 return 0;
10484
10485 /*
10486 * If the parent's context is a clone, pin it so it won't get
10487 * swapped under us.
10488 */
10489 parent_ctx = perf_pin_task_context(parent, ctxn);
10490 if (!parent_ctx)
10491 return 0;
10492
10493 /*
10494 * No need to check if parent_ctx != NULL here; since we saw
10495 * it non-NULL earlier, the only reason for it to become NULL
10496 * is if we exit, and since we're currently in the middle of
10497 * a fork we can't be exiting at the same time.
10498 */
10499
10500 /*
10501 * Lock the parent list. No need to lock the child - not PID
10502 * hashed yet and not running, so nobody can access it.
10503 */
10504 mutex_lock(&parent_ctx->mutex);
10505
10506 /*
10507  * We don't have to disable NMIs - we are only looking at
10508 * the list, not manipulating it:
10509 */
10510 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
10511 ret = inherit_task_group(event, parent, parent_ctx,
10512 child, ctxn, &inherited_all);
10513 if (ret)
10514 break;
10515 }
10516
10517 /*
10518  * We can't hold ctx->lock when iterating the ->flexible_groups list due
10519 * to allocations, but we need to prevent rotation because
10520 * rotate_ctx() will change the list from interrupt context.
10521 */
10522 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10523 parent_ctx->rotate_disable = 1;
10524 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
10525
10526 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
10527 ret = inherit_task_group(event, parent, parent_ctx,
10528 child, ctxn, &inherited_all);
10529 if (ret)
10530 break;
10531 }
10532
10533 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10534 parent_ctx->rotate_disable = 0;
10535
10536 child_ctx = child->perf_event_ctxp[ctxn];
10537
10538 if (child_ctx && inherited_all) {
10539 /*
10540 * Mark the child context as a clone of the parent
10541 * context, or of whatever the parent is a clone of.
10542 *
10543 * Note that if the parent is a clone, the holding of
10544  * parent_ctx->lock prevents it from being uncloned.
10545 */
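/*
 * Being marked a clone is what later allows the context-switch path to
 * simply swap the parent's and child's contexts when they are still
 * equivalent, instead of rescheduling every inherited event one by one.
 */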
10546 cloned_ctx = parent_ctx->parent_ctx;
10547 if (cloned_ctx) {
10548 child_ctx->parent_ctx = cloned_ctx;
10549 child_ctx->parent_gen = parent_ctx->parent_gen;
10550 } else {
10551 child_ctx->parent_ctx = parent_ctx;
10552 child_ctx->parent_gen = parent_ctx->generation;
10553 }
10554 get_ctx(child_ctx->parent_ctx);
10555 }
10556
10557 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
10558 mutex_unlock(&parent_ctx->mutex);
10559
10560 perf_unpin_context(parent_ctx);
10561 put_ctx(parent_ctx);
10562
10563 return ret;
10564 }
10565
10566 /*
10567  * Initialize the perf_event contexts in task_struct
10568 */
10569 int perf_event_init_task(struct task_struct *child)
10570 {
10571 int ctxn, ret;
10572
10573 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
10574 mutex_init(&child->perf_event_mutex);
10575 INIT_LIST_HEAD(&child->perf_event_list);
10576
10577 for_each_task_context_nr(ctxn) {
10578 ret = perf_event_init_context(child, ctxn);
10579 if (ret) {
10580 perf_event_free_task(child);
10581 return ret;
10582 }
10583 }
10584
10585 return 0;
10586 }
10587
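/*
 * Boot-time setup of the per-CPU state used throughout this file:
 * swevent hashes, the active context list, the side-band event lists and
 * the sched callback list, for every possible CPU.
 */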
10588 static void __init perf_event_init_all_cpus(void)
10589 {
10590 struct swevent_htable *swhash;
10591 int cpu;
10592
10593 for_each_possible_cpu(cpu) {
10594 swhash = &per_cpu(swevent_htable, cpu);
10595 mutex_init(&swhash->hlist_mutex);
10596 INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
10597
10598 INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
10599 raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
10600
10601 INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
10602 }
10603 }
10604
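/*
 * Run when a CPU is brought up, and also called directly for the boot CPU
 * from perf_event_init() below: if software events already have users,
 * (re)allocate the per-CPU swevent hash list for the incoming CPU.
 */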
10605 int perf_event_init_cpu(unsigned int cpu)
10606 {
10607 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
10608
10609 mutex_lock(&swhash->hlist_mutex);
10610 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
10611 struct swevent_hlist *hlist;
10612
10613 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
10614 WARN_ON(!hlist);
10615 rcu_assign_pointer(swhash->swevent_hlist, hlist);
10616 }
10617 mutex_unlock(&swhash->hlist_mutex);
10618 return 0;
10619 }
10620
10621 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
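/*
 * Runs on the CPU that is going away (via smp_call_function_single()
 * below) and detaches every event still on that CPU's context,
 * removing whole groups at once.
 */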
10622 static void __perf_event_exit_context(void *__info)
10623 {
10624 struct perf_event_context *ctx = __info;
10625 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
10626 struct perf_event *event;
10627
10628 raw_spin_lock(&ctx->lock);
10629 list_for_each_entry(event, &ctx->event_list, event_entry)
10630 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
10631 raw_spin_unlock(&ctx->lock);
10632 }
10633
10634 static void perf_event_exit_cpu_context(int cpu)
10635 {
10636 struct perf_event_context *ctx;
10637 struct pmu *pmu;
10638 int idx;
10639
10640 idx = srcu_read_lock(&pmus_srcu);
10641 list_for_each_entry_rcu(pmu, &pmus, entry) {
10642 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
10643
10644 mutex_lock(&ctx->mutex);
10645 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
10646 mutex_unlock(&ctx->mutex);
10647 }
10648 srcu_read_unlock(&pmus_srcu, idx);
10649 }
10650 #else
10651
10652 static void perf_event_exit_cpu_context(int cpu) { }
10653
10654 #endif
10655
10656 int perf_event_exit_cpu(unsigned int cpu)
10657 {
10658 perf_event_exit_cpu_context(cpu);
10659 return 0;
10660 }
10661
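/*
 * Reboot/kexec notifier: tear down the per-CPU contexts on every online
 * CPU so no counters are left running across the reboot.
 */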
10662 static int
10663 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
10664 {
10665 int cpu;
10666
10667 for_each_online_cpu(cpu)
10668 perf_event_exit_cpu(cpu);
10669
10670 return NOTIFY_OK;
10671 }
10672
10673 /*
10674 * Run the perf reboot notifier at the very last possible moment so that
10675 * the generic watchdog code runs as long as possible.
10676 */
10677 static struct notifier_block perf_reboot_notifier = {
10678 .notifier_call = perf_reboot,
10679 .priority = INT_MIN,
10680 };
10681
10682 void __init perf_event_init(void)
10683 {
10684 int ret;
10685
10686 idr_init(&pmu_idr);
10687
10688 perf_event_init_all_cpus();
10689 init_srcu_struct(&pmus_srcu);
10690 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
10691 perf_pmu_register(&perf_cpu_clock, NULL, -1);
10692 perf_pmu_register(&perf_task_clock, NULL, -1);
10693 perf_tp_register();
10694 perf_event_init_cpu(smp_processor_id());
10695 register_reboot_notifier(&perf_reboot_notifier);
10696
10697 ret = init_hw_breakpoint();
10698 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
10699
10700 /*
10701  * Build-time assertion that we keep the data_head at the intended
10702  * location. IOW, validate that we got the __reserved[] size right.
10703 */
10704 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
10705 != 1024);
10706 }
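
/*
 * Illustrative sketch only: a PMU driver elsewhere in the tree would
 * register itself much like the built-in software PMUs above, passing -1
 * as the type to request a dynamically allocated id from pmu_idr rather
 * than one of the fixed PERF_TYPE_* values. The names below are
 * hypothetical:
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_invalid_context,
 *		.event_init	= my_pmu_event_init,
 *		.add		= my_pmu_add,
 *		.del		= my_pmu_del,
 *		.start		= my_pmu_start,
 *		.stop		= my_pmu_stop,
 *		.read		= my_pmu_read,
 *	};
 *
 *	ret = perf_pmu_register(&my_pmu, "my_pmu", -1);
 */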
10707
10708 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
10709 char *page)
10710 {
10711 struct perf_pmu_events_attr *pmu_attr =
10712 container_of(attr, struct perf_pmu_events_attr, attr);
10713
10714 if (pmu_attr->event_str)
10715 return sprintf(page, "%s\n", pmu_attr->event_str);
10716
10717 return 0;
10718 }
10719 EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
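
/*
 * Illustrative sketch only: PMU drivers typically hook this show routine
 * up through the PMU_EVENT_ATTR_STRING() helper, which fills in
 * perf_pmu_events_attr::event_str so the string is emitted verbatim in
 * sysfs. The event name and string below are hypothetical:
 *
 *	PMU_EVENT_ATTR_STRING(my-event, my_event_attr, "event=0x2a");
 */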
10720
10721 static int __init perf_event_sysfs_init(void)
10722 {
10723 struct pmu *pmu;
10724 int ret;
10725
10726 mutex_lock(&pmus_lock);
10727
10728 ret = bus_register(&pmu_bus);
10729 if (ret)
10730 goto unlock;
10731
10732 list_for_each_entry(pmu, &pmus, entry) {
10733 if (!pmu->name || pmu->type < 0)
10734 continue;
10735
10736 ret = pmu_dev_alloc(pmu);
10737 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
10738 }
10739 pmu_bus_running = 1;
10740 ret = 0;
10741
10742 unlock:
10743 mutex_unlock(&pmus_lock);
10744
10745 return ret;
10746 }
10747 device_initcall(perf_event_sysfs_init);
10748
10749 #ifdef CONFIG_CGROUP_PERF
10750 static struct cgroup_subsys_state *
10751 perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
10752 {
10753 struct perf_cgroup *jc;
10754
10755 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
10756 if (!jc)
10757 return ERR_PTR(-ENOMEM);
10758
10759 jc->info = alloc_percpu(struct perf_cgroup_info);
10760 if (!jc->info) {
10761 kfree(jc);
10762 return ERR_PTR(-ENOMEM);
10763 }
10764
10765 return &jc->css;
10766 }
10767
10768 static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
10769 {
10770 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
10771
10772 free_percpu(jc->info);
10773 kfree(jc);
10774 }
10775
10776 static int __perf_cgroup_move(void *info)
10777 {
10778 struct task_struct *task = info;
10779 rcu_read_lock();
10780 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
10781 rcu_read_unlock();
10782 return 0;
10783 }
10784
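/*
 * When tasks are moved into (or out of) a perf_event cgroup, force each
 * one through a cgroup switch on the CPU it is running on so its cgroup
 * events start/stop counting immediately.
 */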
10785 static void perf_cgroup_attach(struct cgroup_taskset *tset)
10786 {
10787 struct task_struct *task;
10788 struct cgroup_subsys_state *css;
10789
10790 cgroup_taskset_for_each(task, css, tset)
10791 task_function_call(task, __perf_cgroup_move, task);
10792 }
10793
10794 struct cgroup_subsys perf_event_cgrp_subsys = {
10795 .css_alloc = perf_cgroup_css_alloc,
10796 .css_free = perf_cgroup_css_free,
10797 .attach = perf_cgroup_attach,
10798 };
10799 #endif /* CONFIG_CGROUP_PERF */