git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git / blame - kernel/events/core.c
8e86e015 1// SPDX-License-Identifier: GPL-2.0
0793a61d 2/*
57c0c15b 3 * Performance events core code:
0793a61d 4 *
98144511 5 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
e7e7ee2e 6 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
90eec103 7 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
d36b6910 8 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
0793a61d
TG
9 */
10
11#include <linux/fs.h>
b9cacc7b 12#include <linux/mm.h>
0793a61d
TG
13#include <linux/cpu.h>
14#include <linux/smp.h>
2e80a82a 15#include <linux/idr.h>
04289bb9 16#include <linux/file.h>
0793a61d 17#include <linux/poll.h>
5a0e3ad6 18#include <linux/slab.h>
76e1d904 19#include <linux/hash.h>
12351ef8 20#include <linux/tick.h>
0793a61d 21#include <linux/sysfs.h>
22a4f650 22#include <linux/dcache.h>
0793a61d 23#include <linux/percpu.h>
22a4f650 24#include <linux/ptrace.h>
c277443c 25#include <linux/reboot.h>
b9cacc7b 26#include <linux/vmstat.h>
abe43400 27#include <linux/device.h>
6e5fdeed 28#include <linux/export.h>
906010b2 29#include <linux/vmalloc.h>
b9cacc7b 30#include <linux/hardirq.h>
03911132 31#include <linux/hugetlb.h>
b9cacc7b 32#include <linux/rculist.h>
0793a61d
TG
33#include <linux/uaccess.h>
34#include <linux/syscalls.h>
35#include <linux/anon_inodes.h>
aa9c4c0f 36#include <linux/kernel_stat.h>
39bed6cb 37#include <linux/cgroup.h>
cdd6c482 38#include <linux/perf_event.h>
af658dca 39#include <linux/trace_events.h>
3c502e7a 40#include <linux/hw_breakpoint.h>
c5ebcedb 41#include <linux/mm_types.h>
c464c76e 42#include <linux/module.h>
f972eb63 43#include <linux/mman.h>
b3f20785 44#include <linux/compat.h>
2541517c
AS
45#include <linux/bpf.h>
46#include <linux/filter.h>
375637bc
AS
47#include <linux/namei.h>
48#include <linux/parser.h>
e6017571 49#include <linux/sched/clock.h>
6e84f315 50#include <linux/sched/mm.h>
e4222673
HB
51#include <linux/proc_ns.h>
52#include <linux/mount.h>
6eef8a71 53#include <linux/min_heap.h>
0793a61d 54
76369139
FW
55#include "internal.h"
56
4e193bd4
TB
57#include <asm/irq_regs.h>
58
272325c4
PZ
59typedef int (*remote_function_f)(void *);
60
fe4b04fa 61struct remote_function_call {
e7e7ee2e 62 struct task_struct *p;
272325c4 63 remote_function_f func;
e7e7ee2e
IM
64 void *info;
65 int ret;
fe4b04fa
PZ
66};
67
68static void remote_function(void *data)
69{
70 struct remote_function_call *tfc = data;
71 struct task_struct *p = tfc->p;
72
73 if (p) {
0da4cf3e
PZ
74 /* -EAGAIN */
75 if (task_cpu(p) != smp_processor_id())
76 return;
77
78 /*
 79 * Now that we're on the right CPU with IRQs disabled, we can test
80 * if we hit the right task without races.
81 */
82
83 tfc->ret = -ESRCH; /* No such (running) process */
84 if (p != current)
fe4b04fa
PZ
85 return;
86 }
87
88 tfc->ret = tfc->func(tfc->info);
89}
90
91/**
92 * task_function_call - call a function on the cpu on which a task runs
93 * @p: the task to evaluate
94 * @func: the function to be called
95 * @info: the function call argument
96 *
97 * Calls the function @func when the task is currently running. This might
2ed6edd3
BR
98 * be on the current CPU, which just calls the function directly. This will
99 * retry due to any failures in smp_call_function_single(), such as if the
100 * task_cpu() goes offline concurrently.
fe4b04fa 101 *
2ed6edd3 102 * returns @func return value or -ESRCH when the process isn't running
fe4b04fa
PZ
103 */
104static int
272325c4 105task_function_call(struct task_struct *p, remote_function_f func, void *info)
fe4b04fa
PZ
106{
107 struct remote_function_call data = {
e7e7ee2e
IM
108 .p = p,
109 .func = func,
110 .info = info,
0da4cf3e 111 .ret = -EAGAIN,
fe4b04fa 112 };
0da4cf3e 113 int ret;
fe4b04fa 114
2ed6edd3
BR
115 for (;;) {
116 ret = smp_call_function_single(task_cpu(p), remote_function,
117 &data, 1);
118 ret = !ret ? data.ret : -EAGAIN;
119
120 if (ret != -EAGAIN)
121 break;
122
123 cond_resched();
124 }
fe4b04fa 125
0da4cf3e 126 return ret;
fe4b04fa
PZ
127}
128
129/**
130 * cpu_function_call - call a function on the cpu
131 * @func: the function to be called
132 * @info: the function call argument
133 *
134 * Calls the function @func on the remote cpu.
135 *
136 * returns: @func return value or -ENXIO when the cpu is offline
137 */
272325c4 138static int cpu_function_call(int cpu, remote_function_f func, void *info)
fe4b04fa
PZ
139{
140 struct remote_function_call data = {
e7e7ee2e
IM
141 .p = NULL,
142 .func = func,
143 .info = info,
144 .ret = -ENXIO, /* No such CPU */
fe4b04fa
PZ
145 };
146
147 smp_call_function_single(cpu, remote_function, &data, 1);
148
149 return data.ret;
150}
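/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * source; __do_poke is a hypothetical callback): a helper that must run on
 * the CPU where @p is current could be invoked as
 *
 *	static int __do_poke(void *info) { return 0; }
 *	...
 *	ret = task_function_call(p, __do_poke, NULL);
 *
 * task_function_call() keeps retrying the IPI until it lands while @p is
 * running on that CPU, or returns -ESRCH if the task is not running;
 * cpu_function_call() is the analogous helper for a fixed CPU and returns
 * -ENXIO if that CPU is offline.
 */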
151
fae3fde6
PZ
152static inline struct perf_cpu_context *
153__get_cpu_context(struct perf_event_context *ctx)
154{
155 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
156}
157
158static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
159 struct perf_event_context *ctx)
0017960f 160{
fae3fde6
PZ
161 raw_spin_lock(&cpuctx->ctx.lock);
162 if (ctx)
163 raw_spin_lock(&ctx->lock);
164}
165
166static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
167 struct perf_event_context *ctx)
168{
169 if (ctx)
170 raw_spin_unlock(&ctx->lock);
171 raw_spin_unlock(&cpuctx->ctx.lock);
172}
173
63b6da39
PZ
174#define TASK_TOMBSTONE ((void *)-1L)
175
176static bool is_kernel_event(struct perf_event *event)
177{
f47c02c0 178 return READ_ONCE(event->owner) == TASK_TOMBSTONE;
63b6da39
PZ
179}
180
39a43640
PZ
181/*
182 * On task ctx scheduling...
183 *
184 * When !ctx->nr_events a task context will not be scheduled. This means
185 * we can disable the scheduler hooks (for performance) without leaving
186 * pending task ctx state.
187 *
188 * This however results in two special cases:
189 *
190 * - removing the last event from a task ctx; this is relatively straight
191 * forward and is done in __perf_remove_from_context.
192 *
193 * - adding the first event to a task ctx; this is tricky because we cannot
194 * rely on ctx->is_active and therefore cannot use event_function_call().
195 * See perf_install_in_context().
196 *
39a43640
PZ
197 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
198 */
199
fae3fde6
PZ
200typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
201 struct perf_event_context *, void *);
202
203struct event_function_struct {
204 struct perf_event *event;
205 event_f func;
206 void *data;
207};
208
209static int event_function(void *info)
210{
211 struct event_function_struct *efs = info;
212 struct perf_event *event = efs->event;
0017960f 213 struct perf_event_context *ctx = event->ctx;
fae3fde6
PZ
214 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
215 struct perf_event_context *task_ctx = cpuctx->task_ctx;
63b6da39 216 int ret = 0;
fae3fde6 217
16444645 218 lockdep_assert_irqs_disabled();
fae3fde6 219
63b6da39 220 perf_ctx_lock(cpuctx, task_ctx);
fae3fde6
PZ
221 /*
222 * Since we do the IPI call without holding ctx->lock things can have
223 * changed, double check we hit the task we set out to hit.
fae3fde6
PZ
224 */
225 if (ctx->task) {
63b6da39 226 if (ctx->task != current) {
0da4cf3e 227 ret = -ESRCH;
63b6da39
PZ
228 goto unlock;
229 }
fae3fde6 230
fae3fde6
PZ
231 /*
232 * We only use event_function_call() on established contexts,
233 * and event_function() is only ever called when active (or
234 * rather, we'll have bailed in task_function_call() or the
235 * above ctx->task != current test), therefore we must have
236 * ctx->is_active here.
237 */
238 WARN_ON_ONCE(!ctx->is_active);
239 /*
240 * And since we have ctx->is_active, cpuctx->task_ctx must
241 * match.
242 */
63b6da39
PZ
243 WARN_ON_ONCE(task_ctx != ctx);
244 } else {
245 WARN_ON_ONCE(&cpuctx->ctx != ctx);
fae3fde6 246 }
63b6da39 247
fae3fde6 248 efs->func(event, cpuctx, ctx, efs->data);
63b6da39 249unlock:
fae3fde6
PZ
250 perf_ctx_unlock(cpuctx, task_ctx);
251
63b6da39 252 return ret;
fae3fde6
PZ
253}
254
fae3fde6 255static void event_function_call(struct perf_event *event, event_f func, void *data)
0017960f
PZ
256{
257 struct perf_event_context *ctx = event->ctx;
63b6da39 258 struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
fae3fde6
PZ
259 struct event_function_struct efs = {
260 .event = event,
261 .func = func,
262 .data = data,
263 };
0017960f 264
c97f4736
PZ
265 if (!event->parent) {
266 /*
267 * If this is a !child event, we must hold ctx::mutex to
 268 * stabilize the event->ctx relation. See
269 * perf_event_ctx_lock().
270 */
271 lockdep_assert_held(&ctx->mutex);
272 }
0017960f
PZ
273
274 if (!task) {
fae3fde6 275 cpu_function_call(event->cpu, event_function, &efs);
0017960f
PZ
276 return;
277 }
278
63b6da39
PZ
279 if (task == TASK_TOMBSTONE)
280 return;
281
a096309b 282again:
fae3fde6 283 if (!task_function_call(task, event_function, &efs))
0017960f
PZ
284 return;
285
286 raw_spin_lock_irq(&ctx->lock);
63b6da39
PZ
287 /*
288 * Reload the task pointer, it might have been changed by
289 * a concurrent perf_event_context_sched_out().
290 */
291 task = ctx->task;
a096309b
PZ
292 if (task == TASK_TOMBSTONE) {
293 raw_spin_unlock_irq(&ctx->lock);
294 return;
0017960f 295 }
a096309b
PZ
296 if (ctx->is_active) {
297 raw_spin_unlock_irq(&ctx->lock);
298 goto again;
299 }
300 func(event, NULL, ctx, data);
0017960f
PZ
301 raw_spin_unlock_irq(&ctx->lock);
302}
303
cca20946
PZ
304/*
305 * Similar to event_function_call() + event_function(), but hard assumes IRQs
306 * are already disabled and we're on the right CPU.
307 */
308static void event_function_local(struct perf_event *event, event_f func, void *data)
309{
310 struct perf_event_context *ctx = event->ctx;
311 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
312 struct task_struct *task = READ_ONCE(ctx->task);
313 struct perf_event_context *task_ctx = NULL;
314
16444645 315 lockdep_assert_irqs_disabled();
cca20946
PZ
316
317 if (task) {
318 if (task == TASK_TOMBSTONE)
319 return;
320
321 task_ctx = ctx;
322 }
323
324 perf_ctx_lock(cpuctx, task_ctx);
325
326 task = ctx->task;
327 if (task == TASK_TOMBSTONE)
328 goto unlock;
329
330 if (task) {
331 /*
332 * We must be either inactive or active and the right task,
333 * otherwise we're screwed, since we cannot IPI to somewhere
334 * else.
335 */
336 if (ctx->is_active) {
337 if (WARN_ON_ONCE(task != current))
338 goto unlock;
339
340 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
341 goto unlock;
342 }
343 } else {
344 WARN_ON_ONCE(&cpuctx->ctx != ctx);
345 }
346
347 func(event, cpuctx, ctx, data);
348unlock:
349 perf_ctx_unlock(cpuctx, task_ctx);
350}
351
e5d1367f
SE
352#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
353 PERF_FLAG_FD_OUTPUT |\
a21b0b35
YD
354 PERF_FLAG_PID_CGROUP |\
355 PERF_FLAG_FD_CLOEXEC)
e5d1367f 356
bce38cd5
SE
357/*
358 * branch priv levels that need permission checks
359 */
360#define PERF_SAMPLE_BRANCH_PERM_PLM \
361 (PERF_SAMPLE_BRANCH_KERNEL |\
362 PERF_SAMPLE_BRANCH_HV)
363
0b3fcf17
SE
364enum event_type_t {
365 EVENT_FLEXIBLE = 0x1,
366 EVENT_PINNED = 0x2,
3cbaa590 367 EVENT_TIME = 0x4,
487f05e1
AS
368 /* see ctx_resched() for details */
369 EVENT_CPU = 0x8,
0b3fcf17
SE
370 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
371};
372
e5d1367f
SE
373/*
374 * perf_sched_events : >0 events exist
375 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
376 */
9107c89e
PZ
377
378static void perf_sched_delayed(struct work_struct *work);
379DEFINE_STATIC_KEY_FALSE(perf_sched_events);
380static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
381static DEFINE_MUTEX(perf_sched_mutex);
382static atomic_t perf_sched_count;
383
e5d1367f 384static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
ba532500 385static DEFINE_PER_CPU(int, perf_sched_cb_usages);
f2fb6bef 386static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
e5d1367f 387
cdd6c482
IM
388static atomic_t nr_mmap_events __read_mostly;
389static atomic_t nr_comm_events __read_mostly;
e4222673 390static atomic_t nr_namespaces_events __read_mostly;
cdd6c482 391static atomic_t nr_task_events __read_mostly;
948b26b6 392static atomic_t nr_freq_events __read_mostly;
45ac1403 393static atomic_t nr_switch_events __read_mostly;
76193a94 394static atomic_t nr_ksymbol_events __read_mostly;
6ee52e2a 395static atomic_t nr_bpf_events __read_mostly;
96aaab68 396static atomic_t nr_cgroup_events __read_mostly;
9ee318a7 397
108b02cf
PZ
398static LIST_HEAD(pmus);
399static DEFINE_MUTEX(pmus_lock);
400static struct srcu_struct pmus_srcu;
a63fbed7 401static cpumask_var_t perf_online_mask;
108b02cf 402
0764771d 403/*
cdd6c482 404 * perf event paranoia level:
0fbdea19
IM
405 * -1 - not paranoid at all
406 * 0 - disallow raw tracepoint access for unpriv
cdd6c482 407 * 1 - disallow cpu events for unpriv
0fbdea19 408 * 2 - disallow kernel profiling for unpriv
0764771d 409 */
0161028b 410int sysctl_perf_event_paranoid __read_mostly = 2;
0764771d 411
20443384
FW
412/* Minimum for 512 kiB + 1 user control page */
413int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
df58ab24
PZ
414
415/*
cdd6c482 416 * max perf event sample rate
df58ab24 417 */
14c63f17
DH
418#define DEFAULT_MAX_SAMPLE_RATE 100000
419#define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
420#define DEFAULT_CPU_TIME_MAX_PERCENT 25
421
422int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
423
424static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
425static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
426
d9494cb4
PZ
427static int perf_sample_allowed_ns __read_mostly =
428 DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
14c63f17 429
18ab2cd3 430static void update_perf_cpu_limits(void)
14c63f17
DH
431{
432 u64 tmp = perf_sample_period_ns;
433
434 tmp *= sysctl_perf_cpu_time_max_percent;
91a612ee
PZ
435 tmp = div_u64(tmp, 100);
436 if (!tmp)
437 tmp = 1;
438
439 WRITE_ONCE(perf_sample_allowed_ns, tmp);
14c63f17 440}
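/*
 * Worked example (added for clarity, not part of the original source), using
 * the defaults above: perf_sample_period_ns = NSEC_PER_SEC / 100000 = 10000ns
 * and sysctl_perf_cpu_time_max_percent = 25, so update_perf_cpu_limits()
 * computes 10000 * 25 / 100 = 2500ns of allowed time per sample, matching the
 * initial value of perf_sample_allowed_ns.
 */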
163ec435 441
8d5bce0c 442static bool perf_rotate_context(struct perf_cpu_context *cpuctx);
9e630205 443
163ec435 444int perf_proc_update_handler(struct ctl_table *table, int write,
32927393 445 void *buffer, size_t *lenp, loff_t *ppos)
163ec435 446{
1a51c5da
SE
447 int ret;
448 int perf_cpu = sysctl_perf_cpu_time_max_percent;
ab7fdefb
KL
449 /*
450 * If throttling is disabled don't allow the write:
451 */
1a51c5da 452 if (write && (perf_cpu == 100 || perf_cpu == 0))
ab7fdefb
KL
453 return -EINVAL;
454
1a51c5da
SE
455 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
456 if (ret || !write)
457 return ret;
458
163ec435 459 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
14c63f17
DH
460 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
461 update_perf_cpu_limits();
462
463 return 0;
464}
465
466int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
467
468int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
32927393 469 void *buffer, size_t *lenp, loff_t *ppos)
14c63f17 470{
1572e45a 471 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
14c63f17
DH
472
473 if (ret || !write)
474 return ret;
475
b303e7c1
PZ
476 if (sysctl_perf_cpu_time_max_percent == 100 ||
477 sysctl_perf_cpu_time_max_percent == 0) {
91a612ee
PZ
478 printk(KERN_WARNING
479 "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
480 WRITE_ONCE(perf_sample_allowed_ns, 0);
481 } else {
482 update_perf_cpu_limits();
483 }
163ec435
PZ
484
485 return 0;
486}
1ccd1549 487
14c63f17
DH
488/*
489 * perf samples are done in some very critical code paths (NMIs).
490 * If they take too much CPU time, the system can lock up and not
491 * get any real work done. This will drop the sample rate when
492 * we detect that events are taking too long.
493 */
494#define NR_ACCUMULATED_SAMPLES 128
d9494cb4 495static DEFINE_PER_CPU(u64, running_sample_length);
14c63f17 496
91a612ee
PZ
497static u64 __report_avg;
498static u64 __report_allowed;
499
6a02ad66 500static void perf_duration_warn(struct irq_work *w)
14c63f17 501{
0d87d7ec 502 printk_ratelimited(KERN_INFO
91a612ee
PZ
503 "perf: interrupt took too long (%lld > %lld), lowering "
504 "kernel.perf_event_max_sample_rate to %d\n",
505 __report_avg, __report_allowed,
506 sysctl_perf_event_sample_rate);
6a02ad66
PZ
507}
508
509static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
510
511void perf_sample_event_took(u64 sample_len_ns)
512{
91a612ee
PZ
513 u64 max_len = READ_ONCE(perf_sample_allowed_ns);
514 u64 running_len;
515 u64 avg_len;
516 u32 max;
14c63f17 517
91a612ee 518 if (max_len == 0)
14c63f17
DH
519 return;
520
91a612ee
PZ
521 /* Decay the counter by 1 average sample. */
522 running_len = __this_cpu_read(running_sample_length);
523 running_len -= running_len/NR_ACCUMULATED_SAMPLES;
524 running_len += sample_len_ns;
525 __this_cpu_write(running_sample_length, running_len);
14c63f17
DH
526
527 /*
91a612ee
PZ
 528 * Note: this will be biased artificially low until we have
529 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
14c63f17
DH
530 * from having to maintain a count.
531 */
91a612ee
PZ
532 avg_len = running_len/NR_ACCUMULATED_SAMPLES;
533 if (avg_len <= max_len)
14c63f17
DH
534 return;
535
91a612ee
PZ
536 __report_avg = avg_len;
537 __report_allowed = max_len;
14c63f17 538
91a612ee
PZ
539 /*
540 * Compute a throttle threshold 25% below the current duration.
541 */
542 avg_len += avg_len / 4;
543 max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
544 if (avg_len < max)
545 max /= (u32)avg_len;
546 else
547 max = 1;
14c63f17 548
91a612ee
PZ
549 WRITE_ONCE(perf_sample_allowed_ns, avg_len);
550 WRITE_ONCE(max_samples_per_tick, max);
551
552 sysctl_perf_event_sample_rate = max * HZ;
553 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
6a02ad66 554
cd578abb 555 if (!irq_work_queue(&perf_duration_work)) {
91a612ee 556 early_printk("perf: interrupt took too long (%lld > %lld), lowering "
cd578abb 557 "kernel.perf_event_max_sample_rate to %d\n",
91a612ee 558 __report_avg, __report_allowed,
cd578abb
PZ
559 sysctl_perf_event_sample_rate);
560 }
14c63f17
DH
561}
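/*
 * Worked example (added for clarity, not part of the original source; the
 * HZ and sample-length figures are assumptions): with HZ=250 (TICK_NSEC =
 * 4,000,000ns), sysctl_perf_cpu_time_max_percent = 25 and a decayed average
 * sample length of 4000ns, avg_len becomes 4000 + 4000/4 = 5000ns, max =
 * (4,000,000 / 100) * 25 = 1,000,000, so max_samples_per_tick becomes
 * 1,000,000 / 5000 = 200, kernel.perf_event_max_sample_rate drops to
 * 200 * 250 = 50000 samples/sec, and perf_sample_allowed_ns is set to 5000ns.
 */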
562
cdd6c482 563static atomic64_t perf_event_id;
a96bbc16 564
0b3fcf17
SE
565static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
566 enum event_type_t event_type);
567
568static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
569 enum event_type_t event_type,
570 struct task_struct *task);
571
572static void update_context_time(struct perf_event_context *ctx);
573static u64 perf_event_time(struct perf_event *event);
0b3fcf17 574
cdd6c482 575void __weak perf_event_print_debug(void) { }
0793a61d 576
84c79910 577extern __weak const char *perf_pmu_name(void)
0793a61d 578{
84c79910 579 return "pmu";
0793a61d
TG
580}
581
0b3fcf17
SE
582static inline u64 perf_clock(void)
583{
584 return local_clock();
585}
586
34f43927
PZ
587static inline u64 perf_event_clock(struct perf_event *event)
588{
589 return event->clock();
590}
591
0d3d73aa
PZ
592/*
593 * State based event timekeeping...
594 *
595 * The basic idea is to use event->state to determine which (if any) time
596 * fields to increment with the current delta. This means we only need to
597 * update timestamps when we change state or when they are explicitly requested
598 * (read).
599 *
600 * Event groups make things a little more complicated, but not terribly so. The
601 * rules for a group are that if the group leader is OFF the entire group is
 602 * OFF, irrespective of what the group member states are. This results in
603 * __perf_effective_state().
604 *
 605 * A further ramification is that when a group leader flips between OFF and
606 * !OFF, we need to update all group member times.
607 *
608 *
609 * NOTE: perf_event_time() is based on the (cgroup) context time, and thus we
610 * need to make sure the relevant context time is updated before we try and
611 * update our timestamps.
612 */
613
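/*
 * Illustrative example (added for clarity, not part of the original source):
 * an event that is enabled but INACTIVE for 5ms and then ACTIVE for 3ms ends
 * up with total_time_enabled = 8ms and total_time_running = 3ms; on each
 * state change __perf_update_times() adds the delta since ->tstamp to
 * "enabled" for any state >= INACTIVE and to "running" only while ACTIVE.
 */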
614static __always_inline enum perf_event_state
615__perf_effective_state(struct perf_event *event)
616{
617 struct perf_event *leader = event->group_leader;
618
619 if (leader->state <= PERF_EVENT_STATE_OFF)
620 return leader->state;
621
622 return event->state;
623}
624
625static __always_inline void
626__perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running)
627{
628 enum perf_event_state state = __perf_effective_state(event);
629 u64 delta = now - event->tstamp;
630
631 *enabled = event->total_time_enabled;
632 if (state >= PERF_EVENT_STATE_INACTIVE)
633 *enabled += delta;
634
635 *running = event->total_time_running;
636 if (state >= PERF_EVENT_STATE_ACTIVE)
637 *running += delta;
638}
639
640static void perf_event_update_time(struct perf_event *event)
641{
642 u64 now = perf_event_time(event);
643
644 __perf_update_times(event, now, &event->total_time_enabled,
645 &event->total_time_running);
646 event->tstamp = now;
647}
648
649static void perf_event_update_sibling_time(struct perf_event *leader)
650{
651 struct perf_event *sibling;
652
edb39592 653 for_each_sibling_event(sibling, leader)
0d3d73aa
PZ
654 perf_event_update_time(sibling);
655}
656
657static void
658perf_event_set_state(struct perf_event *event, enum perf_event_state state)
659{
660 if (event->state == state)
661 return;
662
663 perf_event_update_time(event);
664 /*
665 * If a group leader gets enabled/disabled all its siblings
666 * are affected too.
667 */
668 if ((event->state < 0) ^ (state < 0))
669 perf_event_update_sibling_time(event);
670
671 WRITE_ONCE(event->state, state);
672}
673
e5d1367f
SE
674#ifdef CONFIG_CGROUP_PERF
675
e5d1367f
SE
676static inline bool
677perf_cgroup_match(struct perf_event *event)
678{
679 struct perf_event_context *ctx = event->ctx;
680 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
681
ef824fa1
TH
682 /* @event doesn't care about cgroup */
683 if (!event->cgrp)
684 return true;
685
686 /* wants specific cgroup scope but @cpuctx isn't associated with any */
687 if (!cpuctx->cgrp)
688 return false;
689
690 /*
691 * Cgroup scoping is recursive. An event enabled for a cgroup is
692 * also enabled for all its descendant cgroups. If @cpuctx's
693 * cgroup is a descendant of @event's (the test covers identity
694 * case), it's a match.
695 */
696 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
697 event->cgrp->css.cgroup);
e5d1367f
SE
698}
699
e5d1367f
SE
700static inline void perf_detach_cgroup(struct perf_event *event)
701{
4e2ba650 702 css_put(&event->cgrp->css);
e5d1367f
SE
703 event->cgrp = NULL;
704}
705
706static inline int is_cgroup_event(struct perf_event *event)
707{
708 return event->cgrp != NULL;
709}
710
711static inline u64 perf_cgroup_event_time(struct perf_event *event)
712{
713 struct perf_cgroup_info *t;
714
715 t = per_cpu_ptr(event->cgrp->info, event->cpu);
716 return t->time;
717}
718
719static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
720{
721 struct perf_cgroup_info *info;
722 u64 now;
723
724 now = perf_clock();
725
726 info = this_cpu_ptr(cgrp->info);
727
728 info->time += now - info->timestamp;
729 info->timestamp = now;
730}
731
732static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
733{
c917e0f2
SL
734 struct perf_cgroup *cgrp = cpuctx->cgrp;
735 struct cgroup_subsys_state *css;
736
737 if (cgrp) {
738 for (css = &cgrp->css; css; css = css->parent) {
739 cgrp = container_of(css, struct perf_cgroup, css);
740 __update_cgrp_time(cgrp);
741 }
742 }
e5d1367f
SE
743}
744
745static inline void update_cgrp_time_from_event(struct perf_event *event)
746{
3f7cce3c
SE
747 struct perf_cgroup *cgrp;
748
e5d1367f 749 /*
3f7cce3c
SE
750 * ensure we access cgroup data only when needed and
751 * when we know the cgroup is pinned (css_get)
e5d1367f 752 */
3f7cce3c 753 if (!is_cgroup_event(event))
e5d1367f
SE
754 return;
755
614e4c4e 756 cgrp = perf_cgroup_from_task(current, event->ctx);
3f7cce3c
SE
757 /*
758 * Do not update time when cgroup is not active
759 */
28fa741c 760 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
3f7cce3c 761 __update_cgrp_time(event->cgrp);
e5d1367f
SE
762}
763
764static inline void
3f7cce3c
SE
765perf_cgroup_set_timestamp(struct task_struct *task,
766 struct perf_event_context *ctx)
e5d1367f
SE
767{
768 struct perf_cgroup *cgrp;
769 struct perf_cgroup_info *info;
c917e0f2 770 struct cgroup_subsys_state *css;
e5d1367f 771
3f7cce3c
SE
772 /*
773 * ctx->lock held by caller
774 * ensure we do not access cgroup data
775 * unless we have the cgroup pinned (css_get)
776 */
777 if (!task || !ctx->nr_cgroups)
e5d1367f
SE
778 return;
779
614e4c4e 780 cgrp = perf_cgroup_from_task(task, ctx);
c917e0f2
SL
781
782 for (css = &cgrp->css; css; css = css->parent) {
783 cgrp = container_of(css, struct perf_cgroup, css);
784 info = this_cpu_ptr(cgrp->info);
785 info->timestamp = ctx->timestamp;
786 }
e5d1367f
SE
787}
788
058fe1c0
DCC
789static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
790
e5d1367f
SE
791#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
792#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
793
794/*
795 * reschedule events based on the cgroup constraint of task.
796 *
797 * mode SWOUT : schedule out everything
798 * mode SWIN : schedule in based on cgroup for next
799 */
18ab2cd3 800static void perf_cgroup_switch(struct task_struct *task, int mode)
e5d1367f
SE
801{
802 struct perf_cpu_context *cpuctx;
058fe1c0 803 struct list_head *list;
e5d1367f
SE
804 unsigned long flags;
805
806 /*
058fe1c0
DCC
807 * Disable interrupts and preemption to avoid this CPU's
808 * cgrp_cpuctx_entry to change under us.
e5d1367f
SE
809 */
810 local_irq_save(flags);
811
058fe1c0
DCC
812 list = this_cpu_ptr(&cgrp_cpuctx_list);
813 list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
814 WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
e5d1367f 815
058fe1c0
DCC
816 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
817 perf_pmu_disable(cpuctx->ctx.pmu);
e5d1367f 818
058fe1c0
DCC
819 if (mode & PERF_CGROUP_SWOUT) {
820 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
821 /*
822 * must not be done before ctxswout due
823 * to event_filter_match() in event_sched_out()
824 */
825 cpuctx->cgrp = NULL;
826 }
e5d1367f 827
058fe1c0
DCC
828 if (mode & PERF_CGROUP_SWIN) {
829 WARN_ON_ONCE(cpuctx->cgrp);
830 /*
831 * set cgrp before ctxsw in to allow
832 * event_filter_match() to not have to pass
833 * task around
834 * we pass the cpuctx->ctx to perf_cgroup_from_task()
 835 * because cgroup events are only per-cpu
836 */
837 cpuctx->cgrp = perf_cgroup_from_task(task,
838 &cpuctx->ctx);
839 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
e5d1367f 840 }
058fe1c0
DCC
841 perf_pmu_enable(cpuctx->ctx.pmu);
842 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
e5d1367f
SE
843 }
844
e5d1367f
SE
845 local_irq_restore(flags);
846}
847
a8d757ef
SE
848static inline void perf_cgroup_sched_out(struct task_struct *task,
849 struct task_struct *next)
e5d1367f 850{
a8d757ef
SE
851 struct perf_cgroup *cgrp1;
852 struct perf_cgroup *cgrp2 = NULL;
853
ddaaf4e2 854 rcu_read_lock();
a8d757ef
SE
855 /*
856 * we come here when we know perf_cgroup_events > 0
614e4c4e
SE
857 * we do not need to pass the ctx here because we know
858 * we are holding the rcu lock
a8d757ef 859 */
614e4c4e 860 cgrp1 = perf_cgroup_from_task(task, NULL);
70a01657 861 cgrp2 = perf_cgroup_from_task(next, NULL);
a8d757ef
SE
862
863 /*
864 * only schedule out current cgroup events if we know
865 * that we are switching to a different cgroup. Otherwise,
 866 * do not touch the cgroup events.
867 */
868 if (cgrp1 != cgrp2)
869 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
ddaaf4e2
SE
870
871 rcu_read_unlock();
e5d1367f
SE
872}
873
a8d757ef
SE
874static inline void perf_cgroup_sched_in(struct task_struct *prev,
875 struct task_struct *task)
e5d1367f 876{
a8d757ef
SE
877 struct perf_cgroup *cgrp1;
878 struct perf_cgroup *cgrp2 = NULL;
879
ddaaf4e2 880 rcu_read_lock();
a8d757ef
SE
881 /*
882 * we come here when we know perf_cgroup_events > 0
614e4c4e
SE
883 * we do not need to pass the ctx here because we know
884 * we are holding the rcu lock
a8d757ef 885 */
614e4c4e 886 cgrp1 = perf_cgroup_from_task(task, NULL);
614e4c4e 887 cgrp2 = perf_cgroup_from_task(prev, NULL);
a8d757ef
SE
888
889 /*
890 * only need to schedule in cgroup events if we are changing
891 * cgroup during ctxsw. Cgroup events were not scheduled
 892 * out at ctxsw if that was not the case.
893 */
894 if (cgrp1 != cgrp2)
895 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
ddaaf4e2
SE
896
897 rcu_read_unlock();
e5d1367f
SE
898}
899
c2283c93
IR
900static int perf_cgroup_ensure_storage(struct perf_event *event,
901 struct cgroup_subsys_state *css)
902{
903 struct perf_cpu_context *cpuctx;
904 struct perf_event **storage;
905 int cpu, heap_size, ret = 0;
906
907 /*
 908 * Allow storage to have sufficient space for an iterator for each
909 * possibly nested cgroup plus an iterator for events with no cgroup.
910 */
911 for (heap_size = 1; css; css = css->parent)
912 heap_size++;
913
914 for_each_possible_cpu(cpu) {
915 cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu);
916 if (heap_size <= cpuctx->heap_size)
917 continue;
918
919 storage = kmalloc_node(heap_size * sizeof(struct perf_event *),
920 GFP_KERNEL, cpu_to_node(cpu));
921 if (!storage) {
922 ret = -ENOMEM;
923 break;
924 }
925
926 raw_spin_lock_irq(&cpuctx->ctx.lock);
927 if (cpuctx->heap_size < heap_size) {
928 swap(cpuctx->heap, storage);
929 if (storage == cpuctx->heap_default)
930 storage = NULL;
931 cpuctx->heap_size = heap_size;
932 }
933 raw_spin_unlock_irq(&cpuctx->ctx.lock);
934
935 kfree(storage);
936 }
937
938 return ret;
939}
940
e5d1367f
SE
941static inline int perf_cgroup_connect(int fd, struct perf_event *event,
942 struct perf_event_attr *attr,
943 struct perf_event *group_leader)
944{
945 struct perf_cgroup *cgrp;
946 struct cgroup_subsys_state *css;
2903ff01
AV
947 struct fd f = fdget(fd);
948 int ret = 0;
e5d1367f 949
2903ff01 950 if (!f.file)
e5d1367f
SE
951 return -EBADF;
952
b583043e 953 css = css_tryget_online_from_dir(f.file->f_path.dentry,
ec903c0c 954 &perf_event_cgrp_subsys);
3db272c0
LZ
955 if (IS_ERR(css)) {
956 ret = PTR_ERR(css);
957 goto out;
958 }
e5d1367f 959
c2283c93
IR
960 ret = perf_cgroup_ensure_storage(event, css);
961 if (ret)
962 goto out;
963
e5d1367f
SE
964 cgrp = container_of(css, struct perf_cgroup, css);
965 event->cgrp = cgrp;
966
967 /*
968 * all events in a group must monitor
969 * the same cgroup because a task belongs
970 * to only one perf cgroup at a time
971 */
972 if (group_leader && group_leader->cgrp != cgrp) {
973 perf_detach_cgroup(event);
974 ret = -EINVAL;
e5d1367f 975 }
3db272c0 976out:
2903ff01 977 fdput(f);
e5d1367f
SE
978 return ret;
979}
980
981static inline void
982perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
983{
984 struct perf_cgroup_info *t;
985 t = per_cpu_ptr(event->cgrp->info, event->cpu);
986 event->shadow_ctx_time = now - t->timestamp;
987}
988
db4a8356 989static inline void
33238c50 990perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
db4a8356
DCC
991{
992 struct perf_cpu_context *cpuctx;
993
994 if (!is_cgroup_event(event))
995 return;
996
db4a8356
DCC
997 /*
998 * Because cgroup events are always per-cpu events,
07c59729 999 * @ctx == &cpuctx->ctx.
db4a8356 1000 */
07c59729 1001 cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
33801b94 1002
1003 /*
1004 * Since setting cpuctx->cgrp is conditional on the current @cgrp
1005 * matching the event's cgroup, we must do this for every new event,
1006 * because if the first would mismatch, the second would not try again
1007 * and we would leave cpuctx->cgrp unset.
1008 */
33238c50 1009 if (ctx->is_active && !cpuctx->cgrp) {
be96b316
TH
1010 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
1011
be96b316
TH
1012 if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
1013 cpuctx->cgrp = cgrp;
058fe1c0 1014 }
33801b94 1015
33238c50 1016 if (ctx->nr_cgroups++)
33801b94 1017 return;
33238c50
PZ
1018
1019 list_add(&cpuctx->cgrp_cpuctx_entry,
1020 per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
1021}
1022
1023static inline void
1024perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
1025{
1026 struct perf_cpu_context *cpuctx;
1027
1028 if (!is_cgroup_event(event))
33801b94 1029 return;
1030
33238c50
PZ
1031 /*
1032 * Because cgroup events are always per-cpu events,
1033 * @ctx == &cpuctx->ctx.
1034 */
1035 cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
1036
1037 if (--ctx->nr_cgroups)
1038 return;
1039
1040 if (ctx->is_active && cpuctx->cgrp)
33801b94 1041 cpuctx->cgrp = NULL;
1042
33238c50 1043 list_del(&cpuctx->cgrp_cpuctx_entry);
db4a8356
DCC
1044}
1045
e5d1367f
SE
1046#else /* !CONFIG_CGROUP_PERF */
1047
1048static inline bool
1049perf_cgroup_match(struct perf_event *event)
1050{
1051 return true;
1052}
1053
1054static inline void perf_detach_cgroup(struct perf_event *event)
1055{}
1056
1057static inline int is_cgroup_event(struct perf_event *event)
1058{
1059 return 0;
1060}
1061
e5d1367f
SE
1062static inline void update_cgrp_time_from_event(struct perf_event *event)
1063{
1064}
1065
1066static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
1067{
1068}
1069
a8d757ef
SE
1070static inline void perf_cgroup_sched_out(struct task_struct *task,
1071 struct task_struct *next)
e5d1367f
SE
1072{
1073}
1074
a8d757ef
SE
1075static inline void perf_cgroup_sched_in(struct task_struct *prev,
1076 struct task_struct *task)
e5d1367f
SE
1077{
1078}
1079
1080static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
1081 struct perf_event_attr *attr,
1082 struct perf_event *group_leader)
1083{
1084 return -EINVAL;
1085}
1086
1087static inline void
3f7cce3c
SE
1088perf_cgroup_set_timestamp(struct task_struct *task,
1089 struct perf_event_context *ctx)
e5d1367f
SE
1090{
1091}
1092
d00dbd29 1093static inline void
e5d1367f
SE
1094perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
1095{
1096}
1097
1098static inline void
1099perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
1100{
1101}
1102
1103static inline u64 perf_cgroup_event_time(struct perf_event *event)
1104{
1105 return 0;
1106}
1107
db4a8356 1108static inline void
33238c50 1109perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
db4a8356
DCC
1110{
1111}
1112
33238c50
PZ
1113static inline void
1114perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
1115{
1116}
e5d1367f
SE
1117#endif
1118
9e630205
SE
1119/*
1120 * set default to be dependent on timer tick just
1121 * like original code
1122 */
1123#define PERF_CPU_HRTIMER (1000 / HZ)
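/*
 * Illustrative example (added for clarity, not part of the original source;
 * HZ=250 is an assumption): PERF_CPU_HRTIMER = 1000 / 250 = 4, so a PMU that
 * does not set hrtimer_interval_ms gets a 4ms multiplexing interval in
 * __perf_mux_hrtimer_init().
 */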
1124/*
8a1115ff 1125 * function must be called with interrupts disabled
9e630205 1126 */
272325c4 1127static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
9e630205
SE
1128{
1129 struct perf_cpu_context *cpuctx;
8d5bce0c 1130 bool rotations;
9e630205 1131
16444645 1132 lockdep_assert_irqs_disabled();
9e630205
SE
1133
1134 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
9e630205
SE
1135 rotations = perf_rotate_context(cpuctx);
1136
4cfafd30
PZ
1137 raw_spin_lock(&cpuctx->hrtimer_lock);
1138 if (rotations)
9e630205 1139 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
4cfafd30
PZ
1140 else
1141 cpuctx->hrtimer_active = 0;
1142 raw_spin_unlock(&cpuctx->hrtimer_lock);
9e630205 1143
4cfafd30 1144 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
9e630205
SE
1145}
1146
272325c4 1147static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
9e630205 1148{
272325c4 1149 struct hrtimer *timer = &cpuctx->hrtimer;
9e630205 1150 struct pmu *pmu = cpuctx->ctx.pmu;
272325c4 1151 u64 interval;
9e630205
SE
1152
1153 /* no multiplexing needed for SW PMU */
1154 if (pmu->task_ctx_nr == perf_sw_context)
1155 return;
1156
62b85639
SE
1157 /*
1158 * check default is sane, if not set then force to
1159 * default interval (1/tick)
1160 */
272325c4
PZ
1161 interval = pmu->hrtimer_interval_ms;
1162 if (interval < 1)
1163 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
62b85639 1164
272325c4 1165 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
9e630205 1166
4cfafd30 1167 raw_spin_lock_init(&cpuctx->hrtimer_lock);
30f9028b 1168 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
272325c4 1169 timer->function = perf_mux_hrtimer_handler;
9e630205
SE
1170}
1171
272325c4 1172static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
9e630205 1173{
272325c4 1174 struct hrtimer *timer = &cpuctx->hrtimer;
9e630205 1175 struct pmu *pmu = cpuctx->ctx.pmu;
4cfafd30 1176 unsigned long flags;
9e630205
SE
1177
1178 /* not for SW PMU */
1179 if (pmu->task_ctx_nr == perf_sw_context)
272325c4 1180 return 0;
9e630205 1181
4cfafd30
PZ
1182 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
1183 if (!cpuctx->hrtimer_active) {
1184 cpuctx->hrtimer_active = 1;
1185 hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
30f9028b 1186 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED_HARD);
4cfafd30
PZ
1187 }
1188 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
9e630205 1189
272325c4 1190 return 0;
9e630205
SE
1191}
1192
33696fc0 1193void perf_pmu_disable(struct pmu *pmu)
9e35ad38 1194{
33696fc0
PZ
1195 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1196 if (!(*count)++)
1197 pmu->pmu_disable(pmu);
9e35ad38 1198}
9e35ad38 1199
33696fc0 1200void perf_pmu_enable(struct pmu *pmu)
9e35ad38 1201{
33696fc0
PZ
1202 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1203 if (!--(*count))
1204 pmu->pmu_enable(pmu);
9e35ad38 1205}
9e35ad38 1206
2fde4f94 1207static DEFINE_PER_CPU(struct list_head, active_ctx_list);
e9d2b064
PZ
1208
1209/*
2fde4f94
MR
1210 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
1211 * perf_event_task_tick() are fully serialized because they're strictly cpu
1212 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
1213 * disabled, while perf_event_task_tick is called from IRQ context.
e9d2b064 1214 */
2fde4f94 1215static void perf_event_ctx_activate(struct perf_event_context *ctx)
9e35ad38 1216{
2fde4f94 1217 struct list_head *head = this_cpu_ptr(&active_ctx_list);
b5ab4cd5 1218
16444645 1219 lockdep_assert_irqs_disabled();
b5ab4cd5 1220
2fde4f94
MR
1221 WARN_ON(!list_empty(&ctx->active_ctx_list));
1222
1223 list_add(&ctx->active_ctx_list, head);
1224}
1225
1226static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
1227{
16444645 1228 lockdep_assert_irqs_disabled();
2fde4f94
MR
1229
1230 WARN_ON(list_empty(&ctx->active_ctx_list));
1231
1232 list_del_init(&ctx->active_ctx_list);
9e35ad38 1233}
9e35ad38 1234
cdd6c482 1235static void get_ctx(struct perf_event_context *ctx)
a63eaf34 1236{
8c94abbb 1237 refcount_inc(&ctx->refcount);
a63eaf34
PM
1238}
1239
4af57ef2
YZ
1240static void free_ctx(struct rcu_head *head)
1241{
1242 struct perf_event_context *ctx;
1243
1244 ctx = container_of(head, struct perf_event_context, rcu_head);
1245 kfree(ctx->task_ctx_data);
1246 kfree(ctx);
1247}
1248
cdd6c482 1249static void put_ctx(struct perf_event_context *ctx)
a63eaf34 1250{
8c94abbb 1251 if (refcount_dec_and_test(&ctx->refcount)) {
564c2b21
PM
1252 if (ctx->parent_ctx)
1253 put_ctx(ctx->parent_ctx);
63b6da39 1254 if (ctx->task && ctx->task != TASK_TOMBSTONE)
c93f7669 1255 put_task_struct(ctx->task);
4af57ef2 1256 call_rcu(&ctx->rcu_head, free_ctx);
564c2b21 1257 }
a63eaf34
PM
1258}
1259
f63a8daa
PZ
1260/*
1261 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
1262 * perf_pmu_migrate_context() we need some magic.
1263 *
1264 * Those places that change perf_event::ctx will hold both
1265 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
1266 *
8b10c5e2
PZ
1267 * Lock ordering is by mutex address. There are two other sites where
1268 * perf_event_context::mutex nests and those are:
1269 *
1270 * - perf_event_exit_task_context() [ child , 0 ]
8ba289b8
PZ
1271 * perf_event_exit_event()
1272 * put_event() [ parent, 1 ]
8b10c5e2
PZ
1273 *
1274 * - perf_event_init_context() [ parent, 0 ]
1275 * inherit_task_group()
1276 * inherit_group()
1277 * inherit_event()
1278 * perf_event_alloc()
1279 * perf_init_event()
1280 * perf_try_init_event() [ child , 1 ]
1281 *
1282 * While it appears there is an obvious deadlock here -- the parent and child
 1283 * nesting levels are inverted between the two -- this is in fact safe because
 1284 * life-time rules separate them. That is, an exiting task cannot fork, and a
1285 * spawning task cannot (yet) exit.
1286 *
 1287 * But remember that these are parent<->child context relations, and
1288 * migration does not affect children, therefore these two orderings should not
1289 * interact.
f63a8daa
PZ
1290 *
1291 * The change in perf_event::ctx does not affect children (as claimed above)
1292 * because the sys_perf_event_open() case will install a new event and break
1293 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
1294 * concerned with cpuctx and that doesn't have children.
1295 *
1296 * The places that change perf_event::ctx will issue:
1297 *
1298 * perf_remove_from_context();
1299 * synchronize_rcu();
1300 * perf_install_in_context();
1301 *
1302 * to affect the change. The remove_from_context() + synchronize_rcu() should
1303 * quiesce the event, after which we can install it in the new location. This
1304 * means that only external vectors (perf_fops, prctl) can perturb the event
1305 * while in transit. Therefore all such accessors should also acquire
1306 * perf_event_context::mutex to serialize against this.
1307 *
1308 * However; because event->ctx can change while we're waiting to acquire
1309 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
1310 * function.
1311 *
1312 * Lock order:
69143038 1313 * exec_update_mutex
f63a8daa
PZ
1314 * task_struct::perf_event_mutex
1315 * perf_event_context::mutex
f63a8daa 1316 * perf_event::child_mutex;
07c4a776 1317 * perf_event_context::lock
f63a8daa 1318 * perf_event::mmap_mutex
c1e8d7c6 1319 * mmap_lock
18736eef 1320 * perf_addr_filters_head::lock
82d94856
PZ
1321 *
1322 * cpu_hotplug_lock
1323 * pmus_lock
1324 * cpuctx->mutex / perf_event_context::mutex
f63a8daa 1325 */
a83fe28e
PZ
1326static struct perf_event_context *
1327perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
f63a8daa
PZ
1328{
1329 struct perf_event_context *ctx;
1330
1331again:
1332 rcu_read_lock();
6aa7de05 1333 ctx = READ_ONCE(event->ctx);
8c94abbb 1334 if (!refcount_inc_not_zero(&ctx->refcount)) {
f63a8daa
PZ
1335 rcu_read_unlock();
1336 goto again;
1337 }
1338 rcu_read_unlock();
1339
a83fe28e 1340 mutex_lock_nested(&ctx->mutex, nesting);
f63a8daa
PZ
1341 if (event->ctx != ctx) {
1342 mutex_unlock(&ctx->mutex);
1343 put_ctx(ctx);
1344 goto again;
1345 }
1346
1347 return ctx;
1348}
1349
a83fe28e
PZ
1350static inline struct perf_event_context *
1351perf_event_ctx_lock(struct perf_event *event)
1352{
1353 return perf_event_ctx_lock_nested(event, 0);
1354}
1355
f63a8daa
PZ
1356static void perf_event_ctx_unlock(struct perf_event *event,
1357 struct perf_event_context *ctx)
1358{
1359 mutex_unlock(&ctx->mutex);
1360 put_ctx(ctx);
1361}
1362
211de6eb
PZ
1363/*
1364 * This must be done under the ctx->lock, such as to serialize against
1365 * context_equiv(), therefore we cannot call put_ctx() since that might end up
1366 * calling scheduler related locks and ctx->lock nests inside those.
1367 */
1368static __must_check struct perf_event_context *
1369unclone_ctx(struct perf_event_context *ctx)
71a851b4 1370{
211de6eb
PZ
1371 struct perf_event_context *parent_ctx = ctx->parent_ctx;
1372
1373 lockdep_assert_held(&ctx->lock);
1374
1375 if (parent_ctx)
71a851b4 1376 ctx->parent_ctx = NULL;
5a3126d4 1377 ctx->generation++;
211de6eb
PZ
1378
1379 return parent_ctx;
71a851b4
PZ
1380}
1381
1d953111
ON
1382static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p,
1383 enum pid_type type)
6844c09d 1384{
1d953111 1385 u32 nr;
6844c09d
ACM
1386 /*
1387 * only top level events have the pid namespace they were created in
1388 */
1389 if (event->parent)
1390 event = event->parent;
1391
1d953111
ON
1392 nr = __task_pid_nr_ns(p, type, event->ns);
1393 /* avoid -1 if it is idle thread or runs in another ns */
1394 if (!nr && !pid_alive(p))
1395 nr = -1;
1396 return nr;
6844c09d
ACM
1397}
1398
1d953111 1399static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
6844c09d 1400{
6883f81a 1401 return perf_event_pid_type(event, p, PIDTYPE_TGID);
1d953111 1402}
6844c09d 1403
1d953111
ON
1404static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1405{
1406 return perf_event_pid_type(event, p, PIDTYPE_PID);
6844c09d
ACM
1407}
1408
7f453c24 1409/*
cdd6c482 1410 * If we inherit events we want to return the parent event id
7f453c24
PZ
1411 * to userspace.
1412 */
cdd6c482 1413static u64 primary_event_id(struct perf_event *event)
7f453c24 1414{
cdd6c482 1415 u64 id = event->id;
7f453c24 1416
cdd6c482
IM
1417 if (event->parent)
1418 id = event->parent->id;
7f453c24
PZ
1419
1420 return id;
1421}
1422
25346b93 1423/*
cdd6c482 1424 * Get the perf_event_context for a task and lock it.
63b6da39 1425 *
25346b93
PM
 1426 * This has to cope with the fact that until it is locked,
1427 * the context could get moved to another task.
1428 */
cdd6c482 1429static struct perf_event_context *
8dc85d54 1430perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
25346b93 1431{
cdd6c482 1432 struct perf_event_context *ctx;
25346b93 1433
9ed6060d 1434retry:
058ebd0e
PZ
1435 /*
1436 * One of the few rules of preemptible RCU is that one cannot do
1437 * rcu_read_unlock() while holding a scheduler (or nested) lock when
2fd59077 1438 * part of the read side critical section was irqs-enabled -- see
058ebd0e
PZ
1439 * rcu_read_unlock_special().
1440 *
1441 * Since ctx->lock nests under rq->lock we must ensure the entire read
2fd59077 1442 * side critical section has interrupts disabled.
058ebd0e 1443 */
2fd59077 1444 local_irq_save(*flags);
058ebd0e 1445 rcu_read_lock();
8dc85d54 1446 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
25346b93
PM
1447 if (ctx) {
1448 /*
1449 * If this context is a clone of another, it might
1450 * get swapped for another underneath us by
cdd6c482 1451 * perf_event_task_sched_out, though the
25346b93
PM
1452 * rcu_read_lock() protects us from any context
1453 * getting freed. Lock the context and check if it
1454 * got swapped before we could get the lock, and retry
1455 * if so. If we locked the right context, then it
1456 * can't get swapped on us any more.
1457 */
2fd59077 1458 raw_spin_lock(&ctx->lock);
8dc85d54 1459 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
2fd59077 1460 raw_spin_unlock(&ctx->lock);
058ebd0e 1461 rcu_read_unlock();
2fd59077 1462 local_irq_restore(*flags);
25346b93
PM
1463 goto retry;
1464 }
b49a9e7e 1465
63b6da39 1466 if (ctx->task == TASK_TOMBSTONE ||
8c94abbb 1467 !refcount_inc_not_zero(&ctx->refcount)) {
2fd59077 1468 raw_spin_unlock(&ctx->lock);
b49a9e7e 1469 ctx = NULL;
828b6f0e
PZ
1470 } else {
1471 WARN_ON_ONCE(ctx->task != task);
b49a9e7e 1472 }
25346b93
PM
1473 }
1474 rcu_read_unlock();
2fd59077
PM
1475 if (!ctx)
1476 local_irq_restore(*flags);
25346b93
PM
1477 return ctx;
1478}
1479
1480/*
1481 * Get the context for a task and increment its pin_count so it
1482 * can't get swapped to another task. This also increments its
1483 * reference count so that the context can't get freed.
1484 */
8dc85d54
PZ
1485static struct perf_event_context *
1486perf_pin_task_context(struct task_struct *task, int ctxn)
25346b93 1487{
cdd6c482 1488 struct perf_event_context *ctx;
25346b93
PM
1489 unsigned long flags;
1490
8dc85d54 1491 ctx = perf_lock_task_context(task, ctxn, &flags);
25346b93
PM
1492 if (ctx) {
1493 ++ctx->pin_count;
e625cce1 1494 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1495 }
1496 return ctx;
1497}
1498
cdd6c482 1499static void perf_unpin_context(struct perf_event_context *ctx)
25346b93
PM
1500{
1501 unsigned long flags;
1502
e625cce1 1503 raw_spin_lock_irqsave(&ctx->lock, flags);
25346b93 1504 --ctx->pin_count;
e625cce1 1505 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1506}
1507
f67218c3
PZ
1508/*
1509 * Update the record of the current time in a context.
1510 */
1511static void update_context_time(struct perf_event_context *ctx)
1512{
1513 u64 now = perf_clock();
1514
1515 ctx->time += now - ctx->timestamp;
1516 ctx->timestamp = now;
1517}
1518
4158755d
SE
1519static u64 perf_event_time(struct perf_event *event)
1520{
1521 struct perf_event_context *ctx = event->ctx;
e5d1367f
SE
1522
1523 if (is_cgroup_event(event))
1524 return perf_cgroup_event_time(event);
1525
4158755d
SE
1526 return ctx ? ctx->time : 0;
1527}
1528
487f05e1
AS
1529static enum event_type_t get_event_type(struct perf_event *event)
1530{
1531 struct perf_event_context *ctx = event->ctx;
1532 enum event_type_t event_type;
1533
1534 lockdep_assert_held(&ctx->lock);
1535
3bda69c1
AS
1536 /*
1537 * It's 'group type', really, because if our group leader is
1538 * pinned, so are we.
1539 */
1540 if (event->group_leader != event)
1541 event = event->group_leader;
1542
487f05e1
AS
1543 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
1544 if (!ctx->task)
1545 event_type |= EVENT_CPU;
1546
1547 return event_type;
1548}
1549
8e1a2031 1550/*
161c85fa 1551 * Helper function to initialize event group nodes.
8e1a2031 1552 */
161c85fa 1553static void init_event_group(struct perf_event *event)
8e1a2031
AB
1554{
1555 RB_CLEAR_NODE(&event->group_node);
1556 event->group_index = 0;
1557}
1558
1559/*
1560 * Extract pinned or flexible groups from the context
161c85fa 1561 * based on event attrs bits.
8e1a2031
AB
1562 */
1563static struct perf_event_groups *
1564get_event_groups(struct perf_event *event, struct perf_event_context *ctx)
889ff015
FW
1565{
1566 if (event->attr.pinned)
1567 return &ctx->pinned_groups;
1568 else
1569 return &ctx->flexible_groups;
1570}
1571
8e1a2031 1572/*
161c85fa 1573 * Helper function to initialize perf_event_group trees.
8e1a2031 1574 */
161c85fa 1575static void perf_event_groups_init(struct perf_event_groups *groups)
8e1a2031
AB
1576{
1577 groups->tree = RB_ROOT;
1578 groups->index = 0;
1579}
1580
1581/*
1582 * Compare function for event groups;
161c85fa
PZ
1583 *
1584 * Implements complex key that first sorts by CPU and then by virtual index
1585 * which provides ordering when rotating groups for the same CPU.
8e1a2031 1586 */
161c85fa
PZ
1587static bool
1588perf_event_groups_less(struct perf_event *left, struct perf_event *right)
8e1a2031 1589{
161c85fa
PZ
1590 if (left->cpu < right->cpu)
1591 return true;
1592 if (left->cpu > right->cpu)
1593 return false;
1594
95ed6c70
IR
1595#ifdef CONFIG_CGROUP_PERF
1596 if (left->cgrp != right->cgrp) {
1597 if (!left->cgrp || !left->cgrp->css.cgroup) {
1598 /*
1599 * Left has no cgroup but right does, no cgroups come
1600 * first.
1601 */
1602 return true;
1603 }
a6763625 1604 if (!right->cgrp || !right->cgrp->css.cgroup) {
95ed6c70
IR
1605 /*
1606 * Right has no cgroup but left does, no cgroups come
1607 * first.
1608 */
1609 return false;
1610 }
1611 /* Two dissimilar cgroups, order by id. */
1612 if (left->cgrp->css.cgroup->kn->id < right->cgrp->css.cgroup->kn->id)
1613 return true;
1614
1615 return false;
1616 }
1617#endif
1618
161c85fa
PZ
1619 if (left->group_index < right->group_index)
1620 return true;
1621 if (left->group_index > right->group_index)
1622 return false;
1623
1624 return false;
8e1a2031
AB
1625}
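/*
 * Illustrative example (added for clarity, not part of the original source):
 * the effective sort key is {cpu, cgroup id, group_index}, so every event on
 * CPU 0 sorts before any event on CPU 1, and among CPU 0 events of the same
 * cgroup the one with the smaller (earlier-assigned) group_index comes first.
 */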
1626
1627/*
161c85fa
PZ
1628 * Insert @event into @groups' tree; using {@event->cpu, ++@groups->index} for
1629 * key (see perf_event_groups_less). This places it last inside the CPU
1630 * subtree.
8e1a2031
AB
1631 */
1632static void
1633perf_event_groups_insert(struct perf_event_groups *groups,
161c85fa 1634 struct perf_event *event)
8e1a2031
AB
1635{
1636 struct perf_event *node_event;
1637 struct rb_node *parent;
1638 struct rb_node **node;
1639
1640 event->group_index = ++groups->index;
1641
1642 node = &groups->tree.rb_node;
1643 parent = *node;
1644
1645 while (*node) {
1646 parent = *node;
161c85fa 1647 node_event = container_of(*node, struct perf_event, group_node);
8e1a2031
AB
1648
1649 if (perf_event_groups_less(event, node_event))
1650 node = &parent->rb_left;
1651 else
1652 node = &parent->rb_right;
1653 }
1654
1655 rb_link_node(&event->group_node, parent, node);
1656 rb_insert_color(&event->group_node, &groups->tree);
1657}
1658
1659/*
161c85fa 1660 * Helper function to insert event into the pinned or flexible groups.
8e1a2031
AB
1661 */
1662static void
1663add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
1664{
1665 struct perf_event_groups *groups;
1666
1667 groups = get_event_groups(event, ctx);
1668 perf_event_groups_insert(groups, event);
1669}
1670
1671/*
161c85fa 1672 * Delete a group from a tree.
8e1a2031
AB
1673 */
1674static void
1675perf_event_groups_delete(struct perf_event_groups *groups,
161c85fa 1676 struct perf_event *event)
8e1a2031 1677{
161c85fa
PZ
1678 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
1679 RB_EMPTY_ROOT(&groups->tree));
8e1a2031 1680
161c85fa 1681 rb_erase(&event->group_node, &groups->tree);
8e1a2031
AB
1682 init_event_group(event);
1683}
1684
1685/*
161c85fa 1686 * Helper function to delete event from its groups.
8e1a2031
AB
1687 */
1688static void
1689del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
1690{
1691 struct perf_event_groups *groups;
1692
1693 groups = get_event_groups(event, ctx);
1694 perf_event_groups_delete(groups, event);
1695}
1696
1697/*
95ed6c70 1698 * Get the leftmost event in the cpu/cgroup subtree.
8e1a2031
AB
1699 */
1700static struct perf_event *
95ed6c70
IR
1701perf_event_groups_first(struct perf_event_groups *groups, int cpu,
1702 struct cgroup *cgrp)
8e1a2031
AB
1703{
1704 struct perf_event *node_event = NULL, *match = NULL;
1705 struct rb_node *node = groups->tree.rb_node;
95ed6c70
IR
1706#ifdef CONFIG_CGROUP_PERF
1707 u64 node_cgrp_id, cgrp_id = 0;
1708
1709 if (cgrp)
1710 cgrp_id = cgrp->kn->id;
1711#endif
8e1a2031
AB
1712
1713 while (node) {
161c85fa 1714 node_event = container_of(node, struct perf_event, group_node);
8e1a2031
AB
1715
1716 if (cpu < node_event->cpu) {
1717 node = node->rb_left;
95ed6c70
IR
1718 continue;
1719 }
1720 if (cpu > node_event->cpu) {
8e1a2031 1721 node = node->rb_right;
95ed6c70
IR
1722 continue;
1723 }
1724#ifdef CONFIG_CGROUP_PERF
1725 node_cgrp_id = 0;
1726 if (node_event->cgrp && node_event->cgrp->css.cgroup)
1727 node_cgrp_id = node_event->cgrp->css.cgroup->kn->id;
1728
1729 if (cgrp_id < node_cgrp_id) {
8e1a2031 1730 node = node->rb_left;
95ed6c70
IR
1731 continue;
1732 }
1733 if (cgrp_id > node_cgrp_id) {
1734 node = node->rb_right;
1735 continue;
8e1a2031 1736 }
95ed6c70
IR
1737#endif
1738 match = node_event;
1739 node = node->rb_left;
8e1a2031
AB
1740 }
1741
1742 return match;
1743}
1744
1cac7b1a
PZ
1745/*
1746 * Like rb_entry_next_safe() for the @cpu subtree.
1747 */
1748static struct perf_event *
1749perf_event_groups_next(struct perf_event *event)
1750{
1751 struct perf_event *next;
95ed6c70
IR
1752#ifdef CONFIG_CGROUP_PERF
1753 u64 curr_cgrp_id = 0;
1754 u64 next_cgrp_id = 0;
1755#endif
1cac7b1a
PZ
1756
1757 next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node);
95ed6c70
IR
1758 if (next == NULL || next->cpu != event->cpu)
1759 return NULL;
1760
1761#ifdef CONFIG_CGROUP_PERF
1762 if (event->cgrp && event->cgrp->css.cgroup)
1763 curr_cgrp_id = event->cgrp->css.cgroup->kn->id;
1cac7b1a 1764
95ed6c70
IR
1765 if (next->cgrp && next->cgrp->css.cgroup)
1766 next_cgrp_id = next->cgrp->css.cgroup->kn->id;
1767
1768 if (curr_cgrp_id != next_cgrp_id)
1769 return NULL;
1770#endif
1771 return next;
1cac7b1a
PZ
1772}
1773
8e1a2031 1774/*
161c85fa 1775 * Iterate through the whole groups tree.
8e1a2031 1776 */
6e6804d2
PZ
1777#define perf_event_groups_for_each(event, groups) \
1778 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1779 typeof(*event), group_node); event; \
1780 event = rb_entry_safe(rb_next(&event->group_node), \
1781 typeof(*event), group_node))
8e1a2031 1782
fccc714b 1783/*
788faab7 1784 * Add an event to the lists for its context.
fccc714b
PZ
1785 * Must be called with ctx->mutex and ctx->lock held.
1786 */
04289bb9 1787static void
cdd6c482 1788list_add_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1789{
c994d613
PZ
1790 lockdep_assert_held(&ctx->lock);
1791
8a49542c
PZ
1792 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1793 event->attach_state |= PERF_ATTACH_CONTEXT;
04289bb9 1794
0d3d73aa
PZ
1795 event->tstamp = perf_event_time(event);
1796
04289bb9 1797 /*
8a49542c
PZ
1798 * If we're a standalone event or group leader, we go to the context
1799 * list; group events are kept attached to the group so that
1800 * perf_group_detach can, at all times, locate all siblings.
04289bb9 1801 */
8a49542c 1802 if (event->group_leader == event) {
4ff6a8de 1803 event->group_caps = event->event_caps;
8e1a2031 1804 add_event_to_groups(event, ctx);
5c148194 1805 }
592903cd 1806
cdd6c482
IM
1807 list_add_rcu(&event->event_entry, &ctx->event_list);
1808 ctx->nr_events++;
1809 if (event->attr.inherit_stat)
bfbd3381 1810 ctx->nr_stat++;
5a3126d4 1811
33238c50
PZ
1812 if (event->state > PERF_EVENT_STATE_OFF)
1813 perf_cgroup_event_enable(event, ctx);
1814
5a3126d4 1815 ctx->generation++;
04289bb9
IM
1816}
1817
0231bb53
JO
1818/*
1819 * Initialize event state based on the perf_event_attr::disabled.
1820 */
1821static inline void perf_event__state_init(struct perf_event *event)
1822{
1823 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1824 PERF_EVENT_STATE_INACTIVE;
1825}
1826
a723968c 1827static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
c320c7b7
ACM
1828{
1829 int entry = sizeof(u64); /* value */
1830 int size = 0;
1831 int nr = 1;
1832
1833 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1834 size += sizeof(u64);
1835
1836 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1837 size += sizeof(u64);
1838
1839 if (event->attr.read_format & PERF_FORMAT_ID)
1840 entry += sizeof(u64);
1841
1842 if (event->attr.read_format & PERF_FORMAT_GROUP) {
a723968c 1843 nr += nr_siblings;
c320c7b7
ACM
1844 size += sizeof(u64);
1845 }
1846
1847 size += entry * nr;
1848 event->read_size = size;
1849}
1850
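/*
 * Editor's sketch (userspace, not kernel code): the same arithmetic as
 * __perf_event_read_size() above, worked for one concrete read_format.
 * With TOTAL_TIME_ENABLED | TOTAL_TIME_RUNNING | ID | GROUP and two
 * siblings:
 *     entry = 8 (value) + 8 (id)            = 16
 *     size  = 8 + 8 (times) + 8 (nr word)   = 24
 *     nr    = 1 leader + 2 siblings         =  3
 *     read_size = 24 + 16 * 3               = 72 bytes
 */
#include <stdio.h>
#include <linux/perf_event.h>

static int read_size(unsigned long long read_format, int nr_siblings)
{
    int entry = 8;      /* value */
    int size = 0;
    int nr = 1;

    if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
        size += 8;
    if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
        size += 8;
    if (read_format & PERF_FORMAT_ID)
        entry += 8;
    if (read_format & PERF_FORMAT_GROUP) {
        nr += nr_siblings;
        size += 8;      /* the leading u64 nr */
    }
    return size + entry * nr;
}

int main(void)
{
    printf("read_size = %d\n",
           read_size(PERF_FORMAT_TOTAL_TIME_ENABLED |
                     PERF_FORMAT_TOTAL_TIME_RUNNING |
                     PERF_FORMAT_ID | PERF_FORMAT_GROUP, 2));
    return 0;
}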
a723968c 1851static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
c320c7b7
ACM
1852{
1853 struct perf_sample_data *data;
c320c7b7
ACM
1854 u16 size = 0;
1855
c320c7b7
ACM
1856 if (sample_type & PERF_SAMPLE_IP)
1857 size += sizeof(data->ip);
1858
6844c09d
ACM
1859 if (sample_type & PERF_SAMPLE_ADDR)
1860 size += sizeof(data->addr);
1861
1862 if (sample_type & PERF_SAMPLE_PERIOD)
1863 size += sizeof(data->period);
1864
c3feedf2
AK
1865 if (sample_type & PERF_SAMPLE_WEIGHT)
1866 size += sizeof(data->weight);
1867
6844c09d
ACM
1868 if (sample_type & PERF_SAMPLE_READ)
1869 size += event->read_size;
1870
d6be9ad6
SE
1871 if (sample_type & PERF_SAMPLE_DATA_SRC)
1872 size += sizeof(data->data_src.val);
1873
fdfbbd07
AK
1874 if (sample_type & PERF_SAMPLE_TRANSACTION)
1875 size += sizeof(data->txn);
1876
fc7ce9c7
KL
1877 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1878 size += sizeof(data->phys_addr);
1879
6546b19f
NK
1880 if (sample_type & PERF_SAMPLE_CGROUP)
1881 size += sizeof(data->cgroup);
1882
6844c09d
ACM
1883 event->header_size = size;
1884}
1885
a723968c
PZ
1886/*
1887 * Called at perf_event creation and when events are attached/detached from a
1888 * group.
1889 */
1890static void perf_event__header_size(struct perf_event *event)
1891{
1892 __perf_event_read_size(event,
1893 event->group_leader->nr_siblings);
1894 __perf_event_header_size(event, event->attr.sample_type);
1895}
1896
6844c09d
ACM
1897static void perf_event__id_header_size(struct perf_event *event)
1898{
1899 struct perf_sample_data *data;
1900 u64 sample_type = event->attr.sample_type;
1901 u16 size = 0;
1902
c320c7b7
ACM
1903 if (sample_type & PERF_SAMPLE_TID)
1904 size += sizeof(data->tid_entry);
1905
1906 if (sample_type & PERF_SAMPLE_TIME)
1907 size += sizeof(data->time);
1908
ff3d527c
AH
1909 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1910 size += sizeof(data->id);
1911
c320c7b7
ACM
1912 if (sample_type & PERF_SAMPLE_ID)
1913 size += sizeof(data->id);
1914
1915 if (sample_type & PERF_SAMPLE_STREAM_ID)
1916 size += sizeof(data->stream_id);
1917
1918 if (sample_type & PERF_SAMPLE_CPU)
1919 size += sizeof(data->cpu_entry);
1920
6844c09d 1921 event->id_header_size = size;
c320c7b7
ACM
1922}
1923
a723968c
PZ
1924static bool perf_event_validate_size(struct perf_event *event)
1925{
1926 /*
1927 * The values computed here will be over-written when we actually
1928 * attach the event.
1929 */
1930 __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
1931 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
1932 perf_event__id_header_size(event);
1933
1934 /*
1935 * Sum the lot; should not exceed the 64k limit we have on records.
1936 * Conservative limit to allow for callchains and other variable fields.
1937 */
1938 if (event->read_size + event->header_size +
1939 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
1940 return false;
1941
1942 return true;
1943}
1944
8a49542c
PZ
1945static void perf_group_attach(struct perf_event *event)
1946{
c320c7b7 1947 struct perf_event *group_leader = event->group_leader, *pos;
8a49542c 1948
a76a82a3
PZ
1949 lockdep_assert_held(&event->ctx->lock);
1950
74c3337c
PZ
1951 /*
1952 * We can have double attach due to group movement in perf_event_open.
1953 */
1954 if (event->attach_state & PERF_ATTACH_GROUP)
1955 return;
1956
8a49542c
PZ
1957 event->attach_state |= PERF_ATTACH_GROUP;
1958
1959 if (group_leader == event)
1960 return;
1961
652884fe
PZ
1962 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1963
4ff6a8de 1964 group_leader->group_caps &= event->event_caps;
8a49542c 1965
8343aae6 1966 list_add_tail(&event->sibling_list, &group_leader->sibling_list);
8a49542c 1967 group_leader->nr_siblings++;
c320c7b7
ACM
1968
1969 perf_event__header_size(group_leader);
1970
edb39592 1971 for_each_sibling_event(pos, group_leader)
c320c7b7 1972 perf_event__header_size(pos);
8a49542c
PZ
1973}
1974
a63eaf34 1975/*
788faab7 1976 * Remove an event from the lists for its context.
fccc714b 1977 * Must be called with ctx->mutex and ctx->lock held.
a63eaf34 1978 */
04289bb9 1979static void
cdd6c482 1980list_del_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1981{
652884fe
PZ
1982 WARN_ON_ONCE(event->ctx != ctx);
1983 lockdep_assert_held(&ctx->lock);
1984
8a49542c
PZ
1985 /*
1986 * We can have double detach due to exit/hot-unplug + close.
1987 */
1988 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
a63eaf34 1989 return;
8a49542c
PZ
1990
1991 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1992
cdd6c482
IM
1993 ctx->nr_events--;
1994 if (event->attr.inherit_stat)
bfbd3381 1995 ctx->nr_stat--;
8bc20959 1996
cdd6c482 1997 list_del_rcu(&event->event_entry);
04289bb9 1998
8a49542c 1999 if (event->group_leader == event)
8e1a2031 2000 del_event_from_groups(event, ctx);
5c148194 2001
b2e74a26
SE
2002 /*
2003 * If the event was in error state, then keep it
2004 * that way; otherwise bogus counts will be
2005 * returned on read(). The only way to get out
2006 * of error state is by explicit re-enabling
2007 * of the event.
2008 */
33238c50
PZ
2009 if (event->state > PERF_EVENT_STATE_OFF) {
2010 perf_cgroup_event_disable(event, ctx);
0d3d73aa 2011 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
33238c50 2012 }
5a3126d4
PZ
2013
2014 ctx->generation++;
050735b0
PZ
2015}
2016
ab43762e
AS
2017static int
2018perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event)
2019{
2020 if (!has_aux(aux_event))
2021 return 0;
2022
2023 if (!event->pmu->aux_output_match)
2024 return 0;
2025
2026 return event->pmu->aux_output_match(aux_event);
2027}
2028
2029static void put_event(struct perf_event *event);
2030static void event_sched_out(struct perf_event *event,
2031 struct perf_cpu_context *cpuctx,
2032 struct perf_event_context *ctx);
2033
2034static void perf_put_aux_event(struct perf_event *event)
2035{
2036 struct perf_event_context *ctx = event->ctx;
2037 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2038 struct perf_event *iter;
2039
2040 /*
2041 * If the event uses an aux_event, tear down the link.
2042 */
2043 if (event->aux_event) {
2044 iter = event->aux_event;
2045 event->aux_event = NULL;
2046 put_event(iter);
2047 return;
2048 }
2049
2050 /*
2051 * If the event is an aux_event, tear down all links to
2052 * it from other events.
2053 */
2054 for_each_sibling_event(iter, event->group_leader) {
2055 if (iter->aux_event != event)
2056 continue;
2057
2058 iter->aux_event = NULL;
2059 put_event(event);
2060
2061 /*
2062 * If it's ACTIVE, schedule it out and put it into ERROR
2063 * state so that we don't try to schedule it again. Note
2064 * that perf_event_enable() will clear the ERROR status.
2065 */
2066 event_sched_out(iter, cpuctx, ctx);
2067 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
2068 }
2069}
2070
a4faf00d
AS
2071static bool perf_need_aux_event(struct perf_event *event)
2072{
2073 return !!event->attr.aux_output || !!event->attr.aux_sample_size;
2074}
2075
ab43762e
AS
2076static int perf_get_aux_event(struct perf_event *event,
2077 struct perf_event *group_leader)
2078{
2079 /*
2080 * Our group leader must be an aux event if we want to be
2081 * an aux_output. This way, the aux event will precede its
2082 * aux_output events in the group, and therefore will always
2083 * schedule first.
2084 */
2085 if (!group_leader)
2086 return 0;
2087
a4faf00d
AS
2088 /*
2089 * aux_output and aux_sample_size are mutually exclusive.
2090 */
2091 if (event->attr.aux_output && event->attr.aux_sample_size)
2092 return 0;
2093
2094 if (event->attr.aux_output &&
2095 !perf_aux_output_match(event, group_leader))
2096 return 0;
2097
2098 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux)
ab43762e
AS
2099 return 0;
2100
2101 if (!atomic_long_inc_not_zero(&group_leader->refcount))
2102 return 0;
2103
2104 /*
2105 * Link aux_outputs to their aux event; this is undone in
2106 * perf_group_detach() by perf_put_aux_event(). When the
2107 * group is torn down, the aux_output events lose their
2108 * link to the aux_event and can't schedule any more.
2109 */
2110 event->aux_event = group_leader;
2111
2112 return 1;
2113}
2114
ab6f824c
PZ
2115static inline struct list_head *get_event_list(struct perf_event *event)
2116{
2117 struct perf_event_context *ctx = event->ctx;
2118 return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active;
2119}
2120
8a49542c 2121static void perf_group_detach(struct perf_event *event)
050735b0
PZ
2122{
2123 struct perf_event *sibling, *tmp;
6668128a 2124 struct perf_event_context *ctx = event->ctx;
8a49542c 2125
6668128a 2126 lockdep_assert_held(&ctx->lock);
a76a82a3 2127
8a49542c
PZ
2128 /*
2129 * We can have double detach due to exit/hot-unplug + close.
2130 */
2131 if (!(event->attach_state & PERF_ATTACH_GROUP))
2132 return;
2133
2134 event->attach_state &= ~PERF_ATTACH_GROUP;
2135
ab43762e
AS
2136 perf_put_aux_event(event);
2137
8a49542c
PZ
2138 /*
2139 * If this is a sibling, remove it from its group.
2140 */
2141 if (event->group_leader != event) {
8343aae6 2142 list_del_init(&event->sibling_list);
8a49542c 2143 event->group_leader->nr_siblings--;
c320c7b7 2144 goto out;
8a49542c
PZ
2145 }
2146
04289bb9 2147 /*
cdd6c482
IM
2148 * If this was a group event with sibling events then
2149 * upgrade the siblings to singleton events by adding them
8a49542c 2150 * to whatever list we are on.
04289bb9 2151 */
8343aae6 2152 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) {
8e1a2031 2153
04289bb9 2154 sibling->group_leader = sibling;
24868367 2155 list_del_init(&sibling->sibling_list);
d6f962b5
FW
2156
2157 /* Inherit group flags from the previous leader */
4ff6a8de 2158 sibling->group_caps = event->group_caps;
652884fe 2159
8e1a2031 2160 if (!RB_EMPTY_NODE(&event->group_node)) {
8e1a2031 2161 add_event_to_groups(sibling, event->ctx);
6668128a 2162
ab6f824c
PZ
2163 if (sibling->state == PERF_EVENT_STATE_ACTIVE)
2164 list_add_tail(&sibling->active_list, get_event_list(sibling));
8e1a2031
AB
2165 }
2166
652884fe 2167 WARN_ON_ONCE(sibling->ctx != event->ctx);
04289bb9 2168 }
c320c7b7
ACM
2169
2170out:
2171 perf_event__header_size(event->group_leader);
2172
edb39592 2173 for_each_sibling_event(tmp, event->group_leader)
c320c7b7 2174 perf_event__header_size(tmp);
04289bb9
IM
2175}
2176
fadfe7be
JO
2177static bool is_orphaned_event(struct perf_event *event)
2178{
a69b0ca4 2179 return event->state == PERF_EVENT_STATE_DEAD;
fadfe7be
JO
2180}
2181
2c81a647 2182static inline int __pmu_filter_match(struct perf_event *event)
66eb579e
MR
2183{
2184 struct pmu *pmu = event->pmu;
2185 return pmu->filter_match ? pmu->filter_match(event) : 1;
2186}
2187
2c81a647
MR
2188/*
2189 * Check whether we should attempt to schedule an event group based on
2190 * PMU-specific filtering. An event group can consist of HW and SW events,
2191 * potentially with a SW leader, so we must check all the filters to
2192 * determine whether a group is schedulable:
2193 */
2194static inline int pmu_filter_match(struct perf_event *event)
2195{
edb39592 2196 struct perf_event *sibling;
2c81a647
MR
2197
2198 if (!__pmu_filter_match(event))
2199 return 0;
2200
edb39592
PZ
2201 for_each_sibling_event(sibling, event) {
2202 if (!__pmu_filter_match(sibling))
2c81a647
MR
2203 return 0;
2204 }
2205
2206 return 1;
2207}
2208
fa66f07a
SE
2209static inline int
2210event_filter_match(struct perf_event *event)
2211{
0b8f1e2e
PZ
2212 return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
2213 perf_cgroup_match(event) && pmu_filter_match(event);
fa66f07a
SE
2214}
2215
9ffcfa6f
SE
2216static void
2217event_sched_out(struct perf_event *event,
3b6f9e5c 2218 struct perf_cpu_context *cpuctx,
cdd6c482 2219 struct perf_event_context *ctx)
3b6f9e5c 2220{
0d3d73aa 2221 enum perf_event_state state = PERF_EVENT_STATE_INACTIVE;
652884fe
PZ
2222
2223 WARN_ON_ONCE(event->ctx != ctx);
2224 lockdep_assert_held(&ctx->lock);
2225
cdd6c482 2226 if (event->state != PERF_EVENT_STATE_ACTIVE)
9ffcfa6f 2227 return;
3b6f9e5c 2228
6668128a
PZ
2229 /*
2230 * Asymmetry; we only schedule events _IN_ through ctx_sched_in(), but
2231 * we can schedule events _OUT_ individually through things like
2232 * __perf_remove_from_context().
2233 */
2234 list_del_init(&event->active_list);
2235
44377277
AS
2236 perf_pmu_disable(event->pmu);
2237
28a967c3
PZ
2238 event->pmu->del(event, 0);
2239 event->oncpu = -1;
0d3d73aa 2240
1d54ad94
PZ
2241 if (READ_ONCE(event->pending_disable) >= 0) {
2242 WRITE_ONCE(event->pending_disable, -1);
33238c50 2243 perf_cgroup_event_disable(event, ctx);
0d3d73aa 2244 state = PERF_EVENT_STATE_OFF;
970892a9 2245 }
0d3d73aa 2246 perf_event_set_state(event, state);
3b6f9e5c 2247
cdd6c482 2248 if (!is_software_event(event))
3b6f9e5c 2249 cpuctx->active_oncpu--;
2fde4f94
MR
2250 if (!--ctx->nr_active)
2251 perf_event_ctx_deactivate(ctx);
0f5a2601
PZ
2252 if (event->attr.freq && event->attr.sample_freq)
2253 ctx->nr_freq--;
cdd6c482 2254 if (event->attr.exclusive || !cpuctx->active_oncpu)
3b6f9e5c 2255 cpuctx->exclusive = 0;
44377277
AS
2256
2257 perf_pmu_enable(event->pmu);
3b6f9e5c
PM
2258}
2259
d859e29f 2260static void
cdd6c482 2261group_sched_out(struct perf_event *group_event,
d859e29f 2262 struct perf_cpu_context *cpuctx,
cdd6c482 2263 struct perf_event_context *ctx)
d859e29f 2264{
cdd6c482 2265 struct perf_event *event;
0d3d73aa
PZ
2266
2267 if (group_event->state != PERF_EVENT_STATE_ACTIVE)
2268 return;
d859e29f 2269
3f005e7d
MR
2270 perf_pmu_disable(ctx->pmu);
2271
cdd6c482 2272 event_sched_out(group_event, cpuctx, ctx);
d859e29f
PM
2273
2274 /*
2275 * Schedule out siblings (if any):
2276 */
edb39592 2277 for_each_sibling_event(event, group_event)
cdd6c482 2278 event_sched_out(event, cpuctx, ctx);
d859e29f 2279
3f005e7d
MR
2280 perf_pmu_enable(ctx->pmu);
2281
0d3d73aa 2282 if (group_event->attr.exclusive)
d859e29f
PM
2283 cpuctx->exclusive = 0;
2284}
2285
45a0e07a 2286#define DETACH_GROUP 0x01UL
0017960f 2287
0793a61d 2288/*
cdd6c482 2289 * Cross CPU call to remove a performance event
0793a61d 2290 *
cdd6c482 2291 * We disable the event on the hardware level first. After that we
0793a61d
TG
2292 * remove it from the context list.
2293 */
fae3fde6
PZ
2294static void
2295__perf_remove_from_context(struct perf_event *event,
2296 struct perf_cpu_context *cpuctx,
2297 struct perf_event_context *ctx,
2298 void *info)
0793a61d 2299{
45a0e07a 2300 unsigned long flags = (unsigned long)info;
0793a61d 2301
3c5c8711
PZ
2302 if (ctx->is_active & EVENT_TIME) {
2303 update_context_time(ctx);
2304 update_cgrp_time_from_cpuctx(cpuctx);
2305 }
2306
cdd6c482 2307 event_sched_out(event, cpuctx, ctx);
45a0e07a 2308 if (flags & DETACH_GROUP)
46ce0fe9 2309 perf_group_detach(event);
cdd6c482 2310 list_del_event(event, ctx);
39a43640
PZ
2311
2312 if (!ctx->nr_events && ctx->is_active) {
64ce3126 2313 ctx->is_active = 0;
90c91dfb 2314 ctx->rotate_necessary = 0;
39a43640
PZ
2315 if (ctx->task) {
2316 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2317 cpuctx->task_ctx = NULL;
2318 }
64ce3126 2319 }
0793a61d
TG
2320}
2321
0793a61d 2322/*
cdd6c482 2323 * Remove the event from a task's (or a CPU's) list of events.
0793a61d 2324 *
cdd6c482
IM
2325 * If event->ctx is a cloned context, callers must make sure that
2326 * every task struct that event->ctx->task could possibly point to
c93f7669
PM
2327 * remains valid. This is OK when called from perf_release since
2328 * that only calls us on the top-level context, which can't be a clone.
cdd6c482 2329 * When called from perf_event_exit_task, it's OK because the
c93f7669 2330 * context has been detached from its task.
0793a61d 2331 */
45a0e07a 2332static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
0793a61d 2333{
a76a82a3
PZ
2334 struct perf_event_context *ctx = event->ctx;
2335
2336 lockdep_assert_held(&ctx->mutex);
0793a61d 2337
45a0e07a 2338 event_function_call(event, __perf_remove_from_context, (void *)flags);
a76a82a3
PZ
2339
2340 /*
2341 * The above event_function_call() can NO-OP when it hits
2342 * TASK_TOMBSTONE. In that case we must already have been detached
2343 * from the context (by perf_event_exit_event()) but the grouping
2344 * might still be intact.
2345 */
2346 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
2347 if ((flags & DETACH_GROUP) &&
2348 (event->attach_state & PERF_ATTACH_GROUP)) {
2349 /*
2350 * Since in that case we cannot possibly be scheduled, simply
2351 * detach now.
2352 */
2353 raw_spin_lock_irq(&ctx->lock);
2354 perf_group_detach(event);
2355 raw_spin_unlock_irq(&ctx->lock);
2356 }
0793a61d
TG
2357}
2358
d859e29f 2359/*
cdd6c482 2360 * Cross CPU call to disable a performance event
d859e29f 2361 */
fae3fde6
PZ
2362static void __perf_event_disable(struct perf_event *event,
2363 struct perf_cpu_context *cpuctx,
2364 struct perf_event_context *ctx,
2365 void *info)
7b648018 2366{
fae3fde6
PZ
2367 if (event->state < PERF_EVENT_STATE_INACTIVE)
2368 return;
7b648018 2369
3c5c8711
PZ
2370 if (ctx->is_active & EVENT_TIME) {
2371 update_context_time(ctx);
2372 update_cgrp_time_from_event(event);
2373 }
2374
fae3fde6
PZ
2375 if (event == event->group_leader)
2376 group_sched_out(event, cpuctx, ctx);
2377 else
2378 event_sched_out(event, cpuctx, ctx);
0d3d73aa
PZ
2379
2380 perf_event_set_state(event, PERF_EVENT_STATE_OFF);
33238c50 2381 perf_cgroup_event_disable(event, ctx);
7b648018
PZ
2382}
2383
d859e29f 2384/*
788faab7 2385 * Disable an event.
c93f7669 2386 *
cdd6c482
IM
2387 * If event->ctx is a cloned context, callers must make sure that
2388 * every task struct that event->ctx->task could possibly point to
9f014e3a 2389 * remains valid. This condition is satisfied when called through
cdd6c482
IM
2390 * perf_event_for_each_child or perf_event_for_each because they
2391 * hold the top-level event's child_mutex, so any descendant that
8ba289b8
PZ
2392 * goes to exit will block in perf_event_exit_event().
2393 *
cdd6c482 2394 * When called from perf_pending_event it's OK because event->ctx
c93f7669 2395 * is the current context on this CPU and preemption is disabled,
cdd6c482 2396 * hence we can't get into perf_event_task_sched_out for this context.
d859e29f 2397 */
f63a8daa 2398static void _perf_event_disable(struct perf_event *event)
d859e29f 2399{
cdd6c482 2400 struct perf_event_context *ctx = event->ctx;
d859e29f 2401
e625cce1 2402 raw_spin_lock_irq(&ctx->lock);
7b648018 2403 if (event->state <= PERF_EVENT_STATE_OFF) {
e625cce1 2404 raw_spin_unlock_irq(&ctx->lock);
7b648018 2405 return;
53cfbf59 2406 }
e625cce1 2407 raw_spin_unlock_irq(&ctx->lock);
7b648018 2408
fae3fde6
PZ
2409 event_function_call(event, __perf_event_disable, NULL);
2410}
2411
2412void perf_event_disable_local(struct perf_event *event)
2413{
2414 event_function_local(event, __perf_event_disable, NULL);
d859e29f 2415}
f63a8daa
PZ
2416
2417/*
2418 * Strictly speaking kernel users cannot create groups and therefore this
2419 * interface does not need the perf_event_ctx_lock() magic.
2420 */
2421void perf_event_disable(struct perf_event *event)
2422{
2423 struct perf_event_context *ctx;
2424
2425 ctx = perf_event_ctx_lock(event);
2426 _perf_event_disable(event);
2427 perf_event_ctx_unlock(event, ctx);
2428}
dcfce4a0 2429EXPORT_SYMBOL_GPL(perf_event_disable);
d859e29f 2430
5aab90ce
JO
2431void perf_event_disable_inatomic(struct perf_event *event)
2432{
1d54ad94
PZ
2433 WRITE_ONCE(event->pending_disable, smp_processor_id());
2434 /* can fail, see perf_pending_event_disable() */
5aab90ce
JO
2435 irq_work_queue(&event->pending);
2436}
2437
e5d1367f 2438static void perf_set_shadow_time(struct perf_event *event,
0d3d73aa 2439 struct perf_event_context *ctx)
e5d1367f
SE
2440{
2441 /*
2442 * use the correct time source for the time snapshot
2443 *
2444 * We could get by without this by leveraging the
2445 * fact that to get to this function, the caller
2446 * has most likely already called update_context_time()
2447 * and update_cgrp_time_xx() and thus both timestamps
2448 * are identical (or very close). Given that tstamp is
2449 * already adjusted for cgroup, we could say that:
2450 * tstamp - ctx->timestamp
2451 * is equivalent to
2452 * tstamp - cgrp->timestamp.
2453 *
2454 * Then, in perf_output_read(), the calculation would
2455 * work with no changes because:
2456 * - event is guaranteed scheduled in
2457 * - no scheduled out in between
2458 * - thus the timestamp would be the same
2459 *
2460 * But this is a bit hairy.
2461 *
2462 * So instead, we have an explicit cgroup call to remain
2463 * within the time source all along. We believe it
2464 * is cleaner and simpler to understand.
2465 */
2466 if (is_cgroup_event(event))
0d3d73aa 2467 perf_cgroup_set_shadow_time(event, event->tstamp);
e5d1367f 2468 else
0d3d73aa 2469 event->shadow_ctx_time = event->tstamp - ctx->timestamp;
e5d1367f
SE
2470}
2471
4fe757dd
PZ
2472#define MAX_INTERRUPTS (~0ULL)
2473
2474static void perf_log_throttle(struct perf_event *event, int enable);
ec0d7729 2475static void perf_log_itrace_start(struct perf_event *event);
4fe757dd 2476
235c7fc7 2477static int
9ffcfa6f 2478event_sched_in(struct perf_event *event,
235c7fc7 2479 struct perf_cpu_context *cpuctx,
6e37738a 2480 struct perf_event_context *ctx)
235c7fc7 2481{
44377277 2482 int ret = 0;
4158755d 2483
ab6f824c
PZ
2484 WARN_ON_ONCE(event->ctx != ctx);
2485
63342411
PZ
2486 lockdep_assert_held(&ctx->lock);
2487
cdd6c482 2488 if (event->state <= PERF_EVENT_STATE_OFF)
235c7fc7
IM
2489 return 0;
2490
95ff4ca2
AS
2491 WRITE_ONCE(event->oncpu, smp_processor_id());
2492 /*
0c1cbc18
PZ
2493 * Order event::oncpu write to happen before the ACTIVE state is
2494 * visible. This allows perf_event_{stop,read}() to observe the correct
2495 * ->oncpu if it sees ACTIVE.
95ff4ca2
AS
2496 */
2497 smp_wmb();
0d3d73aa 2498 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE);
4fe757dd
PZ
2499
2500 /*
2501 * Unthrottle events; since we scheduled, we might have missed several
2502 * ticks already. Also, for a heavily scheduling task there is little
2503 * guarantee it'll get a tick in a timely manner.
2504 */
2505 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
2506 perf_log_throttle(event, 1);
2507 event->hw.interrupts = 0;
2508 }
2509
44377277
AS
2510 perf_pmu_disable(event->pmu);
2511
0d3d73aa 2512 perf_set_shadow_time(event, ctx);
72f669c0 2513
ec0d7729
AS
2514 perf_log_itrace_start(event);
2515
a4eaf7f1 2516 if (event->pmu->add(event, PERF_EF_START)) {
0d3d73aa 2517 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
cdd6c482 2518 event->oncpu = -1;
44377277
AS
2519 ret = -EAGAIN;
2520 goto out;
235c7fc7
IM
2521 }
2522
cdd6c482 2523 if (!is_software_event(event))
3b6f9e5c 2524 cpuctx->active_oncpu++;
2fde4f94
MR
2525 if (!ctx->nr_active++)
2526 perf_event_ctx_activate(ctx);
0f5a2601
PZ
2527 if (event->attr.freq && event->attr.sample_freq)
2528 ctx->nr_freq++;
235c7fc7 2529
cdd6c482 2530 if (event->attr.exclusive)
3b6f9e5c
PM
2531 cpuctx->exclusive = 1;
2532
44377277
AS
2533out:
2534 perf_pmu_enable(event->pmu);
2535
2536 return ret;
235c7fc7
IM
2537}
2538
6751b71e 2539static int
cdd6c482 2540group_sched_in(struct perf_event *group_event,
6751b71e 2541 struct perf_cpu_context *cpuctx,
6e37738a 2542 struct perf_event_context *ctx)
6751b71e 2543{
6bde9b6c 2544 struct perf_event *event, *partial_group = NULL;
4a234593 2545 struct pmu *pmu = ctx->pmu;
6751b71e 2546
cdd6c482 2547 if (group_event->state == PERF_EVENT_STATE_OFF)
6751b71e
PM
2548 return 0;
2549
fbbe0701 2550 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
6bde9b6c 2551
9ffcfa6f 2552 if (event_sched_in(group_event, cpuctx, ctx)) {
ad5133b7 2553 pmu->cancel_txn(pmu);
272325c4 2554 perf_mux_hrtimer_restart(cpuctx);
6751b71e 2555 return -EAGAIN;
90151c35 2556 }
6751b71e
PM
2557
2558 /*
2559 * Schedule in siblings as one group (if any):
2560 */
edb39592 2561 for_each_sibling_event(event, group_event) {
9ffcfa6f 2562 if (event_sched_in(event, cpuctx, ctx)) {
cdd6c482 2563 partial_group = event;
6751b71e
PM
2564 goto group_error;
2565 }
2566 }
2567
9ffcfa6f 2568 if (!pmu->commit_txn(pmu))
6e85158c 2569 return 0;
9ffcfa6f 2570
6751b71e
PM
2571group_error:
2572 /*
2573 * Groups can be scheduled in as one unit only, so undo any
2574 * partial group before returning:
0d3d73aa 2575 * The events up to the failed event are scheduled out normally.
6751b71e 2576 */
edb39592 2577 for_each_sibling_event(event, group_event) {
cdd6c482 2578 if (event == partial_group)
0d3d73aa 2579 break;
d7842da4 2580
0d3d73aa 2581 event_sched_out(event, cpuctx, ctx);
6751b71e 2582 }
9ffcfa6f 2583 event_sched_out(group_event, cpuctx, ctx);
6751b71e 2584
ad5133b7 2585 pmu->cancel_txn(pmu);
90151c35 2586
272325c4 2587 perf_mux_hrtimer_restart(cpuctx);
9e630205 2588
6751b71e
PM
2589 return -EAGAIN;
2590}
2591
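/*
 * Editor's sketch (userspace, not kernel code): group_sched_in() above leans
 * on the PMU transaction callbacks to make "the whole group goes on, or none
 * of it does" cheap for hardware with limited counters.  A stripped-down
 * illustration of that start/commit/cancel pattern, with invented types and
 * an assumed limit of four counters:
 */
#include <stdio.h>

struct fake_pmu { int committed; int staged; };

static void start_txn(struct fake_pmu *p)  { p->staged = 0; }
static void cancel_txn(struct fake_pmu *p) { p->staged = 0; }
static void add_event(struct fake_pmu *p)  { p->staged++; }
static int commit_txn(struct fake_pmu *p)
{
    if (p->committed + p->staged > 4)   /* pretend 4 hardware counters */
        return -1;
    p->committed += p->staged;
    p->staged = 0;
    return 0;
}

/* Schedule a leader plus @nr_siblings events as one unit. */
static int fake_group_sched_in(struct fake_pmu *p, int nr_siblings)
{
    int i;

    start_txn(p);
    add_event(p);               /* leader */
    for (i = 0; i < nr_siblings; i++)
        add_event(p);           /* siblings */

    if (commit_txn(p)) {
        cancel_txn(p);          /* nothing of the group remains */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct fake_pmu pmu = { 0, 0 };

    printf("first group of 3:  %d\n", fake_group_sched_in(&pmu, 2));
    printf("second group of 3: %d\n", fake_group_sched_in(&pmu, 2));
    printf("counters in use:   %d\n", pmu.committed);
    return 0;
}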
3b6f9e5c 2592/*
cdd6c482 2593 * Work out whether we can put this event group on the CPU now.
3b6f9e5c 2594 */
cdd6c482 2595static int group_can_go_on(struct perf_event *event,
3b6f9e5c
PM
2596 struct perf_cpu_context *cpuctx,
2597 int can_add_hw)
2598{
2599 /*
cdd6c482 2600 * Groups consisting entirely of software events can always go on.
3b6f9e5c 2601 */
4ff6a8de 2602 if (event->group_caps & PERF_EV_CAP_SOFTWARE)
3b6f9e5c
PM
2603 return 1;
2604 /*
2605 * If an exclusive group is already on, no other hardware
cdd6c482 2606 * events can go on.
3b6f9e5c
PM
2607 */
2608 if (cpuctx->exclusive)
2609 return 0;
2610 /*
2611 * If this group is exclusive and there are already
cdd6c482 2612 * events on the CPU, it can't go on.
3b6f9e5c 2613 */
cdd6c482 2614 if (event->attr.exclusive && cpuctx->active_oncpu)
3b6f9e5c
PM
2615 return 0;
2616 /*
2617 * Otherwise, try to add it if all previous groups were able
2618 * to go on.
2619 */
2620 return can_add_hw;
2621}
2622
cdd6c482
IM
2623static void add_event_to_ctx(struct perf_event *event,
2624 struct perf_event_context *ctx)
53cfbf59 2625{
cdd6c482 2626 list_add_event(event, ctx);
8a49542c 2627 perf_group_attach(event);
53cfbf59
PM
2628}
2629
bd2afa49
PZ
2630static void ctx_sched_out(struct perf_event_context *ctx,
2631 struct perf_cpu_context *cpuctx,
2632 enum event_type_t event_type);
2c29ef0f
PZ
2633static void
2634ctx_sched_in(struct perf_event_context *ctx,
2635 struct perf_cpu_context *cpuctx,
2636 enum event_type_t event_type,
2637 struct task_struct *task);
fe4b04fa 2638
bd2afa49 2639static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
487f05e1
AS
2640 struct perf_event_context *ctx,
2641 enum event_type_t event_type)
bd2afa49
PZ
2642{
2643 if (!cpuctx->task_ctx)
2644 return;
2645
2646 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2647 return;
2648
487f05e1 2649 ctx_sched_out(ctx, cpuctx, event_type);
bd2afa49
PZ
2650}
2651
dce5855b
PZ
2652static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2653 struct perf_event_context *ctx,
2654 struct task_struct *task)
2655{
2656 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2657 if (ctx)
2658 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2659 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2660 if (ctx)
2661 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2662}
2663
487f05e1
AS
2664/*
2665 * We want to maintain the following priority of scheduling:
2666 * - CPU pinned (EVENT_CPU | EVENT_PINNED)
2667 * - task pinned (EVENT_PINNED)
2668 * - CPU flexible (EVENT_CPU | EVENT_FLEXIBLE)
2669 * - task flexible (EVENT_FLEXIBLE).
2670 *
2671 * In order to avoid unscheduling and scheduling back in everything every
2672 * time an event is added, only do it for the groups of equal priority and
2673 * below.
2674 *
2675 * This can be called after a batch operation on task events, in which case
2676 * event_type is a bit mask of the types of events involved. For CPU events,
2677 * event_type is only either EVENT_PINNED or EVENT_FLEXIBLE.
2678 */
3e349507 2679static void ctx_resched(struct perf_cpu_context *cpuctx,
487f05e1
AS
2680 struct perf_event_context *task_ctx,
2681 enum event_type_t event_type)
0017960f 2682{
bd903afe 2683 enum event_type_t ctx_event_type;
487f05e1
AS
2684 bool cpu_event = !!(event_type & EVENT_CPU);
2685
2686 /*
2687 * If pinned groups are involved, flexible groups also need to be
2688 * scheduled out.
2689 */
2690 if (event_type & EVENT_PINNED)
2691 event_type |= EVENT_FLEXIBLE;
2692
bd903afe
SL
2693 ctx_event_type = event_type & EVENT_ALL;
2694
3e349507
PZ
2695 perf_pmu_disable(cpuctx->ctx.pmu);
2696 if (task_ctx)
487f05e1
AS
2697 task_ctx_sched_out(cpuctx, task_ctx, event_type);
2698
2699 /*
2700 * Decide which cpu ctx groups to schedule out based on the types
2701 * of events that caused rescheduling:
2702 * - EVENT_CPU: schedule out corresponding groups;
2703 * - EVENT_PINNED task events: schedule out EVENT_FLEXIBLE groups;
2704 * - otherwise, do nothing more.
2705 */
2706 if (cpu_event)
2707 cpu_ctx_sched_out(cpuctx, ctx_event_type);
2708 else if (ctx_event_type & EVENT_PINNED)
2709 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2710
3e349507
PZ
2711 perf_event_sched_in(cpuctx, task_ctx, current);
2712 perf_pmu_enable(cpuctx->ctx.pmu);
0017960f
PZ
2713}
2714
c68d224e
SE
2715void perf_pmu_resched(struct pmu *pmu)
2716{
2717 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2718 struct perf_event_context *task_ctx = cpuctx->task_ctx;
2719
2720 perf_ctx_lock(cpuctx, task_ctx);
2721 ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU);
2722 perf_ctx_unlock(cpuctx, task_ctx);
2723}
2724
0793a61d 2725/*
cdd6c482 2726 * Cross CPU call to install and enable a performance event
682076ae 2727 *
a096309b
PZ
2728 * Very similar to remote_function() + event_function() but cannot assume that
2729 * things like ctx->is_active and cpuctx->task_ctx are set.
0793a61d 2730 */
fe4b04fa 2731static int __perf_install_in_context(void *info)
0793a61d 2732{
a096309b
PZ
2733 struct perf_event *event = info;
2734 struct perf_event_context *ctx = event->ctx;
108b02cf 2735 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2c29ef0f 2736 struct perf_event_context *task_ctx = cpuctx->task_ctx;
63cae12b 2737 bool reprogram = true;
a096309b 2738 int ret = 0;
0793a61d 2739
63b6da39 2740 raw_spin_lock(&cpuctx->ctx.lock);
39a43640 2741 if (ctx->task) {
b58f6b0d
PZ
2742 raw_spin_lock(&ctx->lock);
2743 task_ctx = ctx;
a096309b 2744
63cae12b 2745 reprogram = (ctx->task == current);
b58f6b0d 2746
39a43640 2747 /*
63cae12b
PZ
2748 * If the task is running, it must be running on this CPU,
2749 * otherwise we cannot reprogram things.
2750 *
2751 * If its not running, we don't care, ctx->lock will
2752 * serialize against it becoming runnable.
39a43640 2753 */
63cae12b
PZ
2754 if (task_curr(ctx->task) && !reprogram) {
2755 ret = -ESRCH;
2756 goto unlock;
2757 }
a096309b 2758
63cae12b 2759 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
63b6da39
PZ
2760 } else if (task_ctx) {
2761 raw_spin_lock(&task_ctx->lock);
2c29ef0f 2762 }
b58f6b0d 2763
33801b94 2764#ifdef CONFIG_CGROUP_PERF
33238c50 2765 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) {
33801b94 2766 /*
2767 * If the current cgroup doesn't match the event's
2768 * cgroup, we should not try to schedule it.
2769 */
2770 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
2771 reprogram = cgroup_is_descendant(cgrp->css.cgroup,
2772 event->cgrp->css.cgroup);
2773 }
2774#endif
2775
63cae12b 2776 if (reprogram) {
a096309b
PZ
2777 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2778 add_event_to_ctx(event, ctx);
487f05e1 2779 ctx_resched(cpuctx, task_ctx, get_event_type(event));
a096309b
PZ
2780 } else {
2781 add_event_to_ctx(event, ctx);
2782 }
2783
63b6da39 2784unlock:
2c29ef0f 2785 perf_ctx_unlock(cpuctx, task_ctx);
fe4b04fa 2786
a096309b 2787 return ret;
0793a61d
TG
2788}
2789
8a58ddae
AS
2790static bool exclusive_event_installable(struct perf_event *event,
2791 struct perf_event_context *ctx);
2792
0793a61d 2793/*
a096309b
PZ
2794 * Attach a performance event to a context.
2795 *
2796 * Very similar to event_function_call, see comment there.
0793a61d
TG
2797 */
2798static void
cdd6c482
IM
2799perf_install_in_context(struct perf_event_context *ctx,
2800 struct perf_event *event,
0793a61d
TG
2801 int cpu)
2802{
a096309b 2803 struct task_struct *task = READ_ONCE(ctx->task);
39a43640 2804
fe4b04fa
PZ
2805 lockdep_assert_held(&ctx->mutex);
2806
8a58ddae
AS
2807 WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
2808
0cda4c02
YZ
2809 if (event->cpu != -1)
2810 event->cpu = cpu;
c3f00c70 2811
0b8f1e2e
PZ
2812 /*
2813 * Ensures that if we can observe event->ctx, both the event and ctx
2814 * will be 'complete'. See perf_iterate_sb_cpu().
2815 */
2816 smp_store_release(&event->ctx, ctx);
2817
db0503e4
PZ
2818 /*
2819 * perf_event_attr::disabled events will not run and can be initialized
2820 * without an IPI, except when this is the first event for the context; in
2821 * that case we need the magic of the IPI to set ctx->is_active.
2822 *
2823 * The IOC_ENABLE that is sure to follow the creation of a disabled
2824 * event will issue the IPI and reprogram the hardware.
2825 */
2826 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && ctx->nr_events) {
2827 raw_spin_lock_irq(&ctx->lock);
2828 if (ctx->task == TASK_TOMBSTONE) {
2829 raw_spin_unlock_irq(&ctx->lock);
2830 return;
2831 }
2832 add_event_to_ctx(event, ctx);
2833 raw_spin_unlock_irq(&ctx->lock);
2834 return;
2835 }
2836
a096309b
PZ
2837 if (!task) {
2838 cpu_function_call(cpu, __perf_install_in_context, event);
2839 return;
2840 }
2841
2842 /*
2843 * Should not happen, we validate the ctx is still alive before calling.
2844 */
2845 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2846 return;
2847
39a43640
PZ
2848 /*
2849 * Installing events is tricky because we cannot rely on ctx->is_active
2850 * to be set in case this is the nr_events 0 -> 1 transition.
63cae12b
PZ
2851 *
2852 * Instead we use task_curr(), which tells us if the task is running.
2853 * However, since we use task_curr() outside of rq::lock, we can race
2854 * against the actual state. This means the result can be wrong.
2855 *
2856 * If we get a false positive, we retry, this is harmless.
2857 *
2858 * If we get a false negative, things are complicated. If we are after
2859 * perf_event_context_sched_in() ctx::lock will serialize us, and the
2860 * value must be correct. If we're before, it doesn't matter since
2861 * perf_event_context_sched_in() will program the counter.
2862 *
2863 * However, this hinges on the remote context switch having observed
2864 * our task->perf_event_ctxp[] store, such that it will in fact take
2865 * ctx::lock in perf_event_context_sched_in().
2866 *
2867 * We do this by task_function_call(), if the IPI fails to hit the task
2868 * we know any future context switch of task must see the
2869 * perf_event_ctxp[] store.
39a43640 2870 */
63cae12b 2871
63b6da39 2872 /*
63cae12b
PZ
2873 * This smp_mb() orders the task->perf_event_ctxp[] store with the
2874 * task_cpu() load, such that if the IPI then does not find the task
2875 * running, a future context switch of that task must observe the
2876 * store.
63b6da39 2877 */
63cae12b
PZ
2878 smp_mb();
2879again:
2880 if (!task_function_call(task, __perf_install_in_context, event))
a096309b
PZ
2881 return;
2882
2883 raw_spin_lock_irq(&ctx->lock);
2884 task = ctx->task;
84c4e620 2885 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
a096309b
PZ
2886 /*
2887 * Cannot happen because we already checked above (which also
2888 * cannot happen), and we hold ctx->mutex, which serializes us
2889 * against perf_event_exit_task_context().
2890 */
63b6da39
PZ
2891 raw_spin_unlock_irq(&ctx->lock);
2892 return;
2893 }
39a43640 2894 /*
63cae12b
PZ
2895 * If the task is not running, ctx->lock will avoid it becoming so,
2896 * thus we can safely install the event.
39a43640 2897 */
63cae12b
PZ
2898 if (task_curr(task)) {
2899 raw_spin_unlock_irq(&ctx->lock);
2900 goto again;
2901 }
2902 add_event_to_ctx(event, ctx);
2903 raw_spin_unlock_irq(&ctx->lock);
0793a61d
TG
2904}
2905
d859e29f 2906/*
cdd6c482 2907 * Cross CPU call to enable a performance event
d859e29f 2908 */
fae3fde6
PZ
2909static void __perf_event_enable(struct perf_event *event,
2910 struct perf_cpu_context *cpuctx,
2911 struct perf_event_context *ctx,
2912 void *info)
04289bb9 2913{
cdd6c482 2914 struct perf_event *leader = event->group_leader;
fae3fde6 2915 struct perf_event_context *task_ctx;
04289bb9 2916
6e801e01
PZ
2917 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2918 event->state <= PERF_EVENT_STATE_ERROR)
fae3fde6 2919 return;
3cbed429 2920
bd2afa49
PZ
2921 if (ctx->is_active)
2922 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2923
0d3d73aa 2924 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
33238c50 2925 perf_cgroup_event_enable(event, ctx);
04289bb9 2926
fae3fde6
PZ
2927 if (!ctx->is_active)
2928 return;
2929
e5d1367f 2930 if (!event_filter_match(event)) {
bd2afa49 2931 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
fae3fde6 2932 return;
e5d1367f 2933 }
f4c4176f 2934
04289bb9 2935 /*
cdd6c482 2936 * If the event is in a group and isn't the group leader,
d859e29f 2937 * then don't put it on unless the group is on.
04289bb9 2938 */
bd2afa49
PZ
2939 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2940 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
fae3fde6 2941 return;
bd2afa49 2942 }
fe4b04fa 2943
fae3fde6
PZ
2944 task_ctx = cpuctx->task_ctx;
2945 if (ctx->task)
2946 WARN_ON_ONCE(task_ctx != ctx);
d859e29f 2947
487f05e1 2948 ctx_resched(cpuctx, task_ctx, get_event_type(event));
7b648018
PZ
2949}
2950
d859e29f 2951/*
788faab7 2952 * Enable an event.
c93f7669 2953 *
cdd6c482
IM
2954 * If event->ctx is a cloned context, callers must make sure that
2955 * every task struct that event->ctx->task could possibly point to
c93f7669 2956 * remains valid. This condition is satisfied when called through
cdd6c482
IM
2957 * perf_event_for_each_child or perf_event_for_each as described
2958 * for perf_event_disable.
d859e29f 2959 */
f63a8daa 2960static void _perf_event_enable(struct perf_event *event)
d859e29f 2961{
cdd6c482 2962 struct perf_event_context *ctx = event->ctx;
d859e29f 2963
7b648018 2964 raw_spin_lock_irq(&ctx->lock);
6e801e01
PZ
2965 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2966 event->state < PERF_EVENT_STATE_ERROR) {
7b648018 2967 raw_spin_unlock_irq(&ctx->lock);
d859e29f
PM
2968 return;
2969 }
2970
d859e29f 2971 /*
cdd6c482 2972 * If the event is in error state, clear that first.
7b648018
PZ
2973 *
2974 * That way, if we see the event in error state below, we know that it
2975 * has gone back into error state, as distinct from the task having
2976 * been scheduled away before the cross-call arrived.
d859e29f 2977 */
cdd6c482
IM
2978 if (event->state == PERF_EVENT_STATE_ERROR)
2979 event->state = PERF_EVENT_STATE_OFF;
e625cce1 2980 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa 2981
fae3fde6 2982 event_function_call(event, __perf_event_enable, NULL);
d859e29f 2983}
f63a8daa
PZ
2984
2985/*
2986 * See perf_event_disable();
2987 */
2988void perf_event_enable(struct perf_event *event)
2989{
2990 struct perf_event_context *ctx;
2991
2992 ctx = perf_event_ctx_lock(event);
2993 _perf_event_enable(event);
2994 perf_event_ctx_unlock(event, ctx);
2995}
dcfce4a0 2996EXPORT_SYMBOL_GPL(perf_event_enable);
d859e29f 2997
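/*
 * Editor's sketch (userspace, not kernel code): the enable/disable paths
 * above are what a perf_event_attr with .disabled = 1 and the
 * PERF_EVENT_IOC_{ENABLE,DISABLE} ioctls eventually reach.  Minimal usage,
 * counting instructions for the calling thread (error handling trimmed):
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
    struct perf_event_attr attr;
    long long count;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_HARDWARE;
    attr.config = PERF_COUNT_HW_INSTRUCTIONS;
    attr.disabled = 1;          /* starts OFF; see perf_event__state_init() */

    fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    if (fd < 0)
        return 1;

    ioctl(fd, PERF_EVENT_IOC_RESET, 0);
    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);    /* -> perf_event_enable() path */
    /* ... workload to measure ... */
    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);   /* -> perf_event_disable() path */

    if (read(fd, &count, sizeof(count)) == sizeof(count))
        printf("instructions: %lld\n", count);
    close(fd);
    return 0;
}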
375637bc
AS
2998struct stop_event_data {
2999 struct perf_event *event;
3000 unsigned int restart;
3001};
3002
95ff4ca2
AS
3003static int __perf_event_stop(void *info)
3004{
375637bc
AS
3005 struct stop_event_data *sd = info;
3006 struct perf_event *event = sd->event;
95ff4ca2 3007
375637bc 3008 /* if it's already INACTIVE, do nothing */
95ff4ca2
AS
3009 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
3010 return 0;
3011
3012 /* matches smp_wmb() in event_sched_in() */
3013 smp_rmb();
3014
3015 /*
3016 * There is a window with interrupts enabled before we get here,
3017 * so we need to check again lest we try to stop another CPU's event.
3018 */
3019 if (READ_ONCE(event->oncpu) != smp_processor_id())
3020 return -EAGAIN;
3021
3022 event->pmu->stop(event, PERF_EF_UPDATE);
3023
375637bc
AS
3024 /*
3025 * May race with the actual stop (through perf_pmu_output_stop()),
3026 * but it is only used for events with AUX ring buffer, and such
3027 * events will refuse to restart because of rb::aux_mmap_count==0,
3028 * see comments in perf_aux_output_begin().
3029 *
788faab7 3030 * Since this is happening on an event-local CPU, no trace is lost
375637bc
AS
3031 * while restarting.
3032 */
3033 if (sd->restart)
c9bbdd48 3034 event->pmu->start(event, 0);
375637bc 3035
95ff4ca2
AS
3036 return 0;
3037}
3038
767ae086 3039static int perf_event_stop(struct perf_event *event, int restart)
375637bc
AS
3040{
3041 struct stop_event_data sd = {
3042 .event = event,
767ae086 3043 .restart = restart,
375637bc
AS
3044 };
3045 int ret = 0;
3046
3047 do {
3048 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
3049 return 0;
3050
3051 /* matches smp_wmb() in event_sched_in() */
3052 smp_rmb();
3053
3054 /*
3055 * We only want to restart ACTIVE events, so if the event goes
3056 * inactive here (event->oncpu==-1), there's nothing more to do;
3057 * fall through with ret==-ENXIO.
3058 */
3059 ret = cpu_function_call(READ_ONCE(event->oncpu),
3060 __perf_event_stop, &sd);
3061 } while (ret == -EAGAIN);
3062
3063 return ret;
3064}
3065
3066/*
3067 * In order to contain the amount of raciness and trickiness in the address
3068 * filter configuration management, it is a two-part process:
3069 *
3070 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
3071 * we update the addresses of corresponding vmas in
c60f83b8 3072 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
375637bc
AS
3073 * (p2) when an event is scheduled in (pmu::add), it calls
3074 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
3075 * if the generation has changed since the previous call.
3076 *
3077 * If (p1) happens while the event is active, we restart it to force (p2).
3078 *
3079 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
3080 * pre-existing mappings, called once when new filters arrive via SET_FILTER
3081 * ioctl;
3082 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
c1e8d7c6 3083 * registered mapping, called for every new mmap(), with mm::mmap_lock down
375637bc
AS
3084 * for reading;
3085 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
3086 * of exec.
3087 */
3088void perf_event_addr_filters_sync(struct perf_event *event)
3089{
3090 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
3091
3092 if (!has_addr_filter(event))
3093 return;
3094
3095 raw_spin_lock(&ifh->lock);
3096 if (event->addr_filters_gen != event->hw.addr_filters_gen) {
3097 event->pmu->addr_filters_sync(event);
3098 event->hw.addr_filters_gen = event->addr_filters_gen;
3099 }
3100 raw_spin_unlock(&ifh->lock);
3101}
3102EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
3103
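/*
 * Editor's sketch (userspace, not kernel code): the (p1)/(p2) handshake
 * described above, reduced to its core.  The writer bumps a generation
 * counter whenever the filter data changes; the consumer reprograms only
 * when its own snapshot of the generation is stale.  Names are invented and
 * the locking is deliberately omitted:
 */
#include <stdio.h>

struct filters {
    unsigned long gen;          /* bumped by (p1) */
    int data;                   /* stands in for addr_filter_ranges */
};

struct hw_state {
    unsigned long synced_gen;   /* last generation pushed to hardware */
    int data;
};

static void filters_update(struct filters *f, int new_data)    /* (p1) */
{
    f->data = new_data;
    f->gen++;
}

static void filters_sync(struct hw_state *hw, const struct filters *f) /* (p2) */
{
    if (hw->synced_gen == f->gen)
        return;                 /* nothing changed since the last sync */
    hw->data = f->data;         /* expensive reprogramming happens here */
    hw->synced_gen = f->gen;
    printf("reprogrammed hardware with data=%d (gen=%lu)\n",
           hw->data, hw->synced_gen);
}

int main(void)
{
    struct filters f = { 0, 0 };
    struct hw_state hw = { 0, 0 };

    filters_update(&f, 42);
    filters_sync(&hw, &f);      /* reprograms */
    filters_sync(&hw, &f);      /* no-op: generation unchanged */
    return 0;
}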
f63a8daa 3104static int _perf_event_refresh(struct perf_event *event, int refresh)
79f14641 3105{
2023b359 3106 /*
cdd6c482 3107 * not supported on inherited events
2023b359 3108 */
2e939d1d 3109 if (event->attr.inherit || !is_sampling_event(event))
2023b359
PZ
3110 return -EINVAL;
3111
cdd6c482 3112 atomic_add(refresh, &event->event_limit);
f63a8daa 3113 _perf_event_enable(event);
2023b359
PZ
3114
3115 return 0;
79f14641 3116}
f63a8daa
PZ
3117
3118/*
3119 * See perf_event_disable()
3120 */
3121int perf_event_refresh(struct perf_event *event, int refresh)
3122{
3123 struct perf_event_context *ctx;
3124 int ret;
3125
3126 ctx = perf_event_ctx_lock(event);
3127 ret = _perf_event_refresh(event, refresh);
3128 perf_event_ctx_unlock(event, ctx);
3129
3130 return ret;
3131}
26ca5c11 3132EXPORT_SYMBOL_GPL(perf_event_refresh);
79f14641 3133
32ff77e8
MC
3134static int perf_event_modify_breakpoint(struct perf_event *bp,
3135 struct perf_event_attr *attr)
3136{
3137 int err;
3138
3139 _perf_event_disable(bp);
3140
3141 err = modify_user_hw_breakpoint_check(bp, attr, true);
32ff77e8 3142
bf06278c 3143 if (!bp->attr.disabled)
32ff77e8 3144 _perf_event_enable(bp);
bf06278c
JO
3145
3146 return err;
32ff77e8
MC
3147}
3148
3149static int perf_event_modify_attr(struct perf_event *event,
3150 struct perf_event_attr *attr)
3151{
3152 if (event->attr.type != attr->type)
3153 return -EINVAL;
3154
3155 switch (event->attr.type) {
3156 case PERF_TYPE_BREAKPOINT:
3157 return perf_event_modify_breakpoint(event, attr);
3158 default:
3159 /* Place holder for future additions. */
3160 return -EOPNOTSUPP;
3161 }
3162}
3163
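/*
 * Editor's sketch (userspace, not kernel code): perf_event_modify_attr()
 * above is reached from the PERF_EVENT_IOC_MODIFY_ATTRIBUTES ioctl, and the
 * switch shows that only PERF_TYPE_BREAKPOINT supports it today.  A minimal
 * use, re-aiming a write breakpoint from one variable to another (error
 * handling elided):
 */
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static int x, y;

int main(void)
{
    struct perf_event_attr attr;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_BREAKPOINT;
    attr.bp_type = HW_BREAKPOINT_W;
    attr.bp_addr = (uintptr_t)&x;
    attr.bp_len = HW_BREAKPOINT_LEN_4;

    fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    if (fd < 0)
        return 1;

    x = 1;                          /* counted against the old address */

    attr.bp_addr = (uintptr_t)&y;   /* re-aim the breakpoint */
    ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr);

    y = 1;                          /* counted against the new address */
    close(fd);
    return 0;
}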
5b0311e1
FW
3164static void ctx_sched_out(struct perf_event_context *ctx,
3165 struct perf_cpu_context *cpuctx,
3166 enum event_type_t event_type)
235c7fc7 3167{
6668128a 3168 struct perf_event *event, *tmp;
db24d33e 3169 int is_active = ctx->is_active;
235c7fc7 3170
c994d613 3171 lockdep_assert_held(&ctx->lock);
235c7fc7 3172
39a43640
PZ
3173 if (likely(!ctx->nr_events)) {
3174 /*
3175 * See __perf_remove_from_context().
3176 */
3177 WARN_ON_ONCE(ctx->is_active);
3178 if (ctx->task)
3179 WARN_ON_ONCE(cpuctx->task_ctx);
facc4307 3180 return;
39a43640
PZ
3181 }
3182
db24d33e 3183 ctx->is_active &= ~event_type;
3cbaa590
PZ
3184 if (!(ctx->is_active & EVENT_ALL))
3185 ctx->is_active = 0;
3186
63e30d3e
PZ
3187 if (ctx->task) {
3188 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3189 if (!ctx->is_active)
3190 cpuctx->task_ctx = NULL;
3191 }
facc4307 3192
8fdc6539
PZ
3193 /*
3194 * Always update time if it was set; not only when it changes.
3195 * Otherwise we can 'forget' to update time for any but the last
3196 * context we sched out. For example:
3197 *
3198 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
3199 * ctx_sched_out(.event_type = EVENT_PINNED)
3200 *
3201 * would only update time for the pinned events.
3202 */
3cbaa590
PZ
3203 if (is_active & EVENT_TIME) {
3204 /* update (and stop) ctx time */
3205 update_context_time(ctx);
3206 update_cgrp_time_from_cpuctx(cpuctx);
3207 }
3208
8fdc6539
PZ
3209 is_active ^= ctx->is_active; /* changed bits */
3210
3cbaa590 3211 if (!ctx->nr_active || !(is_active & EVENT_ALL))
facc4307 3212 return;
5b0311e1 3213
075e0b00 3214 perf_pmu_disable(ctx->pmu);
3cbaa590 3215 if (is_active & EVENT_PINNED) {
6668128a 3216 list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
889ff015 3217 group_sched_out(event, cpuctx, ctx);
9ed6060d 3218 }
889ff015 3219
3cbaa590 3220 if (is_active & EVENT_FLEXIBLE) {
6668128a 3221 list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
8c9ed8e1 3222 group_sched_out(event, cpuctx, ctx);
90c91dfb
PZ
3223
3224 /*
3225 * Since we cleared EVENT_FLEXIBLE, also clear
3226 * rotate_necessary; it will be reset by
3227 * ctx_flexible_sched_in() when needed.
3228 */
3229 ctx->rotate_necessary = 0;
9ed6060d 3230 }
1b9a644f 3231 perf_pmu_enable(ctx->pmu);
235c7fc7
IM
3232}
3233
564c2b21 3234/*
5a3126d4
PZ
3235 * Test whether two contexts are equivalent, i.e. whether they have both been
3236 * cloned from the same version of the same context.
3237 *
3238 * Equivalence is measured using a generation number in the context that is
3239 * incremented on each modification to it; see unclone_ctx(), list_add_event()
3240 * and list_del_event().
564c2b21 3241 */
cdd6c482
IM
3242static int context_equiv(struct perf_event_context *ctx1,
3243 struct perf_event_context *ctx2)
564c2b21 3244{
211de6eb
PZ
3245 lockdep_assert_held(&ctx1->lock);
3246 lockdep_assert_held(&ctx2->lock);
3247
5a3126d4
PZ
3248 /* Pinning disables the swap optimization */
3249 if (ctx1->pin_count || ctx2->pin_count)
3250 return 0;
3251
3252 /* If ctx1 is the parent of ctx2 */
3253 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
3254 return 1;
3255
3256 /* If ctx2 is the parent of ctx1 */
3257 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
3258 return 1;
3259
3260 /*
3261 * If ctx1 and ctx2 have the same parent; we flatten the parent
3262 * hierarchy, see perf_event_init_context().
3263 */
3264 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
3265 ctx1->parent_gen == ctx2->parent_gen)
3266 return 1;
3267
3268 /* Unmatched */
3269 return 0;
564c2b21
PM
3270}
3271
cdd6c482
IM
3272static void __perf_event_sync_stat(struct perf_event *event,
3273 struct perf_event *next_event)
bfbd3381
PZ
3274{
3275 u64 value;
3276
cdd6c482 3277 if (!event->attr.inherit_stat)
bfbd3381
PZ
3278 return;
3279
3280 /*
cdd6c482 3281 * Update the event value, we cannot use perf_event_read()
bfbd3381
PZ
3282 * because we're in the middle of a context switch and have IRQs
3283 * disabled, which upsets smp_call_function_single(), however
cdd6c482 3284 * we know the event must be on the current CPU, therefore we
bfbd3381
PZ
3285 * don't need to use it.
3286 */
0d3d73aa 3287 if (event->state == PERF_EVENT_STATE_ACTIVE)
3dbebf15 3288 event->pmu->read(event);
bfbd3381 3289
0d3d73aa 3290 perf_event_update_time(event);
bfbd3381
PZ
3291
3292 /*
cdd6c482 3293 * In order to keep per-task stats reliable we need to flip the event
bfbd3381
PZ
3294 * values when we flip the contexts.
3295 */
e7850595
PZ
3296 value = local64_read(&next_event->count);
3297 value = local64_xchg(&event->count, value);
3298 local64_set(&next_event->count, value);
bfbd3381 3299
cdd6c482
IM
3300 swap(event->total_time_enabled, next_event->total_time_enabled);
3301 swap(event->total_time_running, next_event->total_time_running);
19d2e755 3302
bfbd3381 3303 /*
19d2e755 3304 * Since we swizzled the values, update the user visible data too.
bfbd3381 3305 */
cdd6c482
IM
3306 perf_event_update_userpage(event);
3307 perf_event_update_userpage(next_event);
bfbd3381
PZ
3308}
3309
cdd6c482
IM
3310static void perf_event_sync_stat(struct perf_event_context *ctx,
3311 struct perf_event_context *next_ctx)
bfbd3381 3312{
cdd6c482 3313 struct perf_event *event, *next_event;
bfbd3381
PZ
3314
3315 if (!ctx->nr_stat)
3316 return;
3317
02ffdbc8
PZ
3318 update_context_time(ctx);
3319
cdd6c482
IM
3320 event = list_first_entry(&ctx->event_list,
3321 struct perf_event, event_entry);
bfbd3381 3322
cdd6c482
IM
3323 next_event = list_first_entry(&next_ctx->event_list,
3324 struct perf_event, event_entry);
bfbd3381 3325
cdd6c482
IM
3326 while (&event->event_entry != &ctx->event_list &&
3327 &next_event->event_entry != &next_ctx->event_list) {
bfbd3381 3328
cdd6c482 3329 __perf_event_sync_stat(event, next_event);
bfbd3381 3330
cdd6c482
IM
3331 event = list_next_entry(event, event_entry);
3332 next_event = list_next_entry(next_event, event_entry);
bfbd3381
PZ
3333 }
3334}
3335
fe4b04fa
PZ
3336static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
3337 struct task_struct *next)
0793a61d 3338{
8dc85d54 3339 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
cdd6c482 3340 struct perf_event_context *next_ctx;
5a3126d4 3341 struct perf_event_context *parent, *next_parent;
108b02cf 3342 struct perf_cpu_context *cpuctx;
c93f7669 3343 int do_switch = 1;
0793a61d 3344
108b02cf
PZ
3345 if (likely(!ctx))
3346 return;
10989fb2 3347
108b02cf
PZ
3348 cpuctx = __get_cpu_context(ctx);
3349 if (!cpuctx->task_ctx)
0793a61d
TG
3350 return;
3351
c93f7669 3352 rcu_read_lock();
8dc85d54 3353 next_ctx = next->perf_event_ctxp[ctxn];
5a3126d4
PZ
3354 if (!next_ctx)
3355 goto unlock;
3356
3357 parent = rcu_dereference(ctx->parent_ctx);
3358 next_parent = rcu_dereference(next_ctx->parent_ctx);
3359
3360 /* If neither context have a parent context; they cannot be clones. */
802c8a61 3361 if (!parent && !next_parent)
5a3126d4
PZ
3362 goto unlock;
3363
3364 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
c93f7669
PM
3365 /*
3366 * Looks like the two contexts are clones, so we might be
3367 * able to optimize the context switch. We lock both
3368 * contexts and check that they are clones under the
3369 * lock (including re-checking that neither has been
3370 * uncloned in the meantime). It doesn't matter which
3371 * order we take the locks because no other cpu could
3372 * be trying to lock both of these tasks.
3373 */
e625cce1
TG
3374 raw_spin_lock(&ctx->lock);
3375 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
c93f7669 3376 if (context_equiv(ctx, next_ctx)) {
c2b98a86
AB
3377 struct pmu *pmu = ctx->pmu;
3378
63b6da39
PZ
3379 WRITE_ONCE(ctx->task, next);
3380 WRITE_ONCE(next_ctx->task, task);
5a158c3c 3381
c2b98a86
AB
3382 /*
3383 * PMU specific parts of task perf context can require
3384 * additional synchronization. As an example of such
3385 * synchronization, see the implementation details of Intel
3386 * LBR call stack data profiling.
3387 */
3388 if (pmu->swap_task_ctx)
3389 pmu->swap_task_ctx(ctx, next_ctx);
3390 else
3391 swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
5a158c3c 3392
63b6da39
PZ
3393 /*
3394 * RCU_INIT_POINTER here is safe because we've not
3395 * modified the ctx and the above modification of
3396 * ctx->task and ctx->task_ctx_data are immaterial
3397 * since those values are always verified under
3398 * ctx->lock which we're now holding.
3399 */
3400 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
3401 RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
3402
c93f7669 3403 do_switch = 0;
bfbd3381 3404
cdd6c482 3405 perf_event_sync_stat(ctx, next_ctx);
c93f7669 3406 }
e625cce1
TG
3407 raw_spin_unlock(&next_ctx->lock);
3408 raw_spin_unlock(&ctx->lock);
564c2b21 3409 }
5a3126d4 3410unlock:
c93f7669 3411 rcu_read_unlock();
564c2b21 3412
c93f7669 3413 if (do_switch) {
facc4307 3414 raw_spin_lock(&ctx->lock);
487f05e1 3415 task_ctx_sched_out(cpuctx, ctx, EVENT_ALL);
facc4307 3416 raw_spin_unlock(&ctx->lock);
c93f7669 3417 }
0793a61d
TG
3418}
3419
e48c1788
PZ
3420static DEFINE_PER_CPU(struct list_head, sched_cb_list);
3421
ba532500
YZ
3422void perf_sched_cb_dec(struct pmu *pmu)
3423{
e48c1788
PZ
3424 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
3425
ba532500 3426 this_cpu_dec(perf_sched_cb_usages);
e48c1788
PZ
3427
3428 if (!--cpuctx->sched_cb_usage)
3429 list_del(&cpuctx->sched_cb_entry);
ba532500
YZ
3430}
3431
e48c1788 3432
ba532500
YZ
3433void perf_sched_cb_inc(struct pmu *pmu)
3434{
e48c1788
PZ
3435 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
3436
3437 if (!cpuctx->sched_cb_usage++)
3438 list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
3439
ba532500
YZ
3440 this_cpu_inc(perf_sched_cb_usages);
3441}
3442
3443/*
3444 * This function provides the context switch callback to the lower code
3445 * layer. It is invoked ONLY when the context switch callback is enabled.
09e61b4f
PZ
3446 *
3447 * This callback is relevant even to per-cpu events; for example, multi-event
3448 * PEBS requires this to provide PID/TID information. This requires we flush
3449 * all queued PEBS records before we context switch to a new task.
ba532500
YZ
3450 */
3451static void perf_pmu_sched_task(struct task_struct *prev,
3452 struct task_struct *next,
3453 bool sched_in)
3454{
3455 struct perf_cpu_context *cpuctx;
3456 struct pmu *pmu;
ba532500
YZ
3457
3458 if (prev == next)
3459 return;
3460
e48c1788 3461 list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
1fd7e416 3462 pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */
ba532500 3463
e48c1788
PZ
3464 if (WARN_ON_ONCE(!pmu->sched_task))
3465 continue;
ba532500 3466
e48c1788
PZ
3467 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
3468 perf_pmu_disable(pmu);
ba532500 3469
e48c1788 3470 pmu->sched_task(cpuctx->task_ctx, sched_in);
ba532500 3471
e48c1788
PZ
3472 perf_pmu_enable(pmu);
3473 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
ba532500 3474 }
ba532500
YZ
3475}
3476
45ac1403
AH
3477static void perf_event_switch(struct task_struct *task,
3478 struct task_struct *next_prev, bool sched_in);
3479
8dc85d54
PZ
3480#define for_each_task_context_nr(ctxn) \
3481 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
3482
3483/*
3484 * Called from scheduler to remove the events of the current task,
3485 * with interrupts disabled.
3486 *
3487 * We stop each event and update the event value in event->count.
3488 *
3489 * This does not protect us against NMI, but disable()
3490 * sets the disabled bit in the control field of event _before_
 3491 * accessing the event control register. If an NMI hits, then it will
3492 * not restart the event.
3493 */
ab0cce56
JO
3494void __perf_event_task_sched_out(struct task_struct *task,
3495 struct task_struct *next)
8dc85d54
PZ
3496{
3497 int ctxn;
3498
ba532500
YZ
3499 if (__this_cpu_read(perf_sched_cb_usages))
3500 perf_pmu_sched_task(task, next, false);
3501
45ac1403
AH
3502 if (atomic_read(&nr_switch_events))
3503 perf_event_switch(task, next, false);
3504
8dc85d54
PZ
3505 for_each_task_context_nr(ctxn)
3506 perf_event_context_sched_out(task, ctxn, next);
e5d1367f
SE
3507
3508 /*
 3509 * If cgroup events exist on this CPU, then we need
 3510 * to check if we have to switch out PMU state.
 3511 * Cgroup events are system-wide mode only.
3512 */
4a32fea9 3513 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
a8d757ef 3514 perf_cgroup_sched_out(task, next);
8dc85d54
PZ
3515}
3516
5b0311e1
FW
3517/*
3518 * Called with IRQs disabled
3519 */
3520static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
3521 enum event_type_t event_type)
3522{
3523 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
04289bb9
IM
3524}
3525
6eef8a71 3526static bool perf_less_group_idx(const void *l, const void *r)
0793a61d 3527{
24fb6b8e
IR
3528 const struct perf_event *le = *(const struct perf_event **)l;
3529 const struct perf_event *re = *(const struct perf_event **)r;
6eef8a71
IR
3530
3531 return le->group_index < re->group_index;
3532}
3533
3534static void swap_ptr(void *l, void *r)
3535{
3536 void **lp = l, **rp = r;
3537
3538 swap(*lp, *rp);
3539}
3540
3541static const struct min_heap_callbacks perf_min_heap = {
3542 .elem_size = sizeof(struct perf_event *),
3543 .less = perf_less_group_idx,
3544 .swp = swap_ptr,
3545};
3546
3547static void __heap_add(struct min_heap *heap, struct perf_event *event)
3548{
3549 struct perf_event **itrs = heap->data;
3550
3551 if (event) {
3552 itrs[heap->nr] = event;
3553 heap->nr++;
3554 }
3555}
3556
836196be
IR
3557static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
3558 struct perf_event_groups *groups, int cpu,
6eef8a71
IR
3559 int (*func)(struct perf_event *, void *),
3560 void *data)
3561{
95ed6c70
IR
3562#ifdef CONFIG_CGROUP_PERF
3563 struct cgroup_subsys_state *css = NULL;
3564#endif
6eef8a71
IR
3565 /* Space for per CPU and/or any CPU event iterators. */
3566 struct perf_event *itrs[2];
836196be
IR
3567 struct min_heap event_heap;
3568 struct perf_event **evt;
1cac7b1a 3569 int ret;
8e1a2031 3570
836196be
IR
3571 if (cpuctx) {
3572 event_heap = (struct min_heap){
3573 .data = cpuctx->heap,
3574 .nr = 0,
3575 .size = cpuctx->heap_size,
3576 };
c2283c93
IR
3577
3578 lockdep_assert_held(&cpuctx->ctx.lock);
95ed6c70
IR
3579
3580#ifdef CONFIG_CGROUP_PERF
3581 if (cpuctx->cgrp)
3582 css = &cpuctx->cgrp->css;
3583#endif
836196be
IR
3584 } else {
3585 event_heap = (struct min_heap){
3586 .data = itrs,
3587 .nr = 0,
3588 .size = ARRAY_SIZE(itrs),
3589 };
3590 /* Events not within a CPU context may be on any CPU. */
95ed6c70 3591 __heap_add(&event_heap, perf_event_groups_first(groups, -1, NULL));
836196be
IR
3592 }
3593 evt = event_heap.data;
3594
95ed6c70
IR
3595 __heap_add(&event_heap, perf_event_groups_first(groups, cpu, NULL));
3596
3597#ifdef CONFIG_CGROUP_PERF
3598 for (; css; css = css->parent)
3599 __heap_add(&event_heap, perf_event_groups_first(groups, cpu, css->cgroup));
3600#endif
1cac7b1a 3601
6eef8a71 3602 min_heapify_all(&event_heap, &perf_min_heap);
1cac7b1a 3603
6eef8a71 3604 while (event_heap.nr) {
1cac7b1a
PZ
3605 ret = func(*evt, data);
3606 if (ret)
3607 return ret;
3608
3609 *evt = perf_event_groups_next(*evt);
6eef8a71
IR
3610 if (*evt)
3611 min_heapify(&event_heap, 0, &perf_min_heap);
3612 else
3613 min_heap_pop(&event_heap, &perf_min_heap);
8e1a2031 3614 }
0793a61d 3615
1cac7b1a
PZ
3616 return 0;
3617}
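/*
 * In short: each iterator added to @event_heap walks one key-space of the
 * groups RB-tree -- the any-CPU events (cpu == -1, task contexts only),
 * this CPU's events, and one iterator per cgroup ancestor when
 * CONFIG_CGROUP_PERF is enabled. Popping by smallest group_index visits
 * the candidates in creation order, which keeps scheduling fair across
 * the sub-trees.
 */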
3618
ab6f824c 3619static int merge_sched_in(struct perf_event *event, void *data)
1cac7b1a 3620{
2c2366c7
PZ
3621 struct perf_event_context *ctx = event->ctx;
3622 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
3623 int *can_add_hw = data;
ab6f824c 3624
1cac7b1a
PZ
3625 if (event->state <= PERF_EVENT_STATE_OFF)
3626 return 0;
3627
3628 if (!event_filter_match(event))
3629 return 0;
3630
2c2366c7
PZ
3631 if (group_can_go_on(event, cpuctx, *can_add_hw)) {
3632 if (!group_sched_in(event, cpuctx, ctx))
ab6f824c 3633 list_add_tail(&event->active_list, get_event_list(event));
6668128a 3634 }
1cac7b1a 3635
ab6f824c 3636 if (event->state == PERF_EVENT_STATE_INACTIVE) {
33238c50
PZ
3637 if (event->attr.pinned) {
3638 perf_cgroup_event_disable(event, ctx);
ab6f824c 3639 perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
33238c50 3640 }
1cac7b1a 3641
2c2366c7
PZ
3642 *can_add_hw = 0;
3643 ctx->rotate_necessary = 1;
3b6f9e5c 3644 }
1cac7b1a
PZ
3645
3646 return 0;
5b0311e1
FW
3647}
3648
3649static void
1cac7b1a
PZ
3650ctx_pinned_sched_in(struct perf_event_context *ctx,
3651 struct perf_cpu_context *cpuctx)
5b0311e1 3652{
2c2366c7 3653 int can_add_hw = 1;
3b6f9e5c 3654
836196be
IR
3655 if (ctx != &cpuctx->ctx)
3656 cpuctx = NULL;
3657
3658 visit_groups_merge(cpuctx, &ctx->pinned_groups,
1cac7b1a 3659 smp_processor_id(),
2c2366c7 3660 merge_sched_in, &can_add_hw);
1cac7b1a 3661}
8e1a2031 3662
1cac7b1a
PZ
3663static void
3664ctx_flexible_sched_in(struct perf_event_context *ctx,
3665 struct perf_cpu_context *cpuctx)
3666{
2c2366c7 3667 int can_add_hw = 1;
0793a61d 3668
836196be
IR
3669 if (ctx != &cpuctx->ctx)
3670 cpuctx = NULL;
3671
3672 visit_groups_merge(cpuctx, &ctx->flexible_groups,
1cac7b1a 3673 smp_processor_id(),
2c2366c7 3674 merge_sched_in, &can_add_hw);
5b0311e1
FW
3675}
3676
3677static void
3678ctx_sched_in(struct perf_event_context *ctx,
3679 struct perf_cpu_context *cpuctx,
e5d1367f
SE
3680 enum event_type_t event_type,
3681 struct task_struct *task)
5b0311e1 3682{
db24d33e 3683 int is_active = ctx->is_active;
c994d613
PZ
3684 u64 now;
3685
3686 lockdep_assert_held(&ctx->lock);
e5d1367f 3687
5b0311e1 3688 if (likely(!ctx->nr_events))
facc4307 3689 return;
5b0311e1 3690
3cbaa590 3691 ctx->is_active |= (event_type | EVENT_TIME);
63e30d3e
PZ
3692 if (ctx->task) {
3693 if (!is_active)
3694 cpuctx->task_ctx = ctx;
3695 else
3696 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
3697 }
3698
3cbaa590
PZ
3699 is_active ^= ctx->is_active; /* changed bits */
3700
3701 if (is_active & EVENT_TIME) {
3702 /* start ctx time */
3703 now = perf_clock();
3704 ctx->timestamp = now;
3705 perf_cgroup_set_timestamp(task, ctx);
3706 }
3707
5b0311e1
FW
3708 /*
3709 * First go through the list and put on any pinned groups
3710 * in order to give them the best chance of going on.
3711 */
3cbaa590 3712 if (is_active & EVENT_PINNED)
6e37738a 3713 ctx_pinned_sched_in(ctx, cpuctx);
5b0311e1
FW
3714
3715 /* Then walk through the lower prio flexible groups */
3cbaa590 3716 if (is_active & EVENT_FLEXIBLE)
6e37738a 3717 ctx_flexible_sched_in(ctx, cpuctx);
235c7fc7
IM
3718}
3719
329c0e01 3720static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
3721 enum event_type_t event_type,
3722 struct task_struct *task)
329c0e01
FW
3723{
3724 struct perf_event_context *ctx = &cpuctx->ctx;
3725
e5d1367f 3726 ctx_sched_in(ctx, cpuctx, event_type, task);
329c0e01
FW
3727}
3728
e5d1367f
SE
3729static void perf_event_context_sched_in(struct perf_event_context *ctx,
3730 struct task_struct *task)
235c7fc7 3731{
108b02cf 3732 struct perf_cpu_context *cpuctx;
235c7fc7 3733
108b02cf 3734 cpuctx = __get_cpu_context(ctx);
329c0e01
FW
3735 if (cpuctx->task_ctx == ctx)
3736 return;
3737
facc4307 3738 perf_ctx_lock(cpuctx, ctx);
fdccc3fb 3739 /*
3740 * We must check ctx->nr_events while holding ctx->lock, such
3741 * that we serialize against perf_install_in_context().
3742 */
3743 if (!ctx->nr_events)
3744 goto unlock;
3745
1b9a644f 3746 perf_pmu_disable(ctx->pmu);
329c0e01
FW
3747 /*
3748 * We want to keep the following priority order:
3749 * cpu pinned (that don't need to move), task pinned,
3750 * cpu flexible, task flexible.
fe45bafb
AS
3751 *
 3752 * However, if the task's ctx is not carrying any pinned
 3753 * events, there is no need to flip the cpuctx's events around.
329c0e01 3754 */
8e1a2031 3755 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
fe45bafb 3756 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
63e30d3e 3757 perf_event_sched_in(cpuctx, ctx, task);
facc4307 3758 perf_pmu_enable(ctx->pmu);
fdccc3fb 3759
3760unlock:
facc4307 3761 perf_ctx_unlock(cpuctx, ctx);
235c7fc7
IM
3762}
3763
8dc85d54
PZ
3764/*
3765 * Called from scheduler to add the events of the current task
3766 * with interrupts disabled.
3767 *
3768 * We restore the event value and then enable it.
3769 *
3770 * This does not protect us against NMI, but enable()
3771 * sets the enabled bit in the control field of event _before_
 3772 * accessing the event control register. If an NMI hits, then it will
3773 * keep the event running.
3774 */
ab0cce56
JO
3775void __perf_event_task_sched_in(struct task_struct *prev,
3776 struct task_struct *task)
8dc85d54
PZ
3777{
3778 struct perf_event_context *ctx;
3779 int ctxn;
3780
7e41d177
PZ
3781 /*
3782 * If cgroup events exist on this CPU, then we need to check if we have
 3783 * to switch in PMU state; cgroup events are system-wide mode only.
3784 *
3785 * Since cgroup events are CPU events, we must schedule these in before
3786 * we schedule in the task events.
3787 */
3788 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
3789 perf_cgroup_sched_in(prev, task);
3790
8dc85d54
PZ
3791 for_each_task_context_nr(ctxn) {
3792 ctx = task->perf_event_ctxp[ctxn];
3793 if (likely(!ctx))
3794 continue;
3795
e5d1367f 3796 perf_event_context_sched_in(ctx, task);
8dc85d54 3797 }
d010b332 3798
45ac1403
AH
3799 if (atomic_read(&nr_switch_events))
3800 perf_event_switch(task, prev, true);
3801
ba532500
YZ
3802 if (__this_cpu_read(perf_sched_cb_usages))
3803 perf_pmu_sched_task(prev, task, true);
235c7fc7
IM
3804}
3805
abd50713
PZ
3806static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
3807{
3808 u64 frequency = event->attr.sample_freq;
3809 u64 sec = NSEC_PER_SEC;
3810 u64 divisor, dividend;
3811
3812 int count_fls, nsec_fls, frequency_fls, sec_fls;
3813
3814 count_fls = fls64(count);
3815 nsec_fls = fls64(nsec);
3816 frequency_fls = fls64(frequency);
3817 sec_fls = 30;
3818
3819 /*
3820 * We got @count in @nsec, with a target of sample_freq HZ
3821 * the target period becomes:
3822 *
3823 * @count * 10^9
3824 * period = -------------------
3825 * @nsec * sample_freq
3826 *
3827 */
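	/*
	 * For example: with @count = 1,000,000 events observed over
	 * @nsec = 10,000,000 ns (10 ms) and sample_freq = 1000 Hz, the
	 * target period is 1e6 * 1e9 / (1e7 * 1000) = 100,000 events
	 * per sample.
	 */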
3828
3829 /*
3830 * Reduce accuracy by one bit such that @a and @b converge
3831 * to a similar magnitude.
3832 */
fe4b04fa 3833#define REDUCE_FLS(a, b) \
abd50713
PZ
3834do { \
3835 if (a##_fls > b##_fls) { \
3836 a >>= 1; \
3837 a##_fls--; \
3838 } else { \
3839 b >>= 1; \
3840 b##_fls--; \
3841 } \
3842} while (0)
3843
3844 /*
3845 * Reduce accuracy until either term fits in a u64, then proceed with
3846 * the other, so that finally we can do a u64/u64 division.
3847 */
3848 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
3849 REDUCE_FLS(nsec, frequency);
3850 REDUCE_FLS(sec, count);
3851 }
3852
3853 if (count_fls + sec_fls > 64) {
3854 divisor = nsec * frequency;
3855
3856 while (count_fls + sec_fls > 64) {
3857 REDUCE_FLS(count, sec);
3858 divisor >>= 1;
3859 }
3860
3861 dividend = count * sec;
3862 } else {
3863 dividend = count * sec;
3864
3865 while (nsec_fls + frequency_fls > 64) {
3866 REDUCE_FLS(nsec, frequency);
3867 dividend >>= 1;
3868 }
3869
3870 divisor = nsec * frequency;
3871 }
3872
f6ab91ad
PZ
3873 if (!divisor)
3874 return dividend;
3875
abd50713
PZ
3876 return div64_u64(dividend, divisor);
3877}
3878
e050e3f0
SE
3879static DEFINE_PER_CPU(int, perf_throttled_count);
3880static DEFINE_PER_CPU(u64, perf_throttled_seq);
3881
f39d47ff 3882static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
bd2b5b12 3883{
cdd6c482 3884 struct hw_perf_event *hwc = &event->hw;
f6ab91ad 3885 s64 period, sample_period;
bd2b5b12
PZ
3886 s64 delta;
3887
abd50713 3888 period = perf_calculate_period(event, nsec, count);
bd2b5b12
PZ
3889
3890 delta = (s64)(period - hwc->sample_period);
3891 delta = (delta + 7) / 8; /* low pass filter */
3892
3893 sample_period = hwc->sample_period + delta;
3894
3895 if (!sample_period)
3896 sample_period = 1;
3897
bd2b5b12 3898 hwc->sample_period = sample_period;
abd50713 3899
e7850595 3900 if (local64_read(&hwc->period_left) > 8*sample_period) {
f39d47ff
SE
3901 if (disable)
3902 event->pmu->stop(event, PERF_EF_UPDATE);
3903
e7850595 3904 local64_set(&hwc->period_left, 0);
f39d47ff
SE
3905
3906 if (disable)
3907 event->pmu->start(event, PERF_EF_RELOAD);
abd50713 3908 }
bd2b5b12
PZ
3909}
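/*
 * Example of the low pass filter above: if the freshly computed period is
 * 80,000 while hwc->sample_period is 100,000, delta is -20,000 and the
 * filtered step is (-20,000 + 7) / 8 = -2,499, so sample_period only moves
 * to 97,501 this tick; large swings are spread over several ticks.
 */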
3910
e050e3f0
SE
3911/*
 3912 * Combine freq adjustment with unthrottling to avoid two passes over the
 3913 * events. At the same time, make sure that having freq events does not change
 3914 * the rate of unthrottling, as that would introduce bias.
3915 */
3916static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
3917 int needs_unthr)
60db5e09 3918{
cdd6c482
IM
3919 struct perf_event *event;
3920 struct hw_perf_event *hwc;
e050e3f0 3921 u64 now, period = TICK_NSEC;
abd50713 3922 s64 delta;
60db5e09 3923
e050e3f0
SE
3924 /*
 3925 * only need to iterate over all events iff:
 3926 * - the context has events in frequency mode (needs freq adjust)
 3927 * - there are events to unthrottle on this cpu
3928 */
3929 if (!(ctx->nr_freq || needs_unthr))
0f5a2601
PZ
3930 return;
3931
e050e3f0 3932 raw_spin_lock(&ctx->lock);
f39d47ff 3933 perf_pmu_disable(ctx->pmu);
e050e3f0 3934
03541f8b 3935 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cdd6c482 3936 if (event->state != PERF_EVENT_STATE_ACTIVE)
60db5e09
PZ
3937 continue;
3938
5632ab12 3939 if (!event_filter_match(event))
5d27c23d
PZ
3940 continue;
3941
44377277
AS
3942 perf_pmu_disable(event->pmu);
3943
cdd6c482 3944 hwc = &event->hw;
6a24ed6c 3945
ae23bff1 3946 if (hwc->interrupts == MAX_INTERRUPTS) {
e050e3f0 3947 hwc->interrupts = 0;
cdd6c482 3948 perf_log_throttle(event, 1);
a4eaf7f1 3949 event->pmu->start(event, 0);
a78ac325
PZ
3950 }
3951
cdd6c482 3952 if (!event->attr.freq || !event->attr.sample_freq)
44377277 3953 goto next;
60db5e09 3954
e050e3f0
SE
3955 /*
3956 * stop the event and update event->count
3957 */
3958 event->pmu->stop(event, PERF_EF_UPDATE);
3959
e7850595 3960 now = local64_read(&event->count);
abd50713
PZ
3961 delta = now - hwc->freq_count_stamp;
3962 hwc->freq_count_stamp = now;
60db5e09 3963
e050e3f0
SE
3964 /*
3965 * restart the event
3966 * reload only if value has changed
f39d47ff
SE
 3967 * We have stopped the event, so tell that
 3968 * to perf_adjust_period() to avoid stopping it
 3969 * twice.
e050e3f0 3970 */
abd50713 3971 if (delta > 0)
f39d47ff 3972 perf_adjust_period(event, period, delta, false);
e050e3f0
SE
3973
3974 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
44377277
AS
3975 next:
3976 perf_pmu_enable(event->pmu);
60db5e09 3977 }
e050e3f0 3978
f39d47ff 3979 perf_pmu_enable(ctx->pmu);
e050e3f0 3980 raw_spin_unlock(&ctx->lock);
60db5e09
PZ
3981}
3982
235c7fc7 3983/*
 8703a7cf 3984 * Move @event to the tail of @ctx's eligible events.
235c7fc7 3985 */
8703a7cf 3986static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
0793a61d 3987{
dddd3379
TG
3988 /*
3989 * Rotate the first entry last of non-pinned groups. Rotation might be
3990 * disabled by the inheritance code.
3991 */
8703a7cf
PZ
3992 if (ctx->rotate_disable)
3993 return;
8e1a2031 3994
8703a7cf
PZ
3995 perf_event_groups_delete(&ctx->flexible_groups, event);
3996 perf_event_groups_insert(&ctx->flexible_groups, event);
235c7fc7
IM
3997}
3998
7fa343b7 3999/* pick an event from the flexible_groups to rotate */
8d5bce0c 4000static inline struct perf_event *
7fa343b7 4001ctx_event_to_rotate(struct perf_event_context *ctx)
235c7fc7 4002{
7fa343b7
SL
4003 struct perf_event *event;
4004
4005 /* pick the first active flexible event */
4006 event = list_first_entry_or_null(&ctx->flexible_active,
4007 struct perf_event, active_list);
4008
4009 /* if no active flexible event, pick the first event */
4010 if (!event) {
4011 event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree),
4012 typeof(*event), group_node);
4013 }
4014
90c91dfb
PZ
4015 /*
4016 * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in()
4017 * finds there are unschedulable events, it will set it again.
4018 */
4019 ctx->rotate_necessary = 0;
4020
7fa343b7 4021 return event;
8d5bce0c
PZ
4022}
4023
4024static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
4025{
4026 struct perf_event *cpu_event = NULL, *task_event = NULL;
fd7d5517
IR
4027 struct perf_event_context *task_ctx = NULL;
4028 int cpu_rotate, task_rotate;
8d5bce0c
PZ
4029
4030 /*
4031 * Since we run this from IRQ context, nobody can install new
4032 * events, thus the event count values are stable.
4033 */
7fc23a53 4034
fd7d5517
IR
4035 cpu_rotate = cpuctx->ctx.rotate_necessary;
4036 task_ctx = cpuctx->task_ctx;
4037 task_rotate = task_ctx ? task_ctx->rotate_necessary : 0;
9717e6cd 4038
8d5bce0c
PZ
4039 if (!(cpu_rotate || task_rotate))
4040 return false;
0f5a2601 4041
facc4307 4042 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
1b9a644f 4043 perf_pmu_disable(cpuctx->ctx.pmu);
60db5e09 4044
8d5bce0c 4045 if (task_rotate)
7fa343b7 4046 task_event = ctx_event_to_rotate(task_ctx);
8d5bce0c 4047 if (cpu_rotate)
7fa343b7 4048 cpu_event = ctx_event_to_rotate(&cpuctx->ctx);
8703a7cf 4049
8d5bce0c
PZ
4050 /*
 4051 * As per the order given at ctx_resched(), first 'pop' the task flexible
 4052 * events and then, if needed, the CPU flexible ones.
4053 */
fd7d5517
IR
4054 if (task_event || (task_ctx && cpu_event))
4055 ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE);
8d5bce0c
PZ
4056 if (cpu_event)
4057 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
0793a61d 4058
8d5bce0c 4059 if (task_event)
fd7d5517 4060 rotate_ctx(task_ctx, task_event);
8d5bce0c
PZ
4061 if (cpu_event)
4062 rotate_ctx(&cpuctx->ctx, cpu_event);
235c7fc7 4063
fd7d5517 4064 perf_event_sched_in(cpuctx, task_ctx, current);
235c7fc7 4065
0f5a2601
PZ
4066 perf_pmu_enable(cpuctx->ctx.pmu);
4067 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
9e630205 4068
8d5bce0c 4069 return true;
e9d2b064
PZ
4070}
4071
4072void perf_event_task_tick(void)
4073{
2fde4f94
MR
4074 struct list_head *head = this_cpu_ptr(&active_ctx_list);
4075 struct perf_event_context *ctx, *tmp;
e050e3f0 4076 int throttled;
b5ab4cd5 4077
16444645 4078 lockdep_assert_irqs_disabled();
e9d2b064 4079
e050e3f0
SE
4080 __this_cpu_inc(perf_throttled_seq);
4081 throttled = __this_cpu_xchg(perf_throttled_count, 0);
555e0c1e 4082 tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
e050e3f0 4083
2fde4f94 4084 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
e050e3f0 4085 perf_adjust_freq_unthr_context(ctx, throttled);
0793a61d
TG
4086}
4087
889ff015
FW
4088static int event_enable_on_exec(struct perf_event *event,
4089 struct perf_event_context *ctx)
4090{
4091 if (!event->attr.enable_on_exec)
4092 return 0;
4093
4094 event->attr.enable_on_exec = 0;
4095 if (event->state >= PERF_EVENT_STATE_INACTIVE)
4096 return 0;
4097
0d3d73aa 4098 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
889ff015
FW
4099
4100 return 1;
4101}
4102
57e7986e 4103/*
cdd6c482 4104 * Enable all of a task's events that have been marked enable-on-exec.
57e7986e
PM
4105 * This expects task == current.
4106 */
c1274499 4107static void perf_event_enable_on_exec(int ctxn)
57e7986e 4108{
c1274499 4109 struct perf_event_context *ctx, *clone_ctx = NULL;
487f05e1 4110 enum event_type_t event_type = 0;
3e349507 4111 struct perf_cpu_context *cpuctx;
cdd6c482 4112 struct perf_event *event;
57e7986e
PM
4113 unsigned long flags;
4114 int enabled = 0;
4115
4116 local_irq_save(flags);
c1274499 4117 ctx = current->perf_event_ctxp[ctxn];
cdd6c482 4118 if (!ctx || !ctx->nr_events)
57e7986e
PM
4119 goto out;
4120
3e349507
PZ
4121 cpuctx = __get_cpu_context(ctx);
4122 perf_ctx_lock(cpuctx, ctx);
7fce2509 4123 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
487f05e1 4124 list_for_each_entry(event, &ctx->event_list, event_entry) {
3e349507 4125 enabled |= event_enable_on_exec(event, ctx);
487f05e1
AS
4126 event_type |= get_event_type(event);
4127 }
57e7986e
PM
4128
4129 /*
3e349507 4130 * Unclone and reschedule this context if we enabled any event.
57e7986e 4131 */
3e349507 4132 if (enabled) {
211de6eb 4133 clone_ctx = unclone_ctx(ctx);
487f05e1 4134 ctx_resched(cpuctx, ctx, event_type);
7bbba0eb
PZ
4135 } else {
4136 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
3e349507
PZ
4137 }
4138 perf_ctx_unlock(cpuctx, ctx);
57e7986e 4139
9ed6060d 4140out:
57e7986e 4141 local_irq_restore(flags);
211de6eb
PZ
4142
4143 if (clone_ctx)
4144 put_ctx(clone_ctx);
57e7986e
PM
4145}
4146
0492d4c5
PZ
4147struct perf_read_data {
4148 struct perf_event *event;
4149 bool group;
7d88962e 4150 int ret;
0492d4c5
PZ
4151};
4152
451d24d1 4153static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
d6a2f903 4154{
d6a2f903
DCC
4155 u16 local_pkg, event_pkg;
4156
4157 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
451d24d1
PZ
4158 int local_cpu = smp_processor_id();
4159
4160 event_pkg = topology_physical_package_id(event_cpu);
4161 local_pkg = topology_physical_package_id(local_cpu);
d6a2f903
DCC
4162
4163 if (event_pkg == local_pkg)
4164 return local_cpu;
4165 }
4166
4167 return event_cpu;
4168}
4169
0793a61d 4170/*
cdd6c482 4171 * Cross CPU call to read the hardware event
0793a61d 4172 */
cdd6c482 4173static void __perf_event_read(void *info)
0793a61d 4174{
0492d4c5
PZ
4175 struct perf_read_data *data = info;
4176 struct perf_event *sub, *event = data->event;
cdd6c482 4177 struct perf_event_context *ctx = event->ctx;
108b02cf 4178 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
4a00c16e 4179 struct pmu *pmu = event->pmu;
621a01ea 4180
e1ac3614
PM
4181 /*
4182 * If this is a task context, we need to check whether it is
 4183 * the current task context of this cpu. If not, it has been
4184 * scheduled out before the smp call arrived. In that case
cdd6c482
IM
4185 * event->count would have been updated to a recent sample
4186 * when the event was scheduled out.
e1ac3614
PM
4187 */
4188 if (ctx->task && cpuctx->task_ctx != ctx)
4189 return;
4190
e625cce1 4191 raw_spin_lock(&ctx->lock);
0c1cbc18 4192 if (ctx->is_active & EVENT_TIME) {
542e72fc 4193 update_context_time(ctx);
e5d1367f
SE
4194 update_cgrp_time_from_event(event);
4195 }
0492d4c5 4196
0d3d73aa
PZ
4197 perf_event_update_time(event);
4198 if (data->group)
4199 perf_event_update_sibling_time(event);
0c1cbc18 4200
4a00c16e
SB
4201 if (event->state != PERF_EVENT_STATE_ACTIVE)
4202 goto unlock;
0492d4c5 4203
4a00c16e
SB
4204 if (!data->group) {
4205 pmu->read(event);
4206 data->ret = 0;
0492d4c5 4207 goto unlock;
4a00c16e
SB
4208 }
4209
4210 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
4211
4212 pmu->read(event);
0492d4c5 4213
edb39592 4214 for_each_sibling_event(sub, event) {
4a00c16e
SB
4215 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
4216 /*
4217 * Use sibling's PMU rather than @event's since
4218 * sibling could be on different (eg: software) PMU.
4219 */
0492d4c5 4220 sub->pmu->read(sub);
4a00c16e 4221 }
0492d4c5 4222 }
4a00c16e
SB
4223
4224 data->ret = pmu->commit_txn(pmu);
0492d4c5
PZ
4225
4226unlock:
e625cce1 4227 raw_spin_unlock(&ctx->lock);
0793a61d
TG
4228}
4229
b5e58793
PZ
4230static inline u64 perf_event_count(struct perf_event *event)
4231{
c39a0e2c 4232 return local64_read(&event->count) + atomic64_read(&event->child_count);
b5e58793
PZ
4233}
4234
ffe8690c
KX
4235/*
4236 * NMI-safe method to read a local event, that is an event that
4237 * is:
4238 * - either for the current task, or for this CPU
 4239 * - does not have inherit set, because inherited task events
 4240 * will not be local and we cannot read them atomically
4241 * - must not have a pmu::count method
4242 */
7d9285e8
YS
4243int perf_event_read_local(struct perf_event *event, u64 *value,
4244 u64 *enabled, u64 *running)
ffe8690c
KX
4245{
4246 unsigned long flags;
f91840a3 4247 int ret = 0;
ffe8690c
KX
4248
4249 /*
4250 * Disabling interrupts avoids all counter scheduling (context
4251 * switches, timer based rotation and IPIs).
4252 */
4253 local_irq_save(flags);
4254
ffe8690c
KX
4255 /*
4256 * It must not be an event with inherit set, we cannot read
4257 * all child counters from atomic context.
4258 */
f91840a3
AS
4259 if (event->attr.inherit) {
4260 ret = -EOPNOTSUPP;
4261 goto out;
4262 }
ffe8690c 4263
f91840a3
AS
4264 /* If this is a per-task event, it must be for current */
4265 if ((event->attach_state & PERF_ATTACH_TASK) &&
4266 event->hw.target != current) {
4267 ret = -EINVAL;
4268 goto out;
4269 }
4270
4271 /* If this is a per-CPU event, it must be for this CPU */
4272 if (!(event->attach_state & PERF_ATTACH_TASK) &&
4273 event->cpu != smp_processor_id()) {
4274 ret = -EINVAL;
4275 goto out;
4276 }
ffe8690c 4277
befb1b3c
RC
4278 /* If this is a pinned event it must be running on this CPU */
4279 if (event->attr.pinned && event->oncpu != smp_processor_id()) {
4280 ret = -EBUSY;
4281 goto out;
4282 }
4283
ffe8690c
KX
4284 /*
4285 * If the event is currently on this CPU, its either a per-task event,
4286 * or local to this CPU. Furthermore it means its ACTIVE (otherwise
4287 * oncpu == -1).
4288 */
4289 if (event->oncpu == smp_processor_id())
4290 event->pmu->read(event);
4291
f91840a3 4292 *value = local64_read(&event->count);
0d3d73aa
PZ
4293 if (enabled || running) {
4294 u64 now = event->shadow_ctx_time + perf_clock();
4295 u64 __enabled, __running;
4296
4297 __perf_update_times(event, now, &__enabled, &__running);
4298 if (enabled)
4299 *enabled = __enabled;
4300 if (running)
4301 *running = __running;
4302 }
f91840a3 4303out:
ffe8690c
KX
4304 local_irq_restore(flags);
4305
f91840a3 4306 return ret;
ffe8690c
KX
4307}
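/*
 * A minimal usage sketch (assuming a valid local, non-inherited @event;
 * in-kernel callers such as bpf_perf_event_read() follow this shape):
 *
 *	u64 value, enabled, running;
 *
 *	if (!perf_event_read_local(event, &value, &enabled, &running))
 *		use value / enabled / running as one consistent snapshot;
 */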
4308
7d88962e 4309static int perf_event_read(struct perf_event *event, bool group)
0793a61d 4310{
0c1cbc18 4311 enum perf_event_state state = READ_ONCE(event->state);
451d24d1 4312 int event_cpu, ret = 0;
7d88962e 4313
0793a61d 4314 /*
cdd6c482
IM
4315 * If event is enabled and currently active on a CPU, update the
4316 * value in the event structure:
0793a61d 4317 */
0c1cbc18
PZ
4318again:
4319 if (state == PERF_EVENT_STATE_ACTIVE) {
4320 struct perf_read_data data;
4321
4322 /*
4323 * Orders the ->state and ->oncpu loads such that if we see
4324 * ACTIVE we must also see the right ->oncpu.
4325 *
4326 * Matches the smp_wmb() from event_sched_in().
4327 */
4328 smp_rmb();
d6a2f903 4329
451d24d1
PZ
4330 event_cpu = READ_ONCE(event->oncpu);
4331 if ((unsigned)event_cpu >= nr_cpu_ids)
4332 return 0;
4333
0c1cbc18
PZ
4334 data = (struct perf_read_data){
4335 .event = event,
4336 .group = group,
4337 .ret = 0,
4338 };
4339
451d24d1
PZ
4340 preempt_disable();
4341 event_cpu = __perf_event_read_cpu(event, event_cpu);
d6a2f903 4342
58763148
PZ
4343 /*
4344 * Purposely ignore the smp_call_function_single() return
4345 * value.
4346 *
451d24d1 4347 * If event_cpu isn't a valid CPU it means the event got
58763148
PZ
4348 * scheduled out and that will have updated the event count.
4349 *
4350 * Therefore, either way, we'll have an up-to-date event count
4351 * after this.
4352 */
451d24d1
PZ
4353 (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
4354 preempt_enable();
58763148 4355 ret = data.ret;
0c1cbc18
PZ
4356
4357 } else if (state == PERF_EVENT_STATE_INACTIVE) {
2b8988c9
PZ
4358 struct perf_event_context *ctx = event->ctx;
4359 unsigned long flags;
4360
e625cce1 4361 raw_spin_lock_irqsave(&ctx->lock, flags);
0c1cbc18
PZ
4362 state = event->state;
4363 if (state != PERF_EVENT_STATE_INACTIVE) {
4364 raw_spin_unlock_irqrestore(&ctx->lock, flags);
4365 goto again;
4366 }
4367
c530ccd9 4368 /*
0c1cbc18
PZ
 4369 * May read while the context is not active (e.g., the thread is
 4370 * blocked); in that case we cannot update the context time.
c530ccd9 4371 */
0c1cbc18 4372 if (ctx->is_active & EVENT_TIME) {
c530ccd9 4373 update_context_time(ctx);
e5d1367f
SE
4374 update_cgrp_time_from_event(event);
4375 }
0c1cbc18 4376
0d3d73aa 4377 perf_event_update_time(event);
0492d4c5 4378 if (group)
0d3d73aa 4379 perf_event_update_sibling_time(event);
e625cce1 4380 raw_spin_unlock_irqrestore(&ctx->lock, flags);
0793a61d 4381 }
7d88962e
SB
4382
4383 return ret;
0793a61d
TG
4384}
4385
a63eaf34 4386/*
cdd6c482 4387 * Initialize the perf_event context in a task_struct:
a63eaf34 4388 */
eb184479 4389static void __perf_event_init_context(struct perf_event_context *ctx)
a63eaf34 4390{
e625cce1 4391 raw_spin_lock_init(&ctx->lock);
a63eaf34 4392 mutex_init(&ctx->mutex);
2fde4f94 4393 INIT_LIST_HEAD(&ctx->active_ctx_list);
8e1a2031
AB
4394 perf_event_groups_init(&ctx->pinned_groups);
4395 perf_event_groups_init(&ctx->flexible_groups);
a63eaf34 4396 INIT_LIST_HEAD(&ctx->event_list);
6668128a
PZ
4397 INIT_LIST_HEAD(&ctx->pinned_active);
4398 INIT_LIST_HEAD(&ctx->flexible_active);
8c94abbb 4399 refcount_set(&ctx->refcount, 1);
eb184479
PZ
4400}
4401
4402static struct perf_event_context *
4403alloc_perf_context(struct pmu *pmu, struct task_struct *task)
4404{
4405 struct perf_event_context *ctx;
4406
4407 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
4408 if (!ctx)
4409 return NULL;
4410
4411 __perf_event_init_context(ctx);
7b3c92b8
MWO
4412 if (task)
4413 ctx->task = get_task_struct(task);
eb184479
PZ
4414 ctx->pmu = pmu;
4415
4416 return ctx;
a63eaf34
PM
4417}
4418
2ebd4ffb
MH
4419static struct task_struct *
4420find_lively_task_by_vpid(pid_t vpid)
4421{
4422 struct task_struct *task;
0793a61d
TG
4423
4424 rcu_read_lock();
2ebd4ffb 4425 if (!vpid)
0793a61d
TG
4426 task = current;
4427 else
2ebd4ffb 4428 task = find_task_by_vpid(vpid);
0793a61d
TG
4429 if (task)
4430 get_task_struct(task);
4431 rcu_read_unlock();
4432
4433 if (!task)
4434 return ERR_PTR(-ESRCH);
4435
2ebd4ffb 4436 return task;
2ebd4ffb
MH
4437}
4438
fe4b04fa
PZ
4439/*
4440 * Returns a matching context with refcount and pincount.
4441 */
108b02cf 4442static struct perf_event_context *
4af57ef2
YZ
4443find_get_context(struct pmu *pmu, struct task_struct *task,
4444 struct perf_event *event)
0793a61d 4445{
211de6eb 4446 struct perf_event_context *ctx, *clone_ctx = NULL;
22a4f650 4447 struct perf_cpu_context *cpuctx;
4af57ef2 4448 void *task_ctx_data = NULL;
25346b93 4449 unsigned long flags;
8dc85d54 4450 int ctxn, err;
4af57ef2 4451 int cpu = event->cpu;
0793a61d 4452
22a4ec72 4453 if (!task) {
cdd6c482 4454 /* Must be root to operate on a CPU event: */
da97e184
JFG
4455 err = perf_allow_cpu(&event->attr);
4456 if (err)
4457 return ERR_PTR(err);
0793a61d 4458
108b02cf 4459 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
0793a61d 4460 ctx = &cpuctx->ctx;
c93f7669 4461 get_ctx(ctx);
fe4b04fa 4462 ++ctx->pin_count;
0793a61d 4463
0793a61d
TG
4464 return ctx;
4465 }
4466
8dc85d54
PZ
4467 err = -EINVAL;
4468 ctxn = pmu->task_ctx_nr;
4469 if (ctxn < 0)
4470 goto errout;
4471
4af57ef2
YZ
4472 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
4473 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
4474 if (!task_ctx_data) {
4475 err = -ENOMEM;
4476 goto errout;
4477 }
4478 }
4479
9ed6060d 4480retry:
8dc85d54 4481 ctx = perf_lock_task_context(task, ctxn, &flags);
c93f7669 4482 if (ctx) {
211de6eb 4483 clone_ctx = unclone_ctx(ctx);
fe4b04fa 4484 ++ctx->pin_count;
4af57ef2
YZ
4485
4486 if (task_ctx_data && !ctx->task_ctx_data) {
4487 ctx->task_ctx_data = task_ctx_data;
4488 task_ctx_data = NULL;
4489 }
e625cce1 4490 raw_spin_unlock_irqrestore(&ctx->lock, flags);
211de6eb
PZ
4491
4492 if (clone_ctx)
4493 put_ctx(clone_ctx);
9137fb28 4494 } else {
eb184479 4495 ctx = alloc_perf_context(pmu, task);
c93f7669
PM
4496 err = -ENOMEM;
4497 if (!ctx)
4498 goto errout;
eb184479 4499
4af57ef2
YZ
4500 if (task_ctx_data) {
4501 ctx->task_ctx_data = task_ctx_data;
4502 task_ctx_data = NULL;
4503 }
4504
dbe08d82
ON
4505 err = 0;
4506 mutex_lock(&task->perf_event_mutex);
4507 /*
 4508 * If it has already passed perf_event_exit_task(),
 4509 * we must see PF_EXITING; it takes this mutex too.
4510 */
4511 if (task->flags & PF_EXITING)
4512 err = -ESRCH;
4513 else if (task->perf_event_ctxp[ctxn])
4514 err = -EAGAIN;
fe4b04fa 4515 else {
9137fb28 4516 get_ctx(ctx);
fe4b04fa 4517 ++ctx->pin_count;
dbe08d82 4518 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
fe4b04fa 4519 }
dbe08d82
ON
4520 mutex_unlock(&task->perf_event_mutex);
4521
4522 if (unlikely(err)) {
9137fb28 4523 put_ctx(ctx);
dbe08d82
ON
4524
4525 if (err == -EAGAIN)
4526 goto retry;
4527 goto errout;
a63eaf34
PM
4528 }
4529 }
4530
4af57ef2 4531 kfree(task_ctx_data);
0793a61d 4532 return ctx;
c93f7669 4533
9ed6060d 4534errout:
4af57ef2 4535 kfree(task_ctx_data);
c93f7669 4536 return ERR_PTR(err);
0793a61d
TG
4537}
4538
6fb2915d 4539static void perf_event_free_filter(struct perf_event *event);
2541517c 4540static void perf_event_free_bpf_prog(struct perf_event *event);
6fb2915d 4541
cdd6c482 4542static void free_event_rcu(struct rcu_head *head)
592903cd 4543{
cdd6c482 4544 struct perf_event *event;
592903cd 4545
cdd6c482
IM
4546 event = container_of(head, struct perf_event, rcu_head);
4547 if (event->ns)
4548 put_pid_ns(event->ns);
6fb2915d 4549 perf_event_free_filter(event);
cdd6c482 4550 kfree(event);
592903cd
PZ
4551}
4552
b69cf536 4553static void ring_buffer_attach(struct perf_event *event,
56de4e8f 4554 struct perf_buffer *rb);
925d519a 4555
f2fb6bef
KL
4556static void detach_sb_event(struct perf_event *event)
4557{
4558 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
4559
4560 raw_spin_lock(&pel->lock);
4561 list_del_rcu(&event->sb_list);
4562 raw_spin_unlock(&pel->lock);
4563}
4564
a4f144eb 4565static bool is_sb_event(struct perf_event *event)
f2fb6bef 4566{
a4f144eb
DCC
4567 struct perf_event_attr *attr = &event->attr;
4568
f2fb6bef 4569 if (event->parent)
a4f144eb 4570 return false;
f2fb6bef
KL
4571
4572 if (event->attach_state & PERF_ATTACH_TASK)
a4f144eb 4573 return false;
f2fb6bef 4574
a4f144eb
DCC
4575 if (attr->mmap || attr->mmap_data || attr->mmap2 ||
4576 attr->comm || attr->comm_exec ||
76193a94 4577 attr->task || attr->ksymbol ||
21038f2b
SL
4578 attr->context_switch ||
4579 attr->bpf_event)
a4f144eb
DCC
4580 return true;
4581 return false;
4582}
4583
4584static void unaccount_pmu_sb_event(struct perf_event *event)
4585{
4586 if (is_sb_event(event))
4587 detach_sb_event(event);
f2fb6bef
KL
4588}
4589
4beb31f3 4590static void unaccount_event_cpu(struct perf_event *event, int cpu)
f1600952 4591{
4beb31f3
FW
4592 if (event->parent)
4593 return;
4594
4beb31f3
FW
4595 if (is_cgroup_event(event))
4596 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
4597}
925d519a 4598
555e0c1e
FW
4599#ifdef CONFIG_NO_HZ_FULL
4600static DEFINE_SPINLOCK(nr_freq_lock);
4601#endif
4602
4603static void unaccount_freq_event_nohz(void)
4604{
4605#ifdef CONFIG_NO_HZ_FULL
4606 spin_lock(&nr_freq_lock);
4607 if (atomic_dec_and_test(&nr_freq_events))
4608 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
4609 spin_unlock(&nr_freq_lock);
4610#endif
4611}
4612
4613static void unaccount_freq_event(void)
4614{
4615 if (tick_nohz_full_enabled())
4616 unaccount_freq_event_nohz();
4617 else
4618 atomic_dec(&nr_freq_events);
4619}
4620
4beb31f3
FW
4621static void unaccount_event(struct perf_event *event)
4622{
25432ae9
PZ
4623 bool dec = false;
4624
4beb31f3
FW
4625 if (event->parent)
4626 return;
4627
4628 if (event->attach_state & PERF_ATTACH_TASK)
25432ae9 4629 dec = true;
4beb31f3
FW
4630 if (event->attr.mmap || event->attr.mmap_data)
4631 atomic_dec(&nr_mmap_events);
4632 if (event->attr.comm)
4633 atomic_dec(&nr_comm_events);
e4222673
HB
4634 if (event->attr.namespaces)
4635 atomic_dec(&nr_namespaces_events);
96aaab68
NK
4636 if (event->attr.cgroup)
4637 atomic_dec(&nr_cgroup_events);
4beb31f3
FW
4638 if (event->attr.task)
4639 atomic_dec(&nr_task_events);
948b26b6 4640 if (event->attr.freq)
555e0c1e 4641 unaccount_freq_event();
45ac1403 4642 if (event->attr.context_switch) {
25432ae9 4643 dec = true;
45ac1403
AH
4644 atomic_dec(&nr_switch_events);
4645 }
4beb31f3 4646 if (is_cgroup_event(event))
25432ae9 4647 dec = true;
4beb31f3 4648 if (has_branch_stack(event))
25432ae9 4649 dec = true;
76193a94
SL
4650 if (event->attr.ksymbol)
4651 atomic_dec(&nr_ksymbol_events);
6ee52e2a
SL
4652 if (event->attr.bpf_event)
4653 atomic_dec(&nr_bpf_events);
25432ae9 4654
9107c89e
PZ
4655 if (dec) {
4656 if (!atomic_add_unless(&perf_sched_count, -1, 1))
4657 schedule_delayed_work(&perf_sched_work, HZ);
4658 }
4beb31f3
FW
4659
4660 unaccount_event_cpu(event, event->cpu);
f2fb6bef
KL
4661
4662 unaccount_pmu_sb_event(event);
4beb31f3 4663}
925d519a 4664
9107c89e
PZ
4665static void perf_sched_delayed(struct work_struct *work)
4666{
4667 mutex_lock(&perf_sched_mutex);
4668 if (atomic_dec_and_test(&perf_sched_count))
4669 static_branch_disable(&perf_sched_events);
4670 mutex_unlock(&perf_sched_mutex);
4671}
4672
bed5b25a
AS
4673/*
4674 * The following implement mutual exclusion of events on "exclusive" pmus
4675 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
4676 * at a time, so we disallow creating events that might conflict, namely:
4677 *
4678 * 1) cpu-wide events in the presence of per-task events,
4679 * 2) per-task events in the presence of cpu-wide events,
4680 * 3) two matching events on the same context.
4681 *
4682 * The former two cases are handled in the allocation path (perf_event_alloc(),
a0733e69 4683 * _free_event()), the latter -- before the first perf_install_in_context().
bed5b25a
AS
4684 */
4685static int exclusive_event_init(struct perf_event *event)
4686{
4687 struct pmu *pmu = event->pmu;
4688
8a58ddae 4689 if (!is_exclusive_pmu(pmu))
bed5b25a
AS
4690 return 0;
4691
4692 /*
4693 * Prevent co-existence of per-task and cpu-wide events on the
4694 * same exclusive pmu.
4695 *
4696 * Negative pmu::exclusive_cnt means there are cpu-wide
4697 * events on this "exclusive" pmu, positive means there are
4698 * per-task events.
4699 *
4700 * Since this is called in perf_event_alloc() path, event::ctx
4701 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
4702 * to mean "per-task event", because unlike other attach states it
4703 * never gets cleared.
4704 */
4705 if (event->attach_state & PERF_ATTACH_TASK) {
4706 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
4707 return -EBUSY;
4708 } else {
4709 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
4710 return -EBUSY;
4711 }
4712
4713 return 0;
4714}
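/*
 * To illustrate the counting above: three per-task events on an exclusive
 * PMU leave pmu->exclusive_cnt at +3, a single cpu-wide event leaves it at
 * -1; mixing the two is exactly what the *_unless_* operations reject with
 * -EBUSY.
 */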
4715
4716static void exclusive_event_destroy(struct perf_event *event)
4717{
4718 struct pmu *pmu = event->pmu;
4719
8a58ddae 4720 if (!is_exclusive_pmu(pmu))
bed5b25a
AS
4721 return;
4722
4723 /* see comment in exclusive_event_init() */
4724 if (event->attach_state & PERF_ATTACH_TASK)
4725 atomic_dec(&pmu->exclusive_cnt);
4726 else
4727 atomic_inc(&pmu->exclusive_cnt);
4728}
4729
4730static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
4731{
3bf6215a 4732 if ((e1->pmu == e2->pmu) &&
bed5b25a
AS
4733 (e1->cpu == e2->cpu ||
4734 e1->cpu == -1 ||
4735 e2->cpu == -1))
4736 return true;
4737 return false;
4738}
4739
bed5b25a
AS
4740static bool exclusive_event_installable(struct perf_event *event,
4741 struct perf_event_context *ctx)
4742{
4743 struct perf_event *iter_event;
4744 struct pmu *pmu = event->pmu;
4745
8a58ddae
AS
4746 lockdep_assert_held(&ctx->mutex);
4747
4748 if (!is_exclusive_pmu(pmu))
bed5b25a
AS
4749 return true;
4750
4751 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
4752 if (exclusive_event_match(iter_event, event))
4753 return false;
4754 }
4755
4756 return true;
4757}
4758
375637bc
AS
4759static void perf_addr_filters_splice(struct perf_event *event,
4760 struct list_head *head);
4761
683ede43 4762static void _free_event(struct perf_event *event)
f1600952 4763{
e360adbe 4764 irq_work_sync(&event->pending);
925d519a 4765
4beb31f3 4766 unaccount_event(event);
9ee318a7 4767
da97e184
JFG
4768 security_perf_event_free(event);
4769
76369139 4770 if (event->rb) {
9bb5d40c
PZ
4771 /*
4772 * Can happen when we close an event with re-directed output.
4773 *
4774 * Since we have a 0 refcount, perf_mmap_close() will skip
4775 * over us; possibly making our ring_buffer_put() the last.
4776 */
4777 mutex_lock(&event->mmap_mutex);
b69cf536 4778 ring_buffer_attach(event, NULL);
9bb5d40c 4779 mutex_unlock(&event->mmap_mutex);
a4be7c27
PZ
4780 }
4781
e5d1367f
SE
4782 if (is_cgroup_event(event))
4783 perf_detach_cgroup(event);
4784
a0733e69
PZ
4785 if (!event->parent) {
4786 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
4787 put_callchain_buffers();
4788 }
4789
4790 perf_event_free_bpf_prog(event);
375637bc 4791 perf_addr_filters_splice(event, NULL);
c60f83b8 4792 kfree(event->addr_filter_ranges);
a0733e69
PZ
4793
4794 if (event->destroy)
4795 event->destroy(event);
4796
1cf8dfe8
PZ
4797 /*
4798 * Must be after ->destroy(), due to uprobe_perf_close() using
4799 * hw.target.
4800 */
621b6d2e
PB
4801 if (event->hw.target)
4802 put_task_struct(event->hw.target);
4803
1cf8dfe8
PZ
4804 /*
4805 * perf_event_free_task() relies on put_ctx() being 'last', in particular
4806 * all task references must be cleaned up.
4807 */
4808 if (event->ctx)
4809 put_ctx(event->ctx);
4810
62a92c8f
AS
4811 exclusive_event_destroy(event);
4812 module_put(event->pmu->module);
a0733e69
PZ
4813
4814 call_rcu(&event->rcu_head, free_event_rcu);
f1600952
PZ
4815}
4816
683ede43
PZ
4817/*
 4818 * Used to free events which have a known refcount of 1, such as events in
 4819 * error paths that aren't exposed yet, and inherited events.
4820 */
4821static void free_event(struct perf_event *event)
0793a61d 4822{
683ede43
PZ
4823 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
4824 "unexpected event refcount: %ld; ptr=%p\n",
4825 atomic_long_read(&event->refcount), event)) {
4826 /* leak to avoid use-after-free */
4827 return;
4828 }
0793a61d 4829
683ede43 4830 _free_event(event);
0793a61d
TG
4831}
4832
a66a3052 4833/*
f8697762 4834 * Remove user event from the owner task.
a66a3052 4835 */
f8697762 4836static void perf_remove_from_owner(struct perf_event *event)
fb0459d7 4837{
8882135b 4838 struct task_struct *owner;
fb0459d7 4839
8882135b 4840 rcu_read_lock();
8882135b 4841 /*
f47c02c0
PZ
4842 * Matches the smp_store_release() in perf_event_exit_task(). If we
4843 * observe !owner it means the list deletion is complete and we can
4844 * indeed free this event, otherwise we need to serialize on
8882135b
PZ
4845 * owner->perf_event_mutex.
4846 */
506458ef 4847 owner = READ_ONCE(event->owner);
8882135b
PZ
4848 if (owner) {
4849 /*
4850 * Since delayed_put_task_struct() also drops the last
4851 * task reference we can safely take a new reference
4852 * while holding the rcu_read_lock().
4853 */
4854 get_task_struct(owner);
4855 }
4856 rcu_read_unlock();
4857
4858 if (owner) {
f63a8daa
PZ
4859 /*
4860 * If we're here through perf_event_exit_task() we're already
4861 * holding ctx->mutex which would be an inversion wrt. the
4862 * normal lock order.
4863 *
 4865 * However we can safely take this lock because it's the child
4865 * ctx->mutex.
4866 */
4867 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
4868
8882135b
PZ
4869 /*
4870 * We have to re-check the event->owner field, if it is cleared
4871 * we raced with perf_event_exit_task(), acquiring the mutex
4872 * ensured they're done, and we can proceed with freeing the
4873 * event.
4874 */
f47c02c0 4875 if (event->owner) {
8882135b 4876 list_del_init(&event->owner_entry);
f47c02c0
PZ
4877 smp_store_release(&event->owner, NULL);
4878 }
8882135b
PZ
4879 mutex_unlock(&owner->perf_event_mutex);
4880 put_task_struct(owner);
4881 }
f8697762
JO
4882}
4883
f8697762
JO
4884static void put_event(struct perf_event *event)
4885{
f8697762
JO
4886 if (!atomic_long_dec_and_test(&event->refcount))
4887 return;
4888
c6e5b732
PZ
4889 _free_event(event);
4890}
4891
4892/*
4893 * Kill an event dead; while event:refcount will preserve the event
4894 * object, it will not preserve its functionality. Once the last 'user'
4895 * gives up the object, we'll destroy the thing.
4896 */
4897int perf_event_release_kernel(struct perf_event *event)
4898{
a4f4bb6d 4899 struct perf_event_context *ctx = event->ctx;
c6e5b732 4900 struct perf_event *child, *tmp;
82d94856 4901 LIST_HEAD(free_list);
c6e5b732 4902
a4f4bb6d
PZ
4903 /*
4904 * If we got here through err_file: fput(event_file); we will not have
4905 * attached to a context yet.
4906 */
4907 if (!ctx) {
4908 WARN_ON_ONCE(event->attach_state &
4909 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
4910 goto no_ctx;
4911 }
4912
f8697762
JO
4913 if (!is_kernel_event(event))
4914 perf_remove_from_owner(event);
8882135b 4915
5fa7c8ec 4916 ctx = perf_event_ctx_lock(event);
a83fe28e 4917 WARN_ON_ONCE(ctx->parent_ctx);
a69b0ca4 4918 perf_remove_from_context(event, DETACH_GROUP);
683ede43 4919
a69b0ca4 4920 raw_spin_lock_irq(&ctx->lock);
683ede43 4921 /*
 d8a8cfc7 4922 * Mark this event as STATE_DEAD; there is no external reference to it
a69b0ca4 4923 * anymore.
683ede43 4924 *
a69b0ca4
PZ
4925 * Anybody acquiring event->child_mutex after the below loop _must_
4926 * also see this, most importantly inherit_event() which will avoid
4927 * placing more children on the list.
683ede43 4928 *
c6e5b732
PZ
4929 * Thus this guarantees that we will in fact observe and kill _ALL_
4930 * child events.
683ede43 4931 */
a69b0ca4
PZ
4932 event->state = PERF_EVENT_STATE_DEAD;
4933 raw_spin_unlock_irq(&ctx->lock);
4934
4935 perf_event_ctx_unlock(event, ctx);
683ede43 4936
c6e5b732
PZ
4937again:
4938 mutex_lock(&event->child_mutex);
4939 list_for_each_entry(child, &event->child_list, child_list) {
a6fa941d 4940
c6e5b732
PZ
4941 /*
4942 * Cannot change, child events are not migrated, see the
4943 * comment with perf_event_ctx_lock_nested().
4944 */
506458ef 4945 ctx = READ_ONCE(child->ctx);
c6e5b732
PZ
4946 /*
4947 * Since child_mutex nests inside ctx::mutex, we must jump
4948 * through hoops. We start by grabbing a reference on the ctx.
4949 *
4950 * Since the event cannot get freed while we hold the
4951 * child_mutex, the context must also exist and have a !0
4952 * reference count.
4953 */
4954 get_ctx(ctx);
4955
4956 /*
4957 * Now that we have a ctx ref, we can drop child_mutex, and
4958 * acquire ctx::mutex without fear of it going away. Then we
4959 * can re-acquire child_mutex.
4960 */
4961 mutex_unlock(&event->child_mutex);
4962 mutex_lock(&ctx->mutex);
4963 mutex_lock(&event->child_mutex);
4964
4965 /*
4966 * Now that we hold ctx::mutex and child_mutex, revalidate our
4967 * state, if child is still the first entry, it didn't get freed
4968 * and we can continue doing so.
4969 */
4970 tmp = list_first_entry_or_null(&event->child_list,
4971 struct perf_event, child_list);
4972 if (tmp == child) {
4973 perf_remove_from_context(child, DETACH_GROUP);
82d94856 4974 list_move(&child->child_list, &free_list);
c6e5b732
PZ
4975 /*
4976 * This matches the refcount bump in inherit_event();
4977 * this can't be the last reference.
4978 */
4979 put_event(event);
4980 }
4981
4982 mutex_unlock(&event->child_mutex);
4983 mutex_unlock(&ctx->mutex);
4984 put_ctx(ctx);
4985 goto again;
4986 }
4987 mutex_unlock(&event->child_mutex);
4988
82d94856 4989 list_for_each_entry_safe(child, tmp, &free_list, child_list) {
1cf8dfe8
PZ
4990 void *var = &child->ctx->refcount;
4991
82d94856
PZ
4992 list_del(&child->child_list);
4993 free_event(child);
1cf8dfe8
PZ
4994
4995 /*
4996 * Wake any perf_event_free_task() waiting for this event to be
4997 * freed.
4998 */
4999 smp_mb(); /* pairs with wait_var_event() */
5000 wake_up_var(var);
82d94856
PZ
5001 }
5002
a4f4bb6d
PZ
5003no_ctx:
5004 put_event(event); /* Must be the 'last' reference */
683ede43
PZ
5005 return 0;
5006}
5007EXPORT_SYMBOL_GPL(perf_event_release_kernel);
5008
8b10c5e2
PZ
5009/*
5010 * Called when the last reference to the file is gone.
5011 */
a6fa941d
AV
5012static int perf_release(struct inode *inode, struct file *file)
5013{
c6e5b732 5014 perf_event_release_kernel(file->private_data);
a6fa941d 5015 return 0;
fb0459d7 5016}
fb0459d7 5017
ca0dd44c 5018static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
e53c0994 5019{
cdd6c482 5020 struct perf_event *child;
e53c0994
PZ
5021 u64 total = 0;
5022
59ed446f
PZ
5023 *enabled = 0;
5024 *running = 0;
5025
6f10581a 5026 mutex_lock(&event->child_mutex);
01add3ea 5027
7d88962e 5028 (void)perf_event_read(event, false);
01add3ea
SB
5029 total += perf_event_count(event);
5030
59ed446f
PZ
5031 *enabled += event->total_time_enabled +
5032 atomic64_read(&event->child_total_time_enabled);
5033 *running += event->total_time_running +
5034 atomic64_read(&event->child_total_time_running);
5035
5036 list_for_each_entry(child, &event->child_list, child_list) {
7d88962e 5037 (void)perf_event_read(child, false);
01add3ea 5038 total += perf_event_count(child);
59ed446f
PZ
5039 *enabled += child->total_time_enabled;
5040 *running += child->total_time_running;
5041 }
6f10581a 5042 mutex_unlock(&event->child_mutex);
e53c0994
PZ
5043
5044 return total;
5045}
ca0dd44c
PZ
5046
5047u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
5048{
5049 struct perf_event_context *ctx;
5050 u64 count;
5051
5052 ctx = perf_event_ctx_lock(event);
5053 count = __perf_event_read_value(event, enabled, running);
5054 perf_event_ctx_unlock(event, ctx);
5055
5056 return count;
5057}
fb0459d7 5058EXPORT_SYMBOL_GPL(perf_event_read_value);
e53c0994 5059
7d88962e 5060static int __perf_read_group_add(struct perf_event *leader,
fa8c2693 5061 u64 read_format, u64 *values)
3dab77fb 5062{
2aeb1883 5063 struct perf_event_context *ctx = leader->ctx;
fa8c2693 5064 struct perf_event *sub;
2aeb1883 5065 unsigned long flags;
fa8c2693 5066 int n = 1; /* skip @nr */
7d88962e 5067 int ret;
f63a8daa 5068
7d88962e
SB
5069 ret = perf_event_read(leader, true);
5070 if (ret)
5071 return ret;
abf4868b 5072
a9cd8194
PZ
5073 raw_spin_lock_irqsave(&ctx->lock, flags);
5074
fa8c2693
PZ
5075 /*
5076 * Since we co-schedule groups, {enabled,running} times of siblings
5077 * will be identical to those of the leader, so we only publish one
5078 * set.
5079 */
5080 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
5081 values[n++] += leader->total_time_enabled +
5082 atomic64_read(&leader->child_total_time_enabled);
5083 }
3dab77fb 5084
fa8c2693
PZ
5085 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
5086 values[n++] += leader->total_time_running +
5087 atomic64_read(&leader->child_total_time_running);
5088 }
5089
5090 /*
5091 * Write {count,id} tuples for every sibling.
5092 */
5093 values[n++] += perf_event_count(leader);
abf4868b
PZ
5094 if (read_format & PERF_FORMAT_ID)
5095 values[n++] = primary_event_id(leader);
3dab77fb 5096
edb39592 5097 for_each_sibling_event(sub, leader) {
fa8c2693
PZ
5098 values[n++] += perf_event_count(sub);
5099 if (read_format & PERF_FORMAT_ID)
5100 values[n++] = primary_event_id(sub);
5101 }
7d88962e 5102
2aeb1883 5103 raw_spin_unlock_irqrestore(&ctx->lock, flags);
7d88962e 5104 return 0;
fa8c2693 5105}
3dab77fb 5106
fa8c2693
PZ
5107static int perf_read_group(struct perf_event *event,
5108 u64 read_format, char __user *buf)
5109{
5110 struct perf_event *leader = event->group_leader, *child;
5111 struct perf_event_context *ctx = leader->ctx;
7d88962e 5112 int ret;
fa8c2693 5113 u64 *values;
3dab77fb 5114
fa8c2693 5115 lockdep_assert_held(&ctx->mutex);
3dab77fb 5116
fa8c2693
PZ
5117 values = kzalloc(event->read_size, GFP_KERNEL);
5118 if (!values)
5119 return -ENOMEM;
3dab77fb 5120
fa8c2693
PZ
5121 values[0] = 1 + leader->nr_siblings;
5122
5123 /*
5124 * By locking the child_mutex of the leader we effectively
 5125 * lock the child list of all siblings. XXX explain how.
5126 */
5127 mutex_lock(&leader->child_mutex);
abf4868b 5128
7d88962e
SB
5129 ret = __perf_read_group_add(leader, read_format, values);
5130 if (ret)
5131 goto unlock;
5132
5133 list_for_each_entry(child, &leader->child_list, child_list) {
5134 ret = __perf_read_group_add(child, read_format, values);
5135 if (ret)
5136 goto unlock;
5137 }
abf4868b 5138
fa8c2693 5139 mutex_unlock(&leader->child_mutex);
abf4868b 5140
7d88962e 5141 ret = event->read_size;
fa8c2693
PZ
5142 if (copy_to_user(buf, values, event->read_size))
5143 ret = -EFAULT;
7d88962e 5144 goto out;
fa8c2693 5145
7d88962e
SB
5146unlock:
5147 mutex_unlock(&leader->child_mutex);
5148out:
fa8c2693 5149 kfree(values);
abf4868b 5150 return ret;
3dab77fb
PZ
5151}
5152
b15f495b 5153static int perf_read_one(struct perf_event *event,
3dab77fb
PZ
5154 u64 read_format, char __user *buf)
5155{
59ed446f 5156 u64 enabled, running;
3dab77fb
PZ
5157 u64 values[4];
5158 int n = 0;
5159
ca0dd44c 5160 values[n++] = __perf_event_read_value(event, &enabled, &running);
59ed446f
PZ
5161 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
5162 values[n++] = enabled;
5163 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
5164 values[n++] = running;
3dab77fb 5165 if (read_format & PERF_FORMAT_ID)
cdd6c482 5166 values[n++] = primary_event_id(event);
3dab77fb
PZ
5167
5168 if (copy_to_user(buf, values, n * sizeof(u64)))
5169 return -EFAULT;
5170
5171 return n * sizeof(u64);
5172}
5173
dc633982
JO
5174static bool is_event_hup(struct perf_event *event)
5175{
5176 bool no_children;
5177
a69b0ca4 5178 if (event->state > PERF_EVENT_STATE_EXIT)
dc633982
JO
5179 return false;
5180
5181 mutex_lock(&event->child_mutex);
5182 no_children = list_empty(&event->child_list);
5183 mutex_unlock(&event->child_mutex);
5184 return no_children;
5185}
5186
0793a61d 5187/*
cdd6c482 5188 * Read the performance event - simple non blocking version for now
0793a61d
TG
5189 */
5190static ssize_t
b15f495b 5191__perf_read(struct perf_event *event, char __user *buf, size_t count)
0793a61d 5192{
cdd6c482 5193 u64 read_format = event->attr.read_format;
3dab77fb 5194 int ret;
0793a61d 5195
3b6f9e5c 5196 /*
788faab7 5197 * Return end-of-file for a read on an event that is in
3b6f9e5c
PM
5198 * error state (i.e. because it was pinned but it couldn't be
5199 * scheduled on to the CPU at some point).
5200 */
cdd6c482 5201 if (event->state == PERF_EVENT_STATE_ERROR)
3b6f9e5c
PM
5202 return 0;
5203
c320c7b7 5204 if (count < event->read_size)
3dab77fb
PZ
5205 return -ENOSPC;
5206
cdd6c482 5207 WARN_ON_ONCE(event->ctx->parent_ctx);
3dab77fb 5208 if (read_format & PERF_FORMAT_GROUP)
b15f495b 5209 ret = perf_read_group(event, read_format, buf);
3dab77fb 5210 else
b15f495b 5211 ret = perf_read_one(event, read_format, buf);
0793a61d 5212
3dab77fb 5213 return ret;
0793a61d
TG
5214}
5215
0793a61d
TG
5216static ssize_t
5217perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
5218{
cdd6c482 5219 struct perf_event *event = file->private_data;
f63a8daa
PZ
5220 struct perf_event_context *ctx;
5221 int ret;
0793a61d 5222
da97e184
JFG
5223 ret = security_perf_event_read(event);
5224 if (ret)
5225 return ret;
5226
f63a8daa 5227 ctx = perf_event_ctx_lock(event);
b15f495b 5228 ret = __perf_read(event, buf, count);
f63a8daa
PZ
5229 perf_event_ctx_unlock(event, ctx);
5230
5231 return ret;
0793a61d
TG
5232}
5233
9dd95748 5234static __poll_t perf_poll(struct file *file, poll_table *wait)
0793a61d 5235{
cdd6c482 5236 struct perf_event *event = file->private_data;
56de4e8f 5237 struct perf_buffer *rb;
a9a08845 5238 __poll_t events = EPOLLHUP;
c7138f37 5239
e708d7ad 5240 poll_wait(file, &event->waitq, wait);
179033b3 5241
dc633982 5242 if (is_event_hup(event))
179033b3 5243 return events;
c7138f37 5244
10c6db11 5245 /*
9bb5d40c
PZ
5246 * Pin the event->rb by taking event->mmap_mutex; otherwise
5247 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
10c6db11
PZ
5248 */
5249 mutex_lock(&event->mmap_mutex);
9bb5d40c
PZ
5250 rb = event->rb;
5251 if (rb)
76369139 5252 events = atomic_xchg(&rb->poll, 0);
10c6db11 5253 mutex_unlock(&event->mmap_mutex);
0793a61d
TG
5254 return events;
5255}
5256
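/*
 * Illustrative userspace-side sketch, not part of core.c: blocking on the
 * event fd until perf_poll() above reports data. POLLIN follows the ring
 * buffer wakeups (attr.wakeup_events / attr.wakeup_watermark); POLLHUP is
 * returned once the event is past PERF_EVENT_STATE_EXIT and has no children
 * left (see is_event_hup()). Assumes perf_fd came from perf_event_open().
 */
#include <poll.h>

static int wait_for_samples(int perf_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret <= 0)
		return ret;			/* timeout (0) or error (-1) */
	if (pfd.revents & POLLHUP)
		return 0;			/* event is done; drain and stop polling */
	return (pfd.revents & POLLIN) ? 1 : 0;	/* ring buffer has data to read */
}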
f63a8daa 5257static void _perf_event_reset(struct perf_event *event)
6de6a7b9 5258{
7d88962e 5259 (void)perf_event_read(event, false);
e7850595 5260 local64_set(&event->count, 0);
cdd6c482 5261 perf_event_update_userpage(event);
3df5edad
PZ
5262}
5263
52ba4b0b
LX
5264/* Assume it's not an event with inherit set. */
5265u64 perf_event_pause(struct perf_event *event, bool reset)
5266{
5267 struct perf_event_context *ctx;
5268 u64 count;
5269
5270 ctx = perf_event_ctx_lock(event);
5271 WARN_ON_ONCE(event->attr.inherit);
5272 _perf_event_disable(event);
5273 count = local64_read(&event->count);
5274 if (reset)
5275 local64_set(&event->count, 0);
5276 perf_event_ctx_unlock(event, ctx);
5277
5278 return count;
5279}
5280EXPORT_SYMBOL_GPL(perf_event_pause);
5281
c93f7669 5282/*
cdd6c482
IM
5283 * Holding the top-level event's child_mutex means that any
5284 * descendant process that has inherited this event will block
8ba289b8 5285 * in perf_event_exit_event() if it goes to exit, thus satisfying the
cdd6c482 5286 * task existence requirements of perf_event_enable/disable.
c93f7669 5287 */
cdd6c482
IM
5288static void perf_event_for_each_child(struct perf_event *event,
5289 void (*func)(struct perf_event *))
3df5edad 5290{
cdd6c482 5291 struct perf_event *child;
3df5edad 5292
cdd6c482 5293 WARN_ON_ONCE(event->ctx->parent_ctx);
f63a8daa 5294
cdd6c482
IM
5295 mutex_lock(&event->child_mutex);
5296 func(event);
5297 list_for_each_entry(child, &event->child_list, child_list)
3df5edad 5298 func(child);
cdd6c482 5299 mutex_unlock(&event->child_mutex);
3df5edad
PZ
5300}
5301
cdd6c482
IM
5302static void perf_event_for_each(struct perf_event *event,
5303 void (*func)(struct perf_event *))
3df5edad 5304{
cdd6c482
IM
5305 struct perf_event_context *ctx = event->ctx;
5306 struct perf_event *sibling;
3df5edad 5307
f63a8daa
PZ
5308 lockdep_assert_held(&ctx->mutex);
5309
cdd6c482 5310 event = event->group_leader;
75f937f2 5311
cdd6c482 5312 perf_event_for_each_child(event, func);
edb39592 5313 for_each_sibling_event(sibling, event)
724b6daa 5314 perf_event_for_each_child(sibling, func);
6de6a7b9
PZ
5315}
5316
fae3fde6
PZ
5317static void __perf_event_period(struct perf_event *event,
5318 struct perf_cpu_context *cpuctx,
5319 struct perf_event_context *ctx,
5320 void *info)
c7999c6f 5321{
fae3fde6 5322 u64 value = *((u64 *)info);
c7999c6f 5323 bool active;
08247e31 5324
cdd6c482 5325 if (event->attr.freq) {
cdd6c482 5326 event->attr.sample_freq = value;
08247e31 5327 } else {
cdd6c482
IM
5328 event->attr.sample_period = value;
5329 event->hw.sample_period = value;
08247e31 5330 }
bad7192b
PZ
5331
5332 active = (event->state == PERF_EVENT_STATE_ACTIVE);
5333 if (active) {
5334 perf_pmu_disable(ctx->pmu);
1e02cd40
PZ
5335 /*
5336 * We could be throttled; unthrottle now to avoid the tick
5337 * trying to unthrottle while we already re-started the event.
5338 */
5339 if (event->hw.interrupts == MAX_INTERRUPTS) {
5340 event->hw.interrupts = 0;
5341 perf_log_throttle(event, 1);
5342 }
bad7192b
PZ
5343 event->pmu->stop(event, PERF_EF_UPDATE);
5344 }
5345
5346 local64_set(&event->hw.period_left, 0);
5347
5348 if (active) {
5349 event->pmu->start(event, PERF_EF_RELOAD);
5350 perf_pmu_enable(ctx->pmu);
5351 }
c7999c6f
PZ
5352}
5353
81ec3f3c
JO
5354static int perf_event_check_period(struct perf_event *event, u64 value)
5355{
5356 return event->pmu->check_period(event, value);
5357}
5358
3ca270fc 5359static int _perf_event_period(struct perf_event *event, u64 value)
c7999c6f 5360{
c7999c6f
PZ
5361 if (!is_sampling_event(event))
5362 return -EINVAL;
5363
c7999c6f
PZ
5364 if (!value)
5365 return -EINVAL;
5366
5367 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
5368 return -EINVAL;
5369
81ec3f3c
JO
5370 if (perf_event_check_period(event, value))
5371 return -EINVAL;
5372
913a90bc
RB
5373 if (!event->attr.freq && (value & (1ULL << 63)))
5374 return -EINVAL;
5375
fae3fde6 5376 event_function_call(event, __perf_event_period, &value);
08247e31 5377
c7999c6f 5378 return 0;
08247e31
PZ
5379}
5380
3ca270fc
LX
5381int perf_event_period(struct perf_event *event, u64 value)
5382{
5383 struct perf_event_context *ctx;
5384 int ret;
5385
5386 ctx = perf_event_ctx_lock(event);
5387 ret = _perf_event_period(event, value);
5388 perf_event_ctx_unlock(event, ctx);
5389
5390 return ret;
5391}
5392EXPORT_SYMBOL_GPL(perf_event_period);
5393
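/*
 * Illustrative userspace-side sketch, not part of core.c: the ioctl that
 * ends up in _perf_event_period() above. The kernel rejects the request
 * unless the event is a sampling event, the value is non-zero, a frequency
 * stays within sysctl_perf_event_sample_rate, and a period keeps bit 63
 * clear.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int set_sample_period(int perf_fd, uint64_t period)
{
	/* PERF_EVENT_IOC_PERIOD passes a pointer to a u64. */
	return ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period);
}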
ac9721f3
PZ
5394static const struct file_operations perf_fops;
5395
2903ff01 5396static inline int perf_fget_light(int fd, struct fd *p)
ac9721f3 5397{
2903ff01
AV
5398 struct fd f = fdget(fd);
5399 if (!f.file)
5400 return -EBADF;
ac9721f3 5401
2903ff01
AV
5402 if (f.file->f_op != &perf_fops) {
5403 fdput(f);
5404 return -EBADF;
ac9721f3 5405 }
2903ff01
AV
5406 *p = f;
5407 return 0;
ac9721f3
PZ
5408}
5409
5410static int perf_event_set_output(struct perf_event *event,
5411 struct perf_event *output_event);
6fb2915d 5412static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2541517c 5413static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
32ff77e8
MC
5414static int perf_copy_attr(struct perf_event_attr __user *uattr,
5415 struct perf_event_attr *attr);
a4be7c27 5416
f63a8daa 5417static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
d859e29f 5418{
cdd6c482 5419 void (*func)(struct perf_event *);
3df5edad 5420 u32 flags = arg;
d859e29f
PM
5421
5422 switch (cmd) {
cdd6c482 5423 case PERF_EVENT_IOC_ENABLE:
f63a8daa 5424 func = _perf_event_enable;
d859e29f 5425 break;
cdd6c482 5426 case PERF_EVENT_IOC_DISABLE:
f63a8daa 5427 func = _perf_event_disable;
79f14641 5428 break;
cdd6c482 5429 case PERF_EVENT_IOC_RESET:
f63a8daa 5430 func = _perf_event_reset;
6de6a7b9 5431 break;
3df5edad 5432
cdd6c482 5433 case PERF_EVENT_IOC_REFRESH:
f63a8daa 5434 return _perf_event_refresh(event, arg);
08247e31 5435
cdd6c482 5436 case PERF_EVENT_IOC_PERIOD:
3ca270fc
LX
5437 {
5438 u64 value;
08247e31 5439
3ca270fc
LX
5440 if (copy_from_user(&value, (u64 __user *)arg, sizeof(value)))
5441 return -EFAULT;
08247e31 5442
3ca270fc
LX
5443 return _perf_event_period(event, value);
5444 }
cf4957f1
JO
5445 case PERF_EVENT_IOC_ID:
5446 {
5447 u64 id = primary_event_id(event);
5448
5449 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
5450 return -EFAULT;
5451 return 0;
5452 }
5453
cdd6c482 5454 case PERF_EVENT_IOC_SET_OUTPUT:
ac9721f3 5455 {
ac9721f3 5456 int ret;
ac9721f3 5457 if (arg != -1) {
2903ff01
AV
5458 struct perf_event *output_event;
5459 struct fd output;
5460 ret = perf_fget_light(arg, &output);
5461 if (ret)
5462 return ret;
5463 output_event = output.file->private_data;
5464 ret = perf_event_set_output(event, output_event);
5465 fdput(output);
5466 } else {
5467 ret = perf_event_set_output(event, NULL);
ac9721f3 5468 }
ac9721f3
PZ
5469 return ret;
5470 }
a4be7c27 5471
6fb2915d
LZ
5472 case PERF_EVENT_IOC_SET_FILTER:
5473 return perf_event_set_filter(event, (void __user *)arg);
5474
2541517c
AS
5475 case PERF_EVENT_IOC_SET_BPF:
5476 return perf_event_set_bpf_prog(event, arg);
5477
86e7972f 5478 case PERF_EVENT_IOC_PAUSE_OUTPUT: {
56de4e8f 5479 struct perf_buffer *rb;
86e7972f
WN
5480
5481 rcu_read_lock();
5482 rb = rcu_dereference(event->rb);
5483 if (!rb || !rb->nr_pages) {
5484 rcu_read_unlock();
5485 return -EINVAL;
5486 }
5487 rb_toggle_paused(rb, !!arg);
5488 rcu_read_unlock();
5489 return 0;
5490 }
f371b304
YS
5491
5492 case PERF_EVENT_IOC_QUERY_BPF:
f4e2298e 5493 return perf_event_query_prog_array(event, (void __user *)arg);
32ff77e8
MC
5494
5495 case PERF_EVENT_IOC_MODIFY_ATTRIBUTES: {
5496 struct perf_event_attr new_attr;
5497 int err = perf_copy_attr((struct perf_event_attr __user *)arg,
5498 &new_attr);
5499
5500 if (err)
5501 return err;
5502
5503 return perf_event_modify_attr(event, &new_attr);
5504 }
d859e29f 5505 default:
3df5edad 5506 return -ENOTTY;
d859e29f 5507 }
3df5edad
PZ
5508
5509 if (flags & PERF_IOC_FLAG_GROUP)
cdd6c482 5510 perf_event_for_each(event, func);
3df5edad 5511 else
cdd6c482 5512 perf_event_for_each_child(event, func);
3df5edad
PZ
5513
5514 return 0;
d859e29f
PM
5515}
5516
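/*
 * Illustrative userspace-side sketch, not part of core.c: the common
 * reset/enable/disable sequence dispatched by _perf_ioctl() above.
 * PERF_IOC_FLAG_GROUP makes the operation cover the whole group through
 * perf_event_for_each(); without it only the event and its inherited
 * children are touched.
 */
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void measure_region(int group_leader_fd, void (*workload)(void))
{
	ioctl(group_leader_fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
	ioctl(group_leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);

	workload();	/* the code being measured */

	ioctl(group_leader_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
	/* The counts can now be read back with read() on the group leader. */
}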
f63a8daa
PZ
5517static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5518{
5519 struct perf_event *event = file->private_data;
5520 struct perf_event_context *ctx;
5521 long ret;
5522
da97e184
JFG
5523 /* Treat ioctl like writes as it is likely a mutating operation. */
5524 ret = security_perf_event_write(event);
5525 if (ret)
5526 return ret;
5527
f63a8daa
PZ
5528 ctx = perf_event_ctx_lock(event);
5529 ret = _perf_ioctl(event, cmd, arg);
5530 perf_event_ctx_unlock(event, ctx);
5531
5532 return ret;
5533}
5534
b3f20785
PM
5535#ifdef CONFIG_COMPAT
5536static long perf_compat_ioctl(struct file *file, unsigned int cmd,
5537 unsigned long arg)
5538{
5539 switch (_IOC_NR(cmd)) {
5540 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
5541 case _IOC_NR(PERF_EVENT_IOC_ID):
82489c5f
ES
5542 case _IOC_NR(PERF_EVENT_IOC_QUERY_BPF):
5543 case _IOC_NR(PERF_EVENT_IOC_MODIFY_ATTRIBUTES):
b3f20785
PM
5544 /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
5545 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
5546 cmd &= ~IOCSIZE_MASK;
5547 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
5548 }
5549 break;
5550 }
5551 return perf_ioctl(file, cmd, arg);
5552}
5553#else
5554# define perf_compat_ioctl NULL
5555#endif
5556
cdd6c482 5557int perf_event_task_enable(void)
771d7cde 5558{
f63a8daa 5559 struct perf_event_context *ctx;
cdd6c482 5560 struct perf_event *event;
771d7cde 5561
cdd6c482 5562 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
5563 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
5564 ctx = perf_event_ctx_lock(event);
5565 perf_event_for_each_child(event, _perf_event_enable);
5566 perf_event_ctx_unlock(event, ctx);
5567 }
cdd6c482 5568 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
5569
5570 return 0;
5571}
5572
cdd6c482 5573int perf_event_task_disable(void)
771d7cde 5574{
f63a8daa 5575 struct perf_event_context *ctx;
cdd6c482 5576 struct perf_event *event;
771d7cde 5577
cdd6c482 5578 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
5579 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
5580 ctx = perf_event_ctx_lock(event);
5581 perf_event_for_each_child(event, _perf_event_disable);
5582 perf_event_ctx_unlock(event, ctx);
5583 }
cdd6c482 5584 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
5585
5586 return 0;
5587}
5588
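/*
 * Illustrative userspace-side sketch, not part of core.c:
 * perf_event_task_enable() and perf_event_task_disable() above are reached
 * through prctl(), which lets a task flip every counter it owns in one call.
 */
#include <sys/prctl.h>

static void run_with_own_counters(void (*workload)(void))
{
	prctl(PR_TASK_PERF_EVENTS_ENABLE, 0, 0, 0, 0);
	workload();
	prctl(PR_TASK_PERF_EVENTS_DISABLE, 0, 0, 0, 0);
}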
cdd6c482 5589static int perf_event_index(struct perf_event *event)
194002b2 5590{
a4eaf7f1
PZ
5591 if (event->hw.state & PERF_HES_STOPPED)
5592 return 0;
5593
cdd6c482 5594 if (event->state != PERF_EVENT_STATE_ACTIVE)
194002b2
PZ
5595 return 0;
5596
35edc2a5 5597 return event->pmu->event_idx(event);
194002b2
PZ
5598}
5599
c4794295 5600static void calc_timer_values(struct perf_event *event,
e3f3541c 5601 u64 *now,
7f310a5d
EM
5602 u64 *enabled,
5603 u64 *running)
c4794295 5604{
e3f3541c 5605 u64 ctx_time;
c4794295 5606
e3f3541c
PZ
5607 *now = perf_clock();
5608 ctx_time = event->shadow_ctx_time + *now;
0d3d73aa 5609 __perf_update_times(event, ctx_time, enabled, running);
c4794295
EM
5610}
5611
fa731587
PZ
5612static void perf_event_init_userpage(struct perf_event *event)
5613{
5614 struct perf_event_mmap_page *userpg;
56de4e8f 5615 struct perf_buffer *rb;
fa731587
PZ
5616
5617 rcu_read_lock();
5618 rb = rcu_dereference(event->rb);
5619 if (!rb)
5620 goto unlock;
5621
5622 userpg = rb->user_page;
5623
5624 /* Allow new userspace to detect that bit 0 is deprecated */
5625 userpg->cap_bit0_is_deprecated = 1;
5626 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
e8c6deac
AS
5627 userpg->data_offset = PAGE_SIZE;
5628 userpg->data_size = perf_data_size(rb);
fa731587
PZ
5629
5630unlock:
5631 rcu_read_unlock();
5632}
5633
c1317ec2
AL
5634void __weak arch_perf_update_userpage(
5635 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
e3f3541c
PZ
5636{
5637}
5638
38ff667b
PZ
5639/*
5640 * Callers need to ensure there can be no nesting of this function, otherwise
5641 * the seqlock logic goes bad. We can not serialize this because the arch
5642 * code calls this from NMI context.
5643 */
cdd6c482 5644void perf_event_update_userpage(struct perf_event *event)
37d81828 5645{
cdd6c482 5646 struct perf_event_mmap_page *userpg;
56de4e8f 5647 struct perf_buffer *rb;
e3f3541c 5648 u64 enabled, running, now;
38ff667b
PZ
5649
5650 rcu_read_lock();
5ec4c599
PZ
5651 rb = rcu_dereference(event->rb);
5652 if (!rb)
5653 goto unlock;
5654
0d641208
EM
5655 /*
5656 * compute total_time_enabled, total_time_running
5657 * based on snapshot values taken when the event
5658 * was last scheduled in.
5659 *
5660 * we cannot simply call update_context_time()
5661 * because of locking issues, as we can be called in
5662 * NMI context
5663 */
e3f3541c 5664 calc_timer_values(event, &now, &enabled, &running);
38ff667b 5665
76369139 5666 userpg = rb->user_page;
7b732a75 5667 /*
9d2dcc8f
MF
5668 * Disable preemption to guarantee consistent time stamps are stored to
5669 * the user page.
7b732a75
PZ
5670 */
5671 preempt_disable();
37d81828 5672 ++userpg->lock;
92f22a38 5673 barrier();
cdd6c482 5674 userpg->index = perf_event_index(event);
b5e58793 5675 userpg->offset = perf_event_count(event);
365a4038 5676 if (userpg->index)
e7850595 5677 userpg->offset -= local64_read(&event->hw.prev_count);
7b732a75 5678
0d641208 5679 userpg->time_enabled = enabled +
cdd6c482 5680 atomic64_read(&event->child_total_time_enabled);
7f8b4e4e 5681
0d641208 5682 userpg->time_running = running +
cdd6c482 5683 atomic64_read(&event->child_total_time_running);
7f8b4e4e 5684
c1317ec2 5685 arch_perf_update_userpage(event, userpg, now);
e3f3541c 5686
92f22a38 5687 barrier();
37d81828 5688 ++userpg->lock;
7b732a75 5689 preempt_enable();
38ff667b 5690unlock:
7b732a75 5691 rcu_read_unlock();
37d81828 5692}
82975c46 5693EXPORT_SYMBOL_GPL(perf_event_update_userpage);
37d81828 5694
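/*
 * Illustrative userspace-side sketch, not part of core.c: the lockless
 * reader that pairs with the ++userpg->lock / barrier() sequence in
 * perf_event_update_userpage() above. "pc" is the first mmap()ed page of
 * the event; compiler_barrier() is a local stand-in for the kernel's
 * barrier().
 */
#include <stdint.h>
#include <linux/perf_event.h>

#define compiler_barrier()	__asm__ __volatile__("" ::: "memory")

static int64_t read_userpage_count(volatile struct perf_event_mmap_page *pc,
				   uint64_t *enabled, uint64_t *running)
{
	uint32_t seq;
	int64_t count;

	do {
		seq = pc->lock;
		compiler_barrier();
		count    = pc->offset;	/* add the hardware counter if pc->index != 0 */
		*enabled = pc->time_enabled;
		*running = pc->time_running;
		compiler_barrier();
	} while (pc->lock != seq);	/* retry if the kernel wrote the page meanwhile */

	return count;
}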
9e3ed2d7 5695static vm_fault_t perf_mmap_fault(struct vm_fault *vmf)
906010b2 5696{
11bac800 5697 struct perf_event *event = vmf->vma->vm_file->private_data;
56de4e8f 5698 struct perf_buffer *rb;
9e3ed2d7 5699 vm_fault_t ret = VM_FAULT_SIGBUS;
906010b2
PZ
5700
5701 if (vmf->flags & FAULT_FLAG_MKWRITE) {
5702 if (vmf->pgoff == 0)
5703 ret = 0;
5704 return ret;
5705 }
5706
5707 rcu_read_lock();
76369139
FW
5708 rb = rcu_dereference(event->rb);
5709 if (!rb)
906010b2
PZ
5710 goto unlock;
5711
5712 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
5713 goto unlock;
5714
76369139 5715 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
906010b2
PZ
5716 if (!vmf->page)
5717 goto unlock;
5718
5719 get_page(vmf->page);
11bac800 5720 vmf->page->mapping = vmf->vma->vm_file->f_mapping;
906010b2
PZ
5721 vmf->page->index = vmf->pgoff;
5722
5723 ret = 0;
5724unlock:
5725 rcu_read_unlock();
5726
5727 return ret;
5728}
5729
10c6db11 5730static void ring_buffer_attach(struct perf_event *event,
56de4e8f 5731 struct perf_buffer *rb)
10c6db11 5732{
56de4e8f 5733 struct perf_buffer *old_rb = NULL;
10c6db11
PZ
5734 unsigned long flags;
5735
b69cf536
PZ
5736 if (event->rb) {
5737 /*
5738 * Should be impossible, we set this when removing
5739 * event->rb_entry and wait/clear when adding event->rb_entry.
5740 */
5741 WARN_ON_ONCE(event->rcu_pending);
10c6db11 5742
b69cf536 5743 old_rb = event->rb;
b69cf536
PZ
5744 spin_lock_irqsave(&old_rb->event_lock, flags);
5745 list_del_rcu(&event->rb_entry);
5746 spin_unlock_irqrestore(&old_rb->event_lock, flags);
10c6db11 5747
2f993cf0
ON
5748 event->rcu_batches = get_state_synchronize_rcu();
5749 event->rcu_pending = 1;
b69cf536 5750 }
10c6db11 5751
b69cf536 5752 if (rb) {
2f993cf0
ON
5753 if (event->rcu_pending) {
5754 cond_synchronize_rcu(event->rcu_batches);
5755 event->rcu_pending = 0;
5756 }
5757
b69cf536
PZ
5758 spin_lock_irqsave(&rb->event_lock, flags);
5759 list_add_rcu(&event->rb_entry, &rb->event_list);
5760 spin_unlock_irqrestore(&rb->event_lock, flags);
5761 }
5762
767ae086
AS
5763 /*
5764 * Avoid racing with perf_mmap_close(AUX): stop the event
5765 * before swizzling the event::rb pointer; if it's getting
5766 * unmapped, its aux_mmap_count will be 0 and it won't
5767 * restart. See the comment in __perf_pmu_output_stop().
5768 *
5769 * Data will inevitably be lost when set_output is done in
5770 * mid-air, but then again, whoever does it like this is
5771 * not in for the data anyway.
5772 */
5773 if (has_aux(event))
5774 perf_event_stop(event, 0);
5775
b69cf536
PZ
5776 rcu_assign_pointer(event->rb, rb);
5777
5778 if (old_rb) {
5779 ring_buffer_put(old_rb);
5780 /*
5781 * Since we detached before setting the new rb (so that we
5782 * could attach the new rb), we could have missed a wakeup.
5783 * Provide it now.
5784 */
5785 wake_up_all(&event->waitq);
5786 }
10c6db11
PZ
5787}
5788
5789static void ring_buffer_wakeup(struct perf_event *event)
5790{
56de4e8f 5791 struct perf_buffer *rb;
10c6db11
PZ
5792
5793 rcu_read_lock();
5794 rb = rcu_dereference(event->rb);
9bb5d40c
PZ
5795 if (rb) {
5796 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
5797 wake_up_all(&event->waitq);
5798 }
10c6db11
PZ
5799 rcu_read_unlock();
5800}
5801
56de4e8f 5802struct perf_buffer *ring_buffer_get(struct perf_event *event)
7b732a75 5803{
56de4e8f 5804 struct perf_buffer *rb;
7b732a75 5805
ac9721f3 5806 rcu_read_lock();
76369139
FW
5807 rb = rcu_dereference(event->rb);
5808 if (rb) {
fecb8ed2 5809 if (!refcount_inc_not_zero(&rb->refcount))
76369139 5810 rb = NULL;
ac9721f3
PZ
5811 }
5812 rcu_read_unlock();
5813
76369139 5814 return rb;
ac9721f3
PZ
5815}
5816
56de4e8f 5817void ring_buffer_put(struct perf_buffer *rb)
ac9721f3 5818{
fecb8ed2 5819 if (!refcount_dec_and_test(&rb->refcount))
ac9721f3 5820 return;
7b732a75 5821
9bb5d40c 5822 WARN_ON_ONCE(!list_empty(&rb->event_list));
10c6db11 5823
76369139 5824 call_rcu(&rb->rcu_head, rb_free_rcu);
7b732a75
PZ
5825}
5826
5827static void perf_mmap_open(struct vm_area_struct *vma)
5828{
cdd6c482 5829 struct perf_event *event = vma->vm_file->private_data;
7b732a75 5830
cdd6c482 5831 atomic_inc(&event->mmap_count);
9bb5d40c 5832 atomic_inc(&event->rb->mmap_count);
1e0fb9ec 5833
45bfb2e5
PZ
5834 if (vma->vm_pgoff)
5835 atomic_inc(&event->rb->aux_mmap_count);
5836
1e0fb9ec 5837 if (event->pmu->event_mapped)
bfe33492 5838 event->pmu->event_mapped(event, vma->vm_mm);
7b732a75
PZ
5839}
5840
95ff4ca2
AS
5841static void perf_pmu_output_stop(struct perf_event *event);
5842
9bb5d40c
PZ
5843/*
5844 * A buffer can be mmap()ed multiple times; either directly through the same
5845 * event, or through other events by use of perf_event_set_output().
5846 *
5847 * In order to undo the VM accounting done by perf_mmap() we need to destroy
5848 * the buffer here, where we still have a VM context. This means we need
5849 * to detach all events redirecting to us.
5850 */
7b732a75
PZ
5851static void perf_mmap_close(struct vm_area_struct *vma)
5852{
cdd6c482 5853 struct perf_event *event = vma->vm_file->private_data;
7b732a75 5854
56de4e8f 5855 struct perf_buffer *rb = ring_buffer_get(event);
9bb5d40c
PZ
5856 struct user_struct *mmap_user = rb->mmap_user;
5857 int mmap_locked = rb->mmap_locked;
5858 unsigned long size = perf_data_size(rb);
789f90fc 5859
1e0fb9ec 5860 if (event->pmu->event_unmapped)
bfe33492 5861 event->pmu->event_unmapped(event, vma->vm_mm);
1e0fb9ec 5862
45bfb2e5
PZ
5863 /*
5864 * rb->aux_mmap_count will always drop before rb->mmap_count and
5865 * event->mmap_count, so it is ok to use event->mmap_mutex to
5866 * serialize with perf_mmap here.
5867 */
5868 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
5869 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
95ff4ca2
AS
5870 /*
5871 * Stop all AUX events that are writing to this buffer,
5872 * so that we can free its AUX pages and corresponding PMU
5873 * data. Note that after rb::aux_mmap_count dropped to zero,
5874 * they won't start any more (see perf_aux_output_begin()).
5875 */
5876 perf_pmu_output_stop(event);
5877
5878 /* now it's safe to free the pages */
36b3db03
AS
5879 atomic_long_sub(rb->aux_nr_pages - rb->aux_mmap_locked, &mmap_user->locked_vm);
5880 atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
45bfb2e5 5881
95ff4ca2 5882 /* this has to be the last one */
45bfb2e5 5883 rb_free_aux(rb);
ca3bb3d0 5884 WARN_ON_ONCE(refcount_read(&rb->aux_refcount));
95ff4ca2 5885
45bfb2e5
PZ
5886 mutex_unlock(&event->mmap_mutex);
5887 }
5888
9bb5d40c
PZ
5889 atomic_dec(&rb->mmap_count);
5890
5891 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
b69cf536 5892 goto out_put;
9bb5d40c 5893
b69cf536 5894 ring_buffer_attach(event, NULL);
9bb5d40c
PZ
5895 mutex_unlock(&event->mmap_mutex);
5896
5897 /* If there's still other mmap()s of this buffer, we're done. */
b69cf536
PZ
5898 if (atomic_read(&rb->mmap_count))
5899 goto out_put;
ac9721f3 5900
9bb5d40c
PZ
5901 /*
5902 * No other mmap()s, detach from all other events that might redirect
5903 * into the now unreachable buffer. Somewhat complicated by the
5904 * fact that rb::event_lock otherwise nests inside mmap_mutex.
5905 */
5906again:
5907 rcu_read_lock();
5908 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
5909 if (!atomic_long_inc_not_zero(&event->refcount)) {
5910 /*
5911 * This event is en-route to free_event() which will
5912 * detach it and remove it from the list.
5913 */
5914 continue;
5915 }
5916 rcu_read_unlock();
789f90fc 5917
9bb5d40c
PZ
5918 mutex_lock(&event->mmap_mutex);
5919 /*
5920 * Check we didn't race with perf_event_set_output() which can
5921 * swizzle the rb from under us while we were waiting to
5922 * acquire mmap_mutex.
5923 *
5924 * If we find a different rb; ignore this event, a next
5925 * iteration will no longer find it on the list. We have to
5926 * still restart the iteration to make sure we're not now
5927 * iterating the wrong list.
5928 */
b69cf536
PZ
5929 if (event->rb == rb)
5930 ring_buffer_attach(event, NULL);
5931
cdd6c482 5932 mutex_unlock(&event->mmap_mutex);
9bb5d40c 5933 put_event(event);
ac9721f3 5934
9bb5d40c
PZ
5935 /*
5936 * Restart the iteration; either we're on the wrong list or we
5937 * destroyed its integrity by doing a deletion.
5938 */
5939 goto again;
7b732a75 5940 }
9bb5d40c
PZ
5941 rcu_read_unlock();
5942
5943 /*
5944 * It could be there's still a few 0-ref events on the list; they'll
5945 * get cleaned up by free_event() -- they'll also still have their
5946 * ref on the rb and will free it whenever they are done with it.
5947 *
5948 * Aside from that, this buffer is 'fully' detached and unmapped,
5949 * undo the VM accounting.
5950 */
5951
d44248a4
SL
5952 atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked,
5953 &mmap_user->locked_vm);
70f8a3ca 5954 atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
9bb5d40c
PZ
5955 free_uid(mmap_user);
5956
b69cf536 5957out_put:
9bb5d40c 5958 ring_buffer_put(rb); /* could be last */
37d81828
PM
5959}
5960
f0f37e2f 5961static const struct vm_operations_struct perf_mmap_vmops = {
43a21ea8 5962 .open = perf_mmap_open,
fca0c116 5963 .close = perf_mmap_close, /* non mergeable */
43a21ea8
PZ
5964 .fault = perf_mmap_fault,
5965 .page_mkwrite = perf_mmap_fault,
37d81828
PM
5966};
5967
5968static int perf_mmap(struct file *file, struct vm_area_struct *vma)
5969{
cdd6c482 5970 struct perf_event *event = file->private_data;
22a4f650 5971 unsigned long user_locked, user_lock_limit;
789f90fc 5972 struct user_struct *user = current_user();
56de4e8f 5973 struct perf_buffer *rb = NULL;
22a4f650 5974 unsigned long locked, lock_limit;
7b732a75
PZ
5975 unsigned long vma_size;
5976 unsigned long nr_pages;
45bfb2e5 5977 long user_extra = 0, extra = 0;
d57e34fd 5978 int ret = 0, flags = 0;
37d81828 5979
c7920614
PZ
5980 /*
5981 * Don't allow mmap() of inherited per-task counters. This would
5982 * create a performance issue due to all children writing to the
76369139 5983 * same rb.
c7920614
PZ
5984 */
5985 if (event->cpu == -1 && event->attr.inherit)
5986 return -EINVAL;
5987
43a21ea8 5988 if (!(vma->vm_flags & VM_SHARED))
37d81828 5989 return -EINVAL;
7b732a75 5990
da97e184
JFG
5991 ret = security_perf_event_read(event);
5992 if (ret)
5993 return ret;
5994
7b732a75 5995 vma_size = vma->vm_end - vma->vm_start;
45bfb2e5
PZ
5996
5997 if (vma->vm_pgoff == 0) {
5998 nr_pages = (vma_size / PAGE_SIZE) - 1;
5999 } else {
6000 /*
6001 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
6002 * mapped, all subsequent mappings should have the same size
6003 * and offset. Must be above the normal perf buffer.
6004 */
6005 u64 aux_offset, aux_size;
6006
6007 if (!event->rb)
6008 return -EINVAL;
6009
6010 nr_pages = vma_size / PAGE_SIZE;
6011
6012 mutex_lock(&event->mmap_mutex);
6013 ret = -EINVAL;
6014
6015 rb = event->rb;
6016 if (!rb)
6017 goto aux_unlock;
6018
6aa7de05
MR
6019 aux_offset = READ_ONCE(rb->user_page->aux_offset);
6020 aux_size = READ_ONCE(rb->user_page->aux_size);
45bfb2e5
PZ
6021
6022 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
6023 goto aux_unlock;
6024
6025 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
6026 goto aux_unlock;
6027
6028 /* already mapped with a different offset */
6029 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
6030 goto aux_unlock;
6031
6032 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
6033 goto aux_unlock;
6034
6035 /* already mapped with a different size */
6036 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
6037 goto aux_unlock;
6038
6039 if (!is_power_of_2(nr_pages))
6040 goto aux_unlock;
6041
6042 if (!atomic_inc_not_zero(&rb->mmap_count))
6043 goto aux_unlock;
6044
6045 if (rb_has_aux(rb)) {
6046 atomic_inc(&rb->aux_mmap_count);
6047 ret = 0;
6048 goto unlock;
6049 }
6050
6051 atomic_set(&rb->aux_mmap_count, 1);
6052 user_extra = nr_pages;
6053
6054 goto accounting;
6055 }
7b732a75 6056
7730d865 6057 /*
76369139 6058 * If we have rb pages ensure they're a power-of-two number, so we
7730d865
PZ
6059 * can do bitmasks instead of modulo.
6060 */
2ed11312 6061 if (nr_pages != 0 && !is_power_of_2(nr_pages))
37d81828
PM
6062 return -EINVAL;
6063
7b732a75 6064 if (vma_size != PAGE_SIZE * (1 + nr_pages))
37d81828
PM
6065 return -EINVAL;
6066
cdd6c482 6067 WARN_ON_ONCE(event->ctx->parent_ctx);
9bb5d40c 6068again:
cdd6c482 6069 mutex_lock(&event->mmap_mutex);
76369139 6070 if (event->rb) {
9bb5d40c 6071 if (event->rb->nr_pages != nr_pages) {
ebb3c4c4 6072 ret = -EINVAL;
9bb5d40c
PZ
6073 goto unlock;
6074 }
6075
6076 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
6077 /*
6078 * Raced against perf_mmap_close() through
6079 * perf_event_set_output(). Try again, hope for better
6080 * luck.
6081 */
6082 mutex_unlock(&event->mmap_mutex);
6083 goto again;
6084 }
6085
ebb3c4c4
PZ
6086 goto unlock;
6087 }
6088
789f90fc 6089 user_extra = nr_pages + 1;
45bfb2e5
PZ
6090
6091accounting:
cdd6c482 6092 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
a3862d3f
IM
6093
6094 /*
6095 * Increase the limit linearly with more CPUs:
6096 */
6097 user_lock_limit *= num_online_cpus();
6098
00346155
SL
6099 user_locked = atomic_long_read(&user->locked_vm);
6100
6101 /*
6102 * sysctl_perf_event_mlock may have changed, so that
6103 * user->locked_vm > user_lock_limit
6104 */
6105 if (user_locked > user_lock_limit)
6106 user_locked = user_lock_limit;
6107 user_locked += user_extra;
c5078f78 6108
c4b75479 6109 if (user_locked > user_lock_limit) {
d44248a4
SL
6110 /*
6111 * charge locked_vm until it hits user_lock_limit;
6112 * charge the rest from pinned_vm
6113 */
789f90fc 6114 extra = user_locked - user_lock_limit;
d44248a4
SL
6115 user_extra -= extra;
6116 }
7b732a75 6117
78d7d407 6118 lock_limit = rlimit(RLIMIT_MEMLOCK);
7b732a75 6119 lock_limit >>= PAGE_SHIFT;
70f8a3ca 6120 locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
7b732a75 6121
da97e184 6122 if ((locked > lock_limit) && perf_is_paranoid() &&
459ec28a 6123 !capable(CAP_IPC_LOCK)) {
ebb3c4c4
PZ
6124 ret = -EPERM;
6125 goto unlock;
6126 }
7b732a75 6127
45bfb2e5 6128 WARN_ON(!rb && event->rb);
906010b2 6129
d57e34fd 6130 if (vma->vm_flags & VM_WRITE)
76369139 6131 flags |= RING_BUFFER_WRITABLE;
d57e34fd 6132
76369139 6133 if (!rb) {
45bfb2e5
PZ
6134 rb = rb_alloc(nr_pages,
6135 event->attr.watermark ? event->attr.wakeup_watermark : 0,
6136 event->cpu, flags);
26cb63ad 6137
45bfb2e5
PZ
6138 if (!rb) {
6139 ret = -ENOMEM;
6140 goto unlock;
6141 }
43a21ea8 6142
45bfb2e5
PZ
6143 atomic_set(&rb->mmap_count, 1);
6144 rb->mmap_user = get_current_user();
6145 rb->mmap_locked = extra;
26cb63ad 6146
45bfb2e5 6147 ring_buffer_attach(event, rb);
ac9721f3 6148
45bfb2e5
PZ
6149 perf_event_init_userpage(event);
6150 perf_event_update_userpage(event);
6151 } else {
1a594131
AS
6152 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
6153 event->attr.aux_watermark, flags);
45bfb2e5
PZ
6154 if (!ret)
6155 rb->aux_mmap_locked = extra;
6156 }
9a0f05cb 6157
ebb3c4c4 6158unlock:
45bfb2e5
PZ
6159 if (!ret) {
6160 atomic_long_add(user_extra, &user->locked_vm);
70f8a3ca 6161 atomic64_add(extra, &vma->vm_mm->pinned_vm);
45bfb2e5 6162
ac9721f3 6163 atomic_inc(&event->mmap_count);
45bfb2e5
PZ
6164 } else if (rb) {
6165 atomic_dec(&rb->mmap_count);
6166 }
6167aux_unlock:
cdd6c482 6168 mutex_unlock(&event->mmap_mutex);
37d81828 6169
9bb5d40c
PZ
6170 /*
6171 * Since pinned accounting is per vm we cannot allow fork() to copy our
6172 * vma.
6173 */
26cb63ad 6174 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
37d81828 6175 vma->vm_ops = &perf_mmap_vmops;
7b732a75 6176
1e0fb9ec 6177 if (event->pmu->event_mapped)
bfe33492 6178 event->pmu->event_mapped(event, vma->vm_mm);
1e0fb9ec 6179
7b732a75 6180 return ret;
37d81828
PM
6181}
6182
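/*
 * Illustrative userspace-side sketch, not part of core.c: mapping the ring
 * buffer that perf_mmap() above accepts. The first page is the
 * perf_event_mmap_page control page; the data area must be a power-of-two
 * number of pages, matching the "1 + nr_pages" and is_power_of_2() checks.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <linux/perf_event.h>

static struct perf_event_mmap_page *map_ring_buffer(int perf_fd, unsigned int data_pages)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = (1 + data_pages) * page_size;	/* control page + 2^n data pages */
	void *base;

	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, perf_fd, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return NULL;
	}
	/* Sample data starts at base + page_size; see data_offset/data_size. */
	return base;
}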
3c446b3d
PZ
6183static int perf_fasync(int fd, struct file *filp, int on)
6184{
496ad9aa 6185 struct inode *inode = file_inode(filp);
cdd6c482 6186 struct perf_event *event = filp->private_data;
3c446b3d
PZ
6187 int retval;
6188
5955102c 6189 inode_lock(inode);
cdd6c482 6190 retval = fasync_helper(fd, filp, on, &event->fasync);
5955102c 6191 inode_unlock(inode);
3c446b3d
PZ
6192
6193 if (retval < 0)
6194 return retval;
6195
6196 return 0;
6197}
6198
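/*
 * Illustrative userspace-side sketch, not part of core.c: requesting SIGIO
 * on wakeups, which is what the fasync_helper() call in perf_fasync() above
 * arms. Error handling is omitted for brevity.
 */
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void request_sigio(int perf_fd, void (*handler)(int))
{
	signal(SIGIO, handler);					/* runs on each counter wakeup */
	fcntl(perf_fd, F_SETFL, fcntl(perf_fd, F_GETFL) | O_ASYNC);
	fcntl(perf_fd, F_SETOWN, getpid());			/* deliver the signal to this process */
}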
0793a61d 6199static const struct file_operations perf_fops = {
3326c1ce 6200 .llseek = no_llseek,
0793a61d
TG
6201 .release = perf_release,
6202 .read = perf_read,
6203 .poll = perf_poll,
d859e29f 6204 .unlocked_ioctl = perf_ioctl,
b3f20785 6205 .compat_ioctl = perf_compat_ioctl,
37d81828 6206 .mmap = perf_mmap,
3c446b3d 6207 .fasync = perf_fasync,
0793a61d
TG
6208};
6209
925d519a 6210/*
cdd6c482 6211 * Perf event wakeup
925d519a
PZ
6212 *
6213 * If there's data, ensure we set the poll() state and publish everything
6214 * to user-space before waking everybody up.
6215 */
6216
fed66e2c
PZ
6217static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
6218{
6219 /* only the parent has fasync state */
6220 if (event->parent)
6221 event = event->parent;
6222 return &event->fasync;
6223}
6224
cdd6c482 6225void perf_event_wakeup(struct perf_event *event)
925d519a 6226{
10c6db11 6227 ring_buffer_wakeup(event);
4c9e2542 6228
cdd6c482 6229 if (event->pending_kill) {
fed66e2c 6230 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
cdd6c482 6231 event->pending_kill = 0;
4c9e2542 6232 }
925d519a
PZ
6233}
6234
1d54ad94
PZ
6235static void perf_pending_event_disable(struct perf_event *event)
6236{
6237 int cpu = READ_ONCE(event->pending_disable);
6238
6239 if (cpu < 0)
6240 return;
6241
6242 if (cpu == smp_processor_id()) {
6243 WRITE_ONCE(event->pending_disable, -1);
6244 perf_event_disable_local(event);
6245 return;
6246 }
6247
6248 /*
6249 * CPU-A CPU-B
6250 *
6251 * perf_event_disable_inatomic()
6252 * @pending_disable = CPU-A;
6253 * irq_work_queue();
6254 *
6255 * sched-out
6256 * @pending_disable = -1;
6257 *
6258 * sched-in
6259 * perf_event_disable_inatomic()
6260 * @pending_disable = CPU-B;
6261 * irq_work_queue(); // FAILS
6262 *
6263 * irq_work_run()
6264 * perf_pending_event()
6265 *
6266 * But the event runs on CPU-B and wants disabling there.
6267 */
6268 irq_work_queue_on(&event->pending, cpu);
6269}
6270
e360adbe 6271static void perf_pending_event(struct irq_work *entry)
79f14641 6272{
1d54ad94 6273 struct perf_event *event = container_of(entry, struct perf_event, pending);
d525211f
PZ
6274 int rctx;
6275
6276 rctx = perf_swevent_get_recursion_context();
6277 /*
6278 * If we 'fail' here, that's OK, it means recursion is already disabled
6279 * and we won't recurse 'further'.
6280 */
79f14641 6281
1d54ad94 6282 perf_pending_event_disable(event);
79f14641 6283
cdd6c482
IM
6284 if (event->pending_wakeup) {
6285 event->pending_wakeup = 0;
6286 perf_event_wakeup(event);
79f14641 6287 }
d525211f
PZ
6288
6289 if (rctx >= 0)
6290 perf_swevent_put_recursion_context(rctx);
79f14641
PZ
6291}
6292
39447b38
ZY
6293/*
6294 * We assume there is only KVM supporting the callbacks.
6295 * Later on, we might change it to a list if there is
6296 * another virtualization implementation supporting the callbacks.
6297 */
6298struct perf_guest_info_callbacks *perf_guest_cbs;
6299
6300int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
6301{
6302 perf_guest_cbs = cbs;
6303 return 0;
6304}
6305EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
6306
6307int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
6308{
6309 perf_guest_cbs = NULL;
6310 return 0;
6311}
6312EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
6313
4018994f
JO
6314static void
6315perf_output_sample_regs(struct perf_output_handle *handle,
6316 struct pt_regs *regs, u64 mask)
6317{
6318 int bit;
29dd3288 6319 DECLARE_BITMAP(_mask, 64);
4018994f 6320
29dd3288
MS
6321 bitmap_from_u64(_mask, mask);
6322 for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {
4018994f
JO
6323 u64 val;
6324
6325 val = perf_reg_value(regs, bit);
6326 perf_output_put(handle, val);
6327 }
6328}
6329
60e2364e 6330static void perf_sample_regs_user(struct perf_regs *regs_user,
88a7c26a
AL
6331 struct pt_regs *regs,
6332 struct pt_regs *regs_user_copy)
4018994f 6333{
88a7c26a
AL
6334 if (user_mode(regs)) {
6335 regs_user->abi = perf_reg_abi(current);
2565711f 6336 regs_user->regs = regs;
085ebfe9 6337 } else if (!(current->flags & PF_KTHREAD)) {
88a7c26a 6338 perf_get_regs_user(regs_user, regs, regs_user_copy);
2565711f
PZ
6339 } else {
6340 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
6341 regs_user->regs = NULL;
4018994f
JO
6342 }
6343}
6344
60e2364e
SE
6345static void perf_sample_regs_intr(struct perf_regs *regs_intr,
6346 struct pt_regs *regs)
6347{
6348 regs_intr->regs = regs;
6349 regs_intr->abi = perf_reg_abi(current);
6350}
6351
6352
c5ebcedb
JO
6353/*
6354 * Get remaining task size from user stack pointer.
6355 *
6356 * It'd be better to use the stack VMA map and limit this more
9f014e3a 6357 * precisely, but there's no way to get it safely under interrupt,
c5ebcedb
JO
6358 * so using TASK_SIZE as limit.
6359 */
6360static u64 perf_ustack_task_size(struct pt_regs *regs)
6361{
6362 unsigned long addr = perf_user_stack_pointer(regs);
6363
6364 if (!addr || addr >= TASK_SIZE)
6365 return 0;
6366
6367 return TASK_SIZE - addr;
6368}
6369
6370static u16
6371perf_sample_ustack_size(u16 stack_size, u16 header_size,
6372 struct pt_regs *regs)
6373{
6374 u64 task_size;
6375
6376 /* No regs, no stack pointer, no dump. */
6377 if (!regs)
6378 return 0;
6379
6380 /*
6381 * Check whether the requested stack size fits into:
6382 * - TASK_SIZE
6383 * If it doesn't, we limit the size to TASK_SIZE.
6384 *
6385 * - the remaining sample size
6386 * If it doesn't, we shrink the stack size to
6387 * fit into the remaining sample size.
6388 */
6389
6390 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
6391 stack_size = min(stack_size, (u16) task_size);
6392
6393 /* Current header size plus static size and dynamic size. */
6394 header_size += 2 * sizeof(u64);
6395
6396 /* Do we fit in with the current stack dump size? */
6397 if ((u16) (header_size + stack_size) < header_size) {
6398 /*
6399 * If we overflow the maximum size for the sample,
6400 * we customize the stack dump size to fit in.
6401 */
6402 stack_size = USHRT_MAX - header_size - sizeof(u64);
6403 stack_size = round_up(stack_size, sizeof(u64));
6404 }
6405
6406 return stack_size;
6407}
6408
6409static void
6410perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
6411 struct pt_regs *regs)
6412{
6413 /* Case of a kernel thread, nothing to dump */
6414 if (!regs) {
6415 u64 size = 0;
6416 perf_output_put(handle, size);
6417 } else {
6418 unsigned long sp;
6419 unsigned int rem;
6420 u64 dyn_size;
02e18447 6421 mm_segment_t fs;
c5ebcedb
JO
6422
6423 /*
6424 * We dump:
6425 * static size
6426 * - the size requested by user or the best one we can fit
6427 * in to the sample max size
6428 * data
6429 * - user stack dump data
6430 * dynamic size
6431 * - the actual dumped size
6432 */
6433
6434 /* Static size. */
6435 perf_output_put(handle, dump_size);
6436
6437 /* Data. */
6438 sp = perf_user_stack_pointer(regs);
02e18447
YC
6439 fs = get_fs();
6440 set_fs(USER_DS);
c5ebcedb 6441 rem = __output_copy_user(handle, (void *) sp, dump_size);
02e18447 6442 set_fs(fs);
c5ebcedb
JO
6443 dyn_size = dump_size - rem;
6444
6445 perf_output_skip(handle, rem);
6446
6447 /* Dynamic size. */
6448 perf_output_put(handle, dyn_size);
6449 }
6450}
6451
a4faf00d
AS
6452static unsigned long perf_prepare_sample_aux(struct perf_event *event,
6453 struct perf_sample_data *data,
6454 size_t size)
6455{
6456 struct perf_event *sampler = event->aux_event;
56de4e8f 6457 struct perf_buffer *rb;
a4faf00d
AS
6458
6459 data->aux_size = 0;
6460
6461 if (!sampler)
6462 goto out;
6463
6464 if (WARN_ON_ONCE(READ_ONCE(sampler->state) != PERF_EVENT_STATE_ACTIVE))
6465 goto out;
6466
6467 if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id()))
6468 goto out;
6469
6470 rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler);
6471 if (!rb)
6472 goto out;
6473
6474 /*
6475 * If this is an NMI hit inside sampling code, don't take
6476 * the sample. See also perf_aux_sample_output().
6477 */
6478 if (READ_ONCE(rb->aux_in_sampling)) {
6479 data->aux_size = 0;
6480 } else {
6481 size = min_t(size_t, size, perf_aux_size(rb));
6482 data->aux_size = ALIGN(size, sizeof(u64));
6483 }
6484 ring_buffer_put(rb);
6485
6486out:
6487 return data->aux_size;
6488}
6489
56de4e8f 6490long perf_pmu_snapshot_aux(struct perf_buffer *rb,
a4faf00d
AS
6491 struct perf_event *event,
6492 struct perf_output_handle *handle,
6493 unsigned long size)
6494{
6495 unsigned long flags;
6496 long ret;
6497
6498 /*
6499 * Normal ->start()/->stop() callbacks run in IRQ mode in scheduler
6500 * paths. If we start calling them in NMI context, they may race with
6501 * the IRQ ones, that is, for example, re-starting an event that's just
6502 * been stopped, which is why we're using a separate callback that
6503 * doesn't change the event state.
6504 *
6505 * IRQs need to be disabled to prevent IPIs from racing with us.
6506 */
6507 local_irq_save(flags);
6508 /*
6509 * Guard against NMI hits inside the critical section;
6510 * see also perf_prepare_sample_aux().
6511 */
6512 WRITE_ONCE(rb->aux_in_sampling, 1);
6513 barrier();
6514
6515 ret = event->pmu->snapshot_aux(event, handle, size);
6516
6517 barrier();
6518 WRITE_ONCE(rb->aux_in_sampling, 0);
6519 local_irq_restore(flags);
6520
6521 return ret;
6522}
6523
6524static void perf_aux_sample_output(struct perf_event *event,
6525 struct perf_output_handle *handle,
6526 struct perf_sample_data *data)
6527{
6528 struct perf_event *sampler = event->aux_event;
56de4e8f 6529 struct perf_buffer *rb;
a4faf00d 6530 unsigned long pad;
a4faf00d
AS
6531 long size;
6532
6533 if (WARN_ON_ONCE(!sampler || !data->aux_size))
6534 return;
6535
6536 rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler);
6537 if (!rb)
6538 return;
6539
6540 size = perf_pmu_snapshot_aux(rb, sampler, handle, data->aux_size);
6541
6542 /*
6543 * An error here means that perf_output_copy() failed (returned a
6544 * non-zero surplus that it didn't copy), which in its current
6545 * enlightened implementation is not possible. If that changes, we'd
6546 * like to know.
6547 */
6548 if (WARN_ON_ONCE(size < 0))
6549 goto out_put;
6550
6551 /*
6552 * The pad comes from ALIGN()ing data->aux_size up to u64 in
6553 * perf_prepare_sample_aux(), so should not be more than that.
6554 */
6555 pad = data->aux_size - size;
6556 if (WARN_ON_ONCE(pad >= sizeof(u64)))
6557 pad = 8;
6558
6559 if (pad) {
6560 u64 zero = 0;
6561 perf_output_copy(handle, &zero, pad);
6562 }
6563
6564out_put:
6565 ring_buffer_put(rb);
6566}
6567
c980d109
ACM
6568static void __perf_event_header__init_id(struct perf_event_header *header,
6569 struct perf_sample_data *data,
6570 struct perf_event *event)
6844c09d
ACM
6571{
6572 u64 sample_type = event->attr.sample_type;
6573
6574 data->type = sample_type;
6575 header->size += event->id_header_size;
6576
6577 if (sample_type & PERF_SAMPLE_TID) {
6578 /* namespace issues */
6579 data->tid_entry.pid = perf_event_pid(event, current);
6580 data->tid_entry.tid = perf_event_tid(event, current);
6581 }
6582
6583 if (sample_type & PERF_SAMPLE_TIME)
34f43927 6584 data->time = perf_event_clock(event);
6844c09d 6585
ff3d527c 6586 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
6844c09d
ACM
6587 data->id = primary_event_id(event);
6588
6589 if (sample_type & PERF_SAMPLE_STREAM_ID)
6590 data->stream_id = event->id;
6591
6592 if (sample_type & PERF_SAMPLE_CPU) {
6593 data->cpu_entry.cpu = raw_smp_processor_id();
6594 data->cpu_entry.reserved = 0;
6595 }
6596}
6597
76369139
FW
6598void perf_event_header__init_id(struct perf_event_header *header,
6599 struct perf_sample_data *data,
6600 struct perf_event *event)
c980d109
ACM
6601{
6602 if (event->attr.sample_id_all)
6603 __perf_event_header__init_id(header, data, event);
6604}
6605
6606static void __perf_event__output_id_sample(struct perf_output_handle *handle,
6607 struct perf_sample_data *data)
6608{
6609 u64 sample_type = data->type;
6610
6611 if (sample_type & PERF_SAMPLE_TID)
6612 perf_output_put(handle, data->tid_entry);
6613
6614 if (sample_type & PERF_SAMPLE_TIME)
6615 perf_output_put(handle, data->time);
6616
6617 if (sample_type & PERF_SAMPLE_ID)
6618 perf_output_put(handle, data->id);
6619
6620 if (sample_type & PERF_SAMPLE_STREAM_ID)
6621 perf_output_put(handle, data->stream_id);
6622
6623 if (sample_type & PERF_SAMPLE_CPU)
6624 perf_output_put(handle, data->cpu_entry);
ff3d527c
AH
6625
6626 if (sample_type & PERF_SAMPLE_IDENTIFIER)
6627 perf_output_put(handle, data->id);
c980d109
ACM
6628}
6629
76369139
FW
6630void perf_event__output_id_sample(struct perf_event *event,
6631 struct perf_output_handle *handle,
6632 struct perf_sample_data *sample)
c980d109
ACM
6633{
6634 if (event->attr.sample_id_all)
6635 __perf_event__output_id_sample(handle, sample);
6636}
6637
3dab77fb 6638static void perf_output_read_one(struct perf_output_handle *handle,
eed01528
SE
6639 struct perf_event *event,
6640 u64 enabled, u64 running)
3dab77fb 6641{
cdd6c482 6642 u64 read_format = event->attr.read_format;
3dab77fb
PZ
6643 u64 values[4];
6644 int n = 0;
6645
b5e58793 6646 values[n++] = perf_event_count(event);
3dab77fb 6647 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
eed01528 6648 values[n++] = enabled +
cdd6c482 6649 atomic64_read(&event->child_total_time_enabled);
3dab77fb
PZ
6650 }
6651 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
eed01528 6652 values[n++] = running +
cdd6c482 6653 atomic64_read(&event->child_total_time_running);
3dab77fb
PZ
6654 }
6655 if (read_format & PERF_FORMAT_ID)
cdd6c482 6656 values[n++] = primary_event_id(event);
3dab77fb 6657
76369139 6658 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
6659}
6660
3dab77fb 6661static void perf_output_read_group(struct perf_output_handle *handle,
eed01528
SE
6662 struct perf_event *event,
6663 u64 enabled, u64 running)
3dab77fb 6664{
cdd6c482
IM
6665 struct perf_event *leader = event->group_leader, *sub;
6666 u64 read_format = event->attr.read_format;
3dab77fb
PZ
6667 u64 values[5];
6668 int n = 0;
6669
6670 values[n++] = 1 + leader->nr_siblings;
6671
6672 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
eed01528 6673 values[n++] = enabled;
3dab77fb
PZ
6674
6675 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
eed01528 6676 values[n++] = running;
3dab77fb 6677
9e5b127d
PZ
6678 if ((leader != event) &&
6679 (leader->state == PERF_EVENT_STATE_ACTIVE))
3dab77fb
PZ
6680 leader->pmu->read(leader);
6681
b5e58793 6682 values[n++] = perf_event_count(leader);
3dab77fb 6683 if (read_format & PERF_FORMAT_ID)
cdd6c482 6684 values[n++] = primary_event_id(leader);
3dab77fb 6685
76369139 6686 __output_copy(handle, values, n * sizeof(u64));
3dab77fb 6687
edb39592 6688 for_each_sibling_event(sub, leader) {
3dab77fb
PZ
6689 n = 0;
6690
6f5ab001
JO
6691 if ((sub != event) &&
6692 (sub->state == PERF_EVENT_STATE_ACTIVE))
3dab77fb
PZ
6693 sub->pmu->read(sub);
6694
b5e58793 6695 values[n++] = perf_event_count(sub);
3dab77fb 6696 if (read_format & PERF_FORMAT_ID)
cdd6c482 6697 values[n++] = primary_event_id(sub);
3dab77fb 6698
76369139 6699 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
6700 }
6701}
6702
eed01528
SE
6703#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
6704 PERF_FORMAT_TOTAL_TIME_RUNNING)
6705
ba5213ae
PZ
6706/*
6707 * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
6708 *
6709 * The problem is that it's both hard and excessively expensive to iterate the
6710 * child list, not to mention that it's impossible to IPI the children running
6711 * on another CPU, from interrupt/NMI context.
6712 */
3dab77fb 6713static void perf_output_read(struct perf_output_handle *handle,
cdd6c482 6714 struct perf_event *event)
3dab77fb 6715{
e3f3541c 6716 u64 enabled = 0, running = 0, now;
eed01528
SE
6717 u64 read_format = event->attr.read_format;
6718
6719 /*
6720 * compute total_time_enabled, total_time_running
6721 * based on snapshot values taken when the event
6722 * was last scheduled in.
6723 *
6724 * we cannot simply call update_context_time()
6725 * because of locking issues, as we are called in
6726 * NMI context
6727 */
c4794295 6728 if (read_format & PERF_FORMAT_TOTAL_TIMES)
e3f3541c 6729 calc_timer_values(event, &now, &enabled, &running);
eed01528 6730
cdd6c482 6731 if (event->attr.read_format & PERF_FORMAT_GROUP)
eed01528 6732 perf_output_read_group(handle, event, enabled, running);
3dab77fb 6733 else
eed01528 6734 perf_output_read_one(handle, event, enabled, running);
3dab77fb
PZ
6735}
6736
bbfd5e4f
KL
6737static inline bool perf_sample_save_hw_index(struct perf_event *event)
6738{
6739 return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
6740}
6741
5622f295
MM
6742void perf_output_sample(struct perf_output_handle *handle,
6743 struct perf_event_header *header,
6744 struct perf_sample_data *data,
cdd6c482 6745 struct perf_event *event)
5622f295
MM
6746{
6747 u64 sample_type = data->type;
6748
6749 perf_output_put(handle, *header);
6750
ff3d527c
AH
6751 if (sample_type & PERF_SAMPLE_IDENTIFIER)
6752 perf_output_put(handle, data->id);
6753
5622f295
MM
6754 if (sample_type & PERF_SAMPLE_IP)
6755 perf_output_put(handle, data->ip);
6756
6757 if (sample_type & PERF_SAMPLE_TID)
6758 perf_output_put(handle, data->tid_entry);
6759
6760 if (sample_type & PERF_SAMPLE_TIME)
6761 perf_output_put(handle, data->time);
6762
6763 if (sample_type & PERF_SAMPLE_ADDR)
6764 perf_output_put(handle, data->addr);
6765
6766 if (sample_type & PERF_SAMPLE_ID)
6767 perf_output_put(handle, data->id);
6768
6769 if (sample_type & PERF_SAMPLE_STREAM_ID)
6770 perf_output_put(handle, data->stream_id);
6771
6772 if (sample_type & PERF_SAMPLE_CPU)
6773 perf_output_put(handle, data->cpu_entry);
6774
6775 if (sample_type & PERF_SAMPLE_PERIOD)
6776 perf_output_put(handle, data->period);
6777
6778 if (sample_type & PERF_SAMPLE_READ)
cdd6c482 6779 perf_output_read(handle, event);
5622f295
MM
6780
6781 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
99e818cc 6782 int size = 1;
5622f295 6783
99e818cc
JO
6784 size += data->callchain->nr;
6785 size *= sizeof(u64);
6786 __output_copy(handle, data->callchain, size);
5622f295
MM
6787 }
6788
6789 if (sample_type & PERF_SAMPLE_RAW) {
7e3f977e
DB
6790 struct perf_raw_record *raw = data->raw;
6791
6792 if (raw) {
6793 struct perf_raw_frag *frag = &raw->frag;
6794
6795 perf_output_put(handle, raw->size);
6796 do {
6797 if (frag->copy) {
6798 __output_custom(handle, frag->copy,
6799 frag->data, frag->size);
6800 } else {
6801 __output_copy(handle, frag->data,
6802 frag->size);
6803 }
6804 if (perf_raw_frag_last(frag))
6805 break;
6806 frag = frag->next;
6807 } while (1);
6808 if (frag->pad)
6809 __output_skip(handle, NULL, frag->pad);
5622f295
MM
6810 } else {
6811 struct {
6812 u32 size;
6813 u32 data;
6814 } raw = {
6815 .size = sizeof(u32),
6816 .data = 0,
6817 };
6818 perf_output_put(handle, raw);
6819 }
6820 }
a7ac67ea 6821
bce38cd5
SE
6822 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
6823 if (data->br_stack) {
6824 size_t size;
6825
6826 size = data->br_stack->nr
6827 * sizeof(struct perf_branch_entry);
6828
6829 perf_output_put(handle, data->br_stack->nr);
bbfd5e4f
KL
6830 if (perf_sample_save_hw_index(event))
6831 perf_output_put(handle, data->br_stack->hw_idx);
bce38cd5
SE
6832 perf_output_copy(handle, data->br_stack->entries, size);
6833 } else {
6834 /*
6835 * we always store at least the value of nr
6836 */
6837 u64 nr = 0;
6838 perf_output_put(handle, nr);
6839 }
6840 }
4018994f
JO
6841
6842 if (sample_type & PERF_SAMPLE_REGS_USER) {
6843 u64 abi = data->regs_user.abi;
6844
6845 /*
6846 * If there are no regs to dump, notice it through
6847 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
6848 */
6849 perf_output_put(handle, abi);
6850
6851 if (abi) {
6852 u64 mask = event->attr.sample_regs_user;
6853 perf_output_sample_regs(handle,
6854 data->regs_user.regs,
6855 mask);
6856 }
6857 }
c5ebcedb 6858
a5cdd40c 6859 if (sample_type & PERF_SAMPLE_STACK_USER) {
c5ebcedb
JO
6860 perf_output_sample_ustack(handle,
6861 data->stack_user_size,
6862 data->regs_user.regs);
a5cdd40c 6863 }
c3feedf2
AK
6864
6865 if (sample_type & PERF_SAMPLE_WEIGHT)
6866 perf_output_put(handle, data->weight);
d6be9ad6
SE
6867
6868 if (sample_type & PERF_SAMPLE_DATA_SRC)
6869 perf_output_put(handle, data->data_src.val);
a5cdd40c 6870
fdfbbd07
AK
6871 if (sample_type & PERF_SAMPLE_TRANSACTION)
6872 perf_output_put(handle, data->txn);
6873
60e2364e
SE
6874 if (sample_type & PERF_SAMPLE_REGS_INTR) {
6875 u64 abi = data->regs_intr.abi;
6876 /*
6877 * If there are no regs to dump, notice it through
6878 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
6879 */
6880 perf_output_put(handle, abi);
6881
6882 if (abi) {
6883 u64 mask = event->attr.sample_regs_intr;
6884
6885 perf_output_sample_regs(handle,
6886 data->regs_intr.regs,
6887 mask);
6888 }
6889 }
6890
fc7ce9c7
KL
6891 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
6892 perf_output_put(handle, data->phys_addr);
6893
6546b19f
NK
6894 if (sample_type & PERF_SAMPLE_CGROUP)
6895 perf_output_put(handle, data->cgroup);
6896
a4faf00d
AS
6897 if (sample_type & PERF_SAMPLE_AUX) {
6898 perf_output_put(handle, data->aux_size);
6899
6900 if (data->aux_size)
6901 perf_aux_sample_output(event, handle, data);
6902 }
6903
a5cdd40c
PZ
6904 if (!event->attr.watermark) {
6905 int wakeup_events = event->attr.wakeup_events;
6906
6907 if (wakeup_events) {
56de4e8f 6908 struct perf_buffer *rb = handle->rb;
a5cdd40c
PZ
6909 int events = local_inc_return(&rb->events);
6910
6911 if (events >= wakeup_events) {
6912 local_sub(wakeup_events, &rb->events);
6913 local_inc(&rb->wakeup);
6914 }
6915 }
6916 }
5622f295
MM
6917}
6918
fc7ce9c7
KL
6919static u64 perf_virt_to_phys(u64 virt)
6920{
6921 u64 phys_addr = 0;
6922 struct page *p = NULL;
6923
6924 if (!virt)
6925 return 0;
6926
6927 if (virt >= TASK_SIZE) {
6928 /* If it's vmalloc()d memory, leave phys_addr as 0 */
6929 if (virt_addr_valid((void *)(uintptr_t)virt) &&
6930 !(virt >= VMALLOC_START && virt < VMALLOC_END))
6931 phys_addr = (u64)virt_to_phys((void *)(uintptr_t)virt);
6932 } else {
6933 /*
6934 * Walking the page tables for a user address.
6935 * Interrupts are disabled, which prevents any teardown
6936 * of the page tables.
dadbb612 6937 * Try IRQ-safe get_user_page_fast_only first.
fc7ce9c7
KL
6938 * If failed, leave phys_addr as 0.
6939 */
d3296fb3
JO
6940 if (current->mm != NULL) {
6941 pagefault_disable();
dadbb612 6942 if (get_user_page_fast_only(virt, 0, &p))
d3296fb3
JO
6943 phys_addr = page_to_phys(p) + virt % PAGE_SIZE;
6944 pagefault_enable();
6945 }
fc7ce9c7
KL
6946
6947 if (p)
6948 put_page(p);
6949 }
6950
6951 return phys_addr;
6952}
6953
99e818cc
JO
6954static struct perf_callchain_entry __empty_callchain = { .nr = 0, };
6955
6cbc304f 6956struct perf_callchain_entry *
8cf7e0e2
JO
6957perf_callchain(struct perf_event *event, struct pt_regs *regs)
6958{
6959 bool kernel = !event->attr.exclude_callchain_kernel;
6960 bool user = !event->attr.exclude_callchain_user;
6961 /* Disallow cross-task user callchains. */
6962 bool crosstask = event->ctx->task && event->ctx->task != current;
6963 const u32 max_stack = event->attr.sample_max_stack;
99e818cc 6964 struct perf_callchain_entry *callchain;
8cf7e0e2
JO
6965
6966 if (!kernel && !user)
99e818cc 6967 return &__empty_callchain;
8cf7e0e2 6968
99e818cc
JO
6969 callchain = get_perf_callchain(regs, 0, kernel, user,
6970 max_stack, crosstask, true);
6971 return callchain ?: &__empty_callchain;
8cf7e0e2
JO
6972}
6973
5622f295
MM
6974void perf_prepare_sample(struct perf_event_header *header,
6975 struct perf_sample_data *data,
cdd6c482 6976 struct perf_event *event,
5622f295 6977 struct pt_regs *regs)
7b732a75 6978{
cdd6c482 6979 u64 sample_type = event->attr.sample_type;
7b732a75 6980
cdd6c482 6981 header->type = PERF_RECORD_SAMPLE;
c320c7b7 6982 header->size = sizeof(*header) + event->header_size;
5622f295
MM
6983
6984 header->misc = 0;
6985 header->misc |= perf_misc_flags(regs);
6fab0192 6986
c980d109 6987 __perf_event_header__init_id(header, data, event);
6844c09d 6988
c320c7b7 6989 if (sample_type & PERF_SAMPLE_IP)
5622f295
MM
6990 data->ip = perf_instruction_pointer(regs);
6991
b23f3325 6992 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5622f295 6993 int size = 1;
394ee076 6994
6cbc304f
PZ
6995 if (!(sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
6996 data->callchain = perf_callchain(event, regs);
6997
99e818cc 6998 size += data->callchain->nr;
5622f295
MM
6999
7000 header->size += size * sizeof(u64);
394ee076
PZ
7001 }
7002
3a43ce68 7003 if (sample_type & PERF_SAMPLE_RAW) {
7e3f977e
DB
7004 struct perf_raw_record *raw = data->raw;
7005 int size;
7006
7007 if (raw) {
7008 struct perf_raw_frag *frag = &raw->frag;
7009 u32 sum = 0;
7010
7011 do {
7012 sum += frag->size;
7013 if (perf_raw_frag_last(frag))
7014 break;
7015 frag = frag->next;
7016 } while (1);
7017
7018 size = round_up(sum + sizeof(u32), sizeof(u64));
7019 raw->size = size - sizeof(u32);
7020 frag->pad = raw->size - sum;
7021 } else {
7022 size = sizeof(u64);
7023 }
a044560c 7024
7e3f977e 7025 header->size += size;
7f453c24 7026 }
bce38cd5
SE
7027
7028 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
7029 int size = sizeof(u64); /* nr */
7030 if (data->br_stack) {
bbfd5e4f
KL
7031 if (perf_sample_save_hw_index(event))
7032 size += sizeof(u64);
7033
bce38cd5
SE
7034 size += data->br_stack->nr
7035 * sizeof(struct perf_branch_entry);
7036 }
7037 header->size += size;
7038 }
4018994f 7039
2565711f 7040 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
88a7c26a
AL
7041 perf_sample_regs_user(&data->regs_user, regs,
7042 &data->regs_user_copy);
2565711f 7043
4018994f
JO
7044 if (sample_type & PERF_SAMPLE_REGS_USER) {
7045 /* regs dump ABI info */
7046 int size = sizeof(u64);
7047
4018994f
JO
7048 if (data->regs_user.regs) {
7049 u64 mask = event->attr.sample_regs_user;
7050 size += hweight64(mask) * sizeof(u64);
7051 }
7052
7053 header->size += size;
7054 }
c5ebcedb
JO
7055
7056 if (sample_type & PERF_SAMPLE_STACK_USER) {
7057 /*
9f014e3a 7058 * Either we need PERF_SAMPLE_STACK_USER bit to be always
c5ebcedb
JO
7059 * processed as the last one or have additional check added
7060 * in case new sample type is added, because we could eat
7061 * up the rest of the sample size.
7062 */
c5ebcedb
JO
7063 u16 stack_size = event->attr.sample_stack_user;
7064 u16 size = sizeof(u64);
7065
c5ebcedb 7066 stack_size = perf_sample_ustack_size(stack_size, header->size,
2565711f 7067 data->regs_user.regs);
c5ebcedb
JO
7068
7069 /*
7070 * If there is something to dump, add space for the dump
7071 * itself and for the field that tells the dynamic size,
 7072 * which is how many bytes were actually dumped.
7073 */
7074 if (stack_size)
7075 size += sizeof(u64) + stack_size;
7076
7077 data->stack_user_size = stack_size;
7078 header->size += size;
7079 }
60e2364e
SE
7080
7081 if (sample_type & PERF_SAMPLE_REGS_INTR) {
7082 /* regs dump ABI info */
7083 int size = sizeof(u64);
7084
7085 perf_sample_regs_intr(&data->regs_intr, regs);
7086
7087 if (data->regs_intr.regs) {
7088 u64 mask = event->attr.sample_regs_intr;
7089
7090 size += hweight64(mask) * sizeof(u64);
7091 }
7092
7093 header->size += size;
7094 }
fc7ce9c7
KL
7095
7096 if (sample_type & PERF_SAMPLE_PHYS_ADDR)
7097 data->phys_addr = perf_virt_to_phys(data->addr);
a4faf00d 7098
6546b19f
NK
7099#ifdef CONFIG_CGROUP_PERF
7100 if (sample_type & PERF_SAMPLE_CGROUP) {
7101 struct cgroup *cgrp;
7102
7103 /* protected by RCU */
7104 cgrp = task_css_check(current, perf_event_cgrp_id, 1)->cgroup;
7105 data->cgroup = cgroup_id(cgrp);
7106 }
7107#endif
7108
a4faf00d
AS
7109 if (sample_type & PERF_SAMPLE_AUX) {
7110 u64 size;
7111
7112 header->size += sizeof(u64); /* size */
7113
7114 /*
7115 * Given the 16bit nature of header::size, an AUX sample can
7116 * easily overflow it, what with all the preceding sample bits.
7117 * Make sure this doesn't happen by using up to U16_MAX bytes
7118 * per sample in total (rounded down to 8 byte boundary).
7119 */
7120 size = min_t(size_t, U16_MAX - header->size,
7121 event->attr.aux_sample_size);
7122 size = rounddown(size, 8);
7123 size = perf_prepare_sample_aux(event, data, size);
7124
7125 WARN_ON_ONCE(size + header->size > U16_MAX);
7126 header->size += size;
7127 }
 7128 /*
 7129 * If you're adding more sample types here, you likely need to do
 7130 * something about the overflowing header::size, like repurposing the
 7131 * lowest 3 bits of size, which should always be zero at the moment.
 7132 * This raises a more important question: do we really need 512k sized
 7133 * samples, and why? Good argumentation is in order for whatever you
 7134 * do here next.
 7135 */
7136 WARN_ON_ONCE(header->size & 7);
5622f295 7137}
7f453c24 7138
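To make the size accounting in perf_prepare_sample() concrete, here is a stand-alone sketch of how a few of the optional sample parts add up and how the PERF_SAMPLE_AUX clamp keeps the 16-bit header::size from overflowing. hweight64() and rounddown8() are local stand-ins for the kernel helpers, the starting size ignores event->header_size, and the numbers are made up.

#include <stdint.h>
#include <stdio.h>

#define U16_MAX		0xffffU
static uint64_t hweight64(uint64_t m) { return __builtin_popcountll(m); }
static uint64_t rounddown8(uint64_t v) { return v & ~7ULL; }

int main(void)
{
	uint64_t size = 8;			/* struct perf_event_header */
	uint64_t nr_callchain = 16;		/* frames captured */
	uint64_t sample_regs_user = 0x1ff;	/* 9 registers requested */
	uint64_t aux_sample_size = 1 << 20;	/* caller asks for 1MiB of AUX */

	size += (1 + nr_callchain) * 8;		/* callchain: nr + entries */
	size += 8 + hweight64(sample_regs_user) * 8; /* regs: ABI word + regs */

	size += 8;				/* u64 holding the AUX size */
	uint64_t aux = U16_MAX - size;		/* clamp so a u16 can't overflow */
	if (aux > aux_sample_size)
		aux = aux_sample_size;
	aux = rounddown8(aux);
	size += aux;

	printf("header.size = %llu\n", (unsigned long long)size);
	return 0;
}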
56201969 7139static __always_inline int
9ecda41a
WN
7140__perf_event_output(struct perf_event *event,
7141 struct perf_sample_data *data,
7142 struct pt_regs *regs,
7143 int (*output_begin)(struct perf_output_handle *,
7144 struct perf_event *,
7145 unsigned int))
5622f295
MM
7146{
7147 struct perf_output_handle handle;
7148 struct perf_event_header header;
56201969 7149 int err;
689802b2 7150
927c7a9e
FW
7151 /* protect the callchain buffers */
7152 rcu_read_lock();
7153
cdd6c482 7154 perf_prepare_sample(&header, data, event, regs);
5c148194 7155
56201969
ACM
7156 err = output_begin(&handle, event, header.size);
7157 if (err)
927c7a9e 7158 goto exit;
0322cd6e 7159
cdd6c482 7160 perf_output_sample(&handle, &header, data, event);
f413cdb8 7161
8a057d84 7162 perf_output_end(&handle);
927c7a9e
FW
7163
7164exit:
7165 rcu_read_unlock();
56201969 7166 return err;
0322cd6e
PZ
7167}
7168
9ecda41a
WN
7169void
7170perf_event_output_forward(struct perf_event *event,
7171 struct perf_sample_data *data,
7172 struct pt_regs *regs)
7173{
7174 __perf_event_output(event, data, regs, perf_output_begin_forward);
7175}
7176
7177void
7178perf_event_output_backward(struct perf_event *event,
7179 struct perf_sample_data *data,
7180 struct pt_regs *regs)
7181{
7182 __perf_event_output(event, data, regs, perf_output_begin_backward);
7183}
7184
56201969 7185int
9ecda41a
WN
7186perf_event_output(struct perf_event *event,
7187 struct perf_sample_data *data,
7188 struct pt_regs *regs)
7189{
56201969 7190 return __perf_event_output(event, data, regs, perf_output_begin);
9ecda41a
WN
7191}
7192
38b200d6 7193/*
cdd6c482 7194 * read event_id
38b200d6
PZ
7195 */
7196
7197struct perf_read_event {
7198 struct perf_event_header header;
7199
7200 u32 pid;
7201 u32 tid;
38b200d6
PZ
7202};
7203
7204static void
cdd6c482 7205perf_event_read_event(struct perf_event *event,
38b200d6
PZ
7206 struct task_struct *task)
7207{
7208 struct perf_output_handle handle;
c980d109 7209 struct perf_sample_data sample;
dfc65094 7210 struct perf_read_event read_event = {
38b200d6 7211 .header = {
cdd6c482 7212 .type = PERF_RECORD_READ,
38b200d6 7213 .misc = 0,
c320c7b7 7214 .size = sizeof(read_event) + event->read_size,
38b200d6 7215 },
cdd6c482
IM
7216 .pid = perf_event_pid(event, task),
7217 .tid = perf_event_tid(event, task),
38b200d6 7218 };
3dab77fb 7219 int ret;
38b200d6 7220
c980d109 7221 perf_event_header__init_id(&read_event.header, &sample, event);
a7ac67ea 7222 ret = perf_output_begin(&handle, event, read_event.header.size);
38b200d6
PZ
7223 if (ret)
7224 return;
7225
dfc65094 7226 perf_output_put(&handle, read_event);
cdd6c482 7227 perf_output_read(&handle, event);
c980d109 7228 perf_event__output_id_sample(event, &handle, &sample);
3dab77fb 7229
38b200d6
PZ
7230 perf_output_end(&handle);
7231}
7232
aab5b71e 7233typedef void (perf_iterate_f)(struct perf_event *event, void *data);
52d857a8
JO
7234
7235static void
aab5b71e
PZ
7236perf_iterate_ctx(struct perf_event_context *ctx,
7237 perf_iterate_f output,
b73e4fef 7238 void *data, bool all)
52d857a8
JO
7239{
7240 struct perf_event *event;
7241
7242 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
b73e4fef
AS
7243 if (!all) {
7244 if (event->state < PERF_EVENT_STATE_INACTIVE)
7245 continue;
7246 if (!event_filter_match(event))
7247 continue;
7248 }
7249
67516844 7250 output(event, data);
52d857a8
JO
7251 }
7252}
7253
aab5b71e 7254static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
f2fb6bef
KL
7255{
7256 struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
7257 struct perf_event *event;
7258
7259 list_for_each_entry_rcu(event, &pel->list, sb_list) {
0b8f1e2e
PZ
7260 /*
7261 * Skip events that are not fully formed yet; ensure that
7262 * if we observe event->ctx, both event and ctx will be
7263 * complete enough. See perf_install_in_context().
7264 */
7265 if (!smp_load_acquire(&event->ctx))
7266 continue;
7267
f2fb6bef
KL
7268 if (event->state < PERF_EVENT_STATE_INACTIVE)
7269 continue;
7270 if (!event_filter_match(event))
7271 continue;
7272 output(event, data);
7273 }
7274}
7275
aab5b71e
PZ
7276/*
7277 * Iterate all events that need to receive side-band events.
7278 *
 7279 * For new callers: ensure that account_pmu_sb_event() includes
 7280 * your event, otherwise it might not get delivered.
7281 */
52d857a8 7282static void
aab5b71e 7283perf_iterate_sb(perf_iterate_f output, void *data,
52d857a8
JO
7284 struct perf_event_context *task_ctx)
7285{
52d857a8 7286 struct perf_event_context *ctx;
52d857a8
JO
7287 int ctxn;
7288
aab5b71e
PZ
7289 rcu_read_lock();
7290 preempt_disable();
7291
4e93ad60 7292 /*
aab5b71e
PZ
7293 * If we have task_ctx != NULL we only notify the task context itself.
7294 * The task_ctx is set only for EXIT events before releasing task
4e93ad60
JO
7295 * context.
7296 */
7297 if (task_ctx) {
aab5b71e
PZ
7298 perf_iterate_ctx(task_ctx, output, data, false);
7299 goto done;
4e93ad60
JO
7300 }
7301
aab5b71e 7302 perf_iterate_sb_cpu(output, data);
f2fb6bef
KL
7303
7304 for_each_task_context_nr(ctxn) {
52d857a8
JO
7305 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
7306 if (ctx)
aab5b71e 7307 perf_iterate_ctx(ctx, output, data, false);
52d857a8 7308 }
aab5b71e 7309done:
f2fb6bef 7310 preempt_enable();
52d857a8 7311 rcu_read_unlock();
95ff4ca2
AS
7312}
7313
375637bc
AS
7314/*
7315 * Clear all file-based filters at exec, they'll have to be
7316 * re-instated when/if these objects are mmapped again.
7317 */
7318static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
7319{
7320 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
7321 struct perf_addr_filter *filter;
7322 unsigned int restart = 0, count = 0;
7323 unsigned long flags;
7324
7325 if (!has_addr_filter(event))
7326 return;
7327
7328 raw_spin_lock_irqsave(&ifh->lock, flags);
7329 list_for_each_entry(filter, &ifh->list, entry) {
9511bce9 7330 if (filter->path.dentry) {
c60f83b8
AS
7331 event->addr_filter_ranges[count].start = 0;
7332 event->addr_filter_ranges[count].size = 0;
375637bc
AS
7333 restart++;
7334 }
7335
7336 count++;
7337 }
7338
7339 if (restart)
7340 event->addr_filters_gen++;
7341 raw_spin_unlock_irqrestore(&ifh->lock, flags);
7342
7343 if (restart)
767ae086 7344 perf_event_stop(event, 1);
375637bc
AS
7345}
7346
7347void perf_event_exec(void)
7348{
7349 struct perf_event_context *ctx;
7350 int ctxn;
7351
7352 rcu_read_lock();
7353 for_each_task_context_nr(ctxn) {
7354 ctx = current->perf_event_ctxp[ctxn];
7355 if (!ctx)
7356 continue;
7357
7358 perf_event_enable_on_exec(ctxn);
7359
aab5b71e 7360 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
375637bc
AS
7361 true);
7362 }
7363 rcu_read_unlock();
7364}
7365
95ff4ca2 7366struct remote_output {
56de4e8f 7367 struct perf_buffer *rb;
95ff4ca2
AS
7368 int err;
7369};
7370
7371static void __perf_event_output_stop(struct perf_event *event, void *data)
7372{
7373 struct perf_event *parent = event->parent;
7374 struct remote_output *ro = data;
56de4e8f 7375 struct perf_buffer *rb = ro->rb;
375637bc
AS
7376 struct stop_event_data sd = {
7377 .event = event,
7378 };
95ff4ca2
AS
7379
7380 if (!has_aux(event))
7381 return;
7382
7383 if (!parent)
7384 parent = event;
7385
7386 /*
7387 * In case of inheritance, it will be the parent that links to the
767ae086
AS
7388 * ring-buffer, but it will be the child that's actually using it.
7389 *
7390 * We are using event::rb to determine if the event should be stopped,
7391 * however this may race with ring_buffer_attach() (through set_output),
7392 * which will make us skip the event that actually needs to be stopped.
7393 * So ring_buffer_attach() has to stop an aux event before re-assigning
7394 * its rb pointer.
95ff4ca2
AS
7395 */
7396 if (rcu_dereference(parent->rb) == rb)
375637bc 7397 ro->err = __perf_event_stop(&sd);
95ff4ca2
AS
7398}
7399
7400static int __perf_pmu_output_stop(void *info)
7401{
7402 struct perf_event *event = info;
f3a519e4 7403 struct pmu *pmu = event->ctx->pmu;
8b6a3fe8 7404 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
95ff4ca2
AS
7405 struct remote_output ro = {
7406 .rb = event->rb,
7407 };
7408
7409 rcu_read_lock();
aab5b71e 7410 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
95ff4ca2 7411 if (cpuctx->task_ctx)
aab5b71e 7412 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
b73e4fef 7413 &ro, false);
95ff4ca2
AS
7414 rcu_read_unlock();
7415
7416 return ro.err;
7417}
7418
7419static void perf_pmu_output_stop(struct perf_event *event)
7420{
7421 struct perf_event *iter;
7422 int err, cpu;
7423
7424restart:
7425 rcu_read_lock();
7426 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
7427 /*
7428 * For per-CPU events, we need to make sure that neither they
7429 * nor their children are running; for cpu==-1 events it's
7430 * sufficient to stop the event itself if it's active, since
7431 * it can't have children.
7432 */
7433 cpu = iter->cpu;
7434 if (cpu == -1)
7435 cpu = READ_ONCE(iter->oncpu);
7436
7437 if (cpu == -1)
7438 continue;
7439
7440 err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
7441 if (err == -EAGAIN) {
7442 rcu_read_unlock();
7443 goto restart;
7444 }
7445 }
7446 rcu_read_unlock();
52d857a8
JO
7447}
7448
60313ebe 7449/*
9f498cc5
PZ
7450 * task tracking -- fork/exit
7451 *
13d7a241 7452 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
60313ebe
PZ
7453 */
7454
9f498cc5 7455struct perf_task_event {
3a80b4a3 7456 struct task_struct *task;
cdd6c482 7457 struct perf_event_context *task_ctx;
60313ebe
PZ
7458
7459 struct {
7460 struct perf_event_header header;
7461
7462 u32 pid;
7463 u32 ppid;
9f498cc5
PZ
7464 u32 tid;
7465 u32 ptid;
393b2ad8 7466 u64 time;
cdd6c482 7467 } event_id;
60313ebe
PZ
7468};
7469
67516844
JO
7470static int perf_event_task_match(struct perf_event *event)
7471{
13d7a241
SE
7472 return event->attr.comm || event->attr.mmap ||
7473 event->attr.mmap2 || event->attr.mmap_data ||
7474 event->attr.task;
67516844
JO
7475}
7476
cdd6c482 7477static void perf_event_task_output(struct perf_event *event,
52d857a8 7478 void *data)
60313ebe 7479{
52d857a8 7480 struct perf_task_event *task_event = data;
60313ebe 7481 struct perf_output_handle handle;
c980d109 7482 struct perf_sample_data sample;
9f498cc5 7483 struct task_struct *task = task_event->task;
c980d109 7484 int ret, size = task_event->event_id.header.size;
8bb39f9a 7485
67516844
JO
7486 if (!perf_event_task_match(event))
7487 return;
7488
c980d109 7489 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
60313ebe 7490
c980d109 7491 ret = perf_output_begin(&handle, event,
a7ac67ea 7492 task_event->event_id.header.size);
ef60777c 7493 if (ret)
c980d109 7494 goto out;
60313ebe 7495
cdd6c482 7496 task_event->event_id.pid = perf_event_pid(event, task);
cdd6c482 7497 task_event->event_id.tid = perf_event_tid(event, task);
f3bed55e
IR
7498
7499 if (task_event->event_id.header.type == PERF_RECORD_EXIT) {
7500 task_event->event_id.ppid = perf_event_pid(event,
7501 task->real_parent);
7502 task_event->event_id.ptid = perf_event_pid(event,
7503 task->real_parent);
7504 } else { /* PERF_RECORD_FORK */
7505 task_event->event_id.ppid = perf_event_pid(event, current);
7506 task_event->event_id.ptid = perf_event_tid(event, current);
7507 }
9f498cc5 7508
34f43927
PZ
7509 task_event->event_id.time = perf_event_clock(event);
7510
cdd6c482 7511 perf_output_put(&handle, task_event->event_id);
393b2ad8 7512
c980d109
ACM
7513 perf_event__output_id_sample(event, &handle, &sample);
7514
60313ebe 7515 perf_output_end(&handle);
c980d109
ACM
7516out:
7517 task_event->event_id.header.size = size;
60313ebe
PZ
7518}
7519
cdd6c482
IM
7520static void perf_event_task(struct task_struct *task,
7521 struct perf_event_context *task_ctx,
3a80b4a3 7522 int new)
60313ebe 7523{
9f498cc5 7524 struct perf_task_event task_event;
60313ebe 7525
cdd6c482
IM
7526 if (!atomic_read(&nr_comm_events) &&
7527 !atomic_read(&nr_mmap_events) &&
7528 !atomic_read(&nr_task_events))
60313ebe
PZ
7529 return;
7530
9f498cc5 7531 task_event = (struct perf_task_event){
3a80b4a3
PZ
7532 .task = task,
7533 .task_ctx = task_ctx,
cdd6c482 7534 .event_id = {
60313ebe 7535 .header = {
cdd6c482 7536 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
573402db 7537 .misc = 0,
cdd6c482 7538 .size = sizeof(task_event.event_id),
60313ebe 7539 },
573402db
PZ
7540 /* .pid */
7541 /* .ppid */
9f498cc5
PZ
7542 /* .tid */
7543 /* .ptid */
34f43927 7544 /* .time */
60313ebe
PZ
7545 },
7546 };
7547
aab5b71e 7548 perf_iterate_sb(perf_event_task_output,
52d857a8
JO
7549 &task_event,
7550 task_ctx);
9f498cc5
PZ
7551}
7552
cdd6c482 7553void perf_event_fork(struct task_struct *task)
9f498cc5 7554{
cdd6c482 7555 perf_event_task(task, NULL, 1);
e4222673 7556 perf_event_namespaces(task);
60313ebe
PZ
7557}
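The FORK/EXIT records emitted above have the fixed layout of the event_id struct defined earlier (header, then pid/ppid/tid/ptid and a u64 time), optionally followed by sample_id fields when sample_id_all is set. A hedged user-space sketch of that layout, with the sample_id trailer omitted and the values invented for illustration:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the event_id layout filled in by perf_event_task_output(). */
struct task_record {
	struct {
		uint32_t type;		/* PERF_RECORD_FORK or PERF_RECORD_EXIT */
		uint16_t misc;
		uint16_t size;
	} header;
	uint32_t pid, ppid;
	uint32_t tid, ptid;
	uint64_t time;
	/* optional sample_id fields follow when sample_id_all is set */
};

static void print_task_record(const struct task_record *r)
{
	printf("%s pid=%u ppid=%u tid=%u ptid=%u time=%llu\n",
	       r->header.type == PERF_RECORD_FORK ? "fork" : "exit",
	       r->pid, r->ppid, r->tid, r->ptid,
	       (unsigned long long)r->time);
}

int main(void)
{
	struct task_record r = {
		.header = { .type = PERF_RECORD_FORK, .size = sizeof(r) },
		.pid = 1234, .ppid = 1, .tid = 1234, .ptid = 1, .time = 0,
	};
	print_task_record(&r);
	return 0;
}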
7558
8d1b2d93
PZ
7559/*
7560 * comm tracking
7561 */
7562
7563struct perf_comm_event {
22a4f650
IM
7564 struct task_struct *task;
7565 char *comm;
8d1b2d93
PZ
7566 int comm_size;
7567
7568 struct {
7569 struct perf_event_header header;
7570
7571 u32 pid;
7572 u32 tid;
cdd6c482 7573 } event_id;
8d1b2d93
PZ
7574};
7575
67516844
JO
7576static int perf_event_comm_match(struct perf_event *event)
7577{
7578 return event->attr.comm;
7579}
7580
cdd6c482 7581static void perf_event_comm_output(struct perf_event *event,
52d857a8 7582 void *data)
8d1b2d93 7583{
52d857a8 7584 struct perf_comm_event *comm_event = data;
8d1b2d93 7585 struct perf_output_handle handle;
c980d109 7586 struct perf_sample_data sample;
cdd6c482 7587 int size = comm_event->event_id.header.size;
c980d109
ACM
7588 int ret;
7589
67516844
JO
7590 if (!perf_event_comm_match(event))
7591 return;
7592
c980d109
ACM
7593 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
7594 ret = perf_output_begin(&handle, event,
a7ac67ea 7595 comm_event->event_id.header.size);
8d1b2d93
PZ
7596
7597 if (ret)
c980d109 7598 goto out;
8d1b2d93 7599
cdd6c482
IM
7600 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
7601 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
709e50cf 7602
cdd6c482 7603 perf_output_put(&handle, comm_event->event_id);
76369139 7604 __output_copy(&handle, comm_event->comm,
8d1b2d93 7605 comm_event->comm_size);
c980d109
ACM
7606
7607 perf_event__output_id_sample(event, &handle, &sample);
7608
8d1b2d93 7609 perf_output_end(&handle);
c980d109
ACM
7610out:
7611 comm_event->event_id.header.size = size;
8d1b2d93
PZ
7612}
7613
cdd6c482 7614static void perf_event_comm_event(struct perf_comm_event *comm_event)
8d1b2d93 7615{
413ee3b4 7616 char comm[TASK_COMM_LEN];
8d1b2d93 7617 unsigned int size;
8d1b2d93 7618
413ee3b4 7619 memset(comm, 0, sizeof(comm));
96b02d78 7620 strlcpy(comm, comm_event->task->comm, sizeof(comm));
888fcee0 7621 size = ALIGN(strlen(comm)+1, sizeof(u64));
8d1b2d93
PZ
7622
7623 comm_event->comm = comm;
7624 comm_event->comm_size = size;
7625
cdd6c482 7626 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
8dc85d54 7627
aab5b71e 7628 perf_iterate_sb(perf_event_comm_output,
52d857a8
JO
7629 comm_event,
7630 NULL);
8d1b2d93
PZ
7631}
7632
82b89778 7633void perf_event_comm(struct task_struct *task, bool exec)
8d1b2d93 7634{
9ee318a7
PZ
7635 struct perf_comm_event comm_event;
7636
cdd6c482 7637 if (!atomic_read(&nr_comm_events))
9ee318a7 7638 return;
a63eaf34 7639
9ee318a7 7640 comm_event = (struct perf_comm_event){
8d1b2d93 7641 .task = task,
573402db
PZ
7642 /* .comm */
7643 /* .comm_size */
cdd6c482 7644 .event_id = {
573402db 7645 .header = {
cdd6c482 7646 .type = PERF_RECORD_COMM,
82b89778 7647 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
573402db
PZ
7648 /* .size */
7649 },
7650 /* .pid */
7651 /* .tid */
8d1b2d93
PZ
7652 },
7653 };
7654
cdd6c482 7655 perf_event_comm_event(&comm_event);
8d1b2d93
PZ
7656}
7657
e4222673
HB
7658/*
7659 * namespaces tracking
7660 */
7661
7662struct perf_namespaces_event {
7663 struct task_struct *task;
7664
7665 struct {
7666 struct perf_event_header header;
7667
7668 u32 pid;
7669 u32 tid;
7670 u64 nr_namespaces;
7671 struct perf_ns_link_info link_info[NR_NAMESPACES];
7672 } event_id;
7673};
7674
7675static int perf_event_namespaces_match(struct perf_event *event)
7676{
7677 return event->attr.namespaces;
7678}
7679
7680static void perf_event_namespaces_output(struct perf_event *event,
7681 void *data)
7682{
7683 struct perf_namespaces_event *namespaces_event = data;
7684 struct perf_output_handle handle;
7685 struct perf_sample_data sample;
34900ec5 7686 u16 header_size = namespaces_event->event_id.header.size;
e4222673
HB
7687 int ret;
7688
7689 if (!perf_event_namespaces_match(event))
7690 return;
7691
7692 perf_event_header__init_id(&namespaces_event->event_id.header,
7693 &sample, event);
7694 ret = perf_output_begin(&handle, event,
7695 namespaces_event->event_id.header.size);
7696 if (ret)
34900ec5 7697 goto out;
e4222673
HB
7698
7699 namespaces_event->event_id.pid = perf_event_pid(event,
7700 namespaces_event->task);
7701 namespaces_event->event_id.tid = perf_event_tid(event,
7702 namespaces_event->task);
7703
7704 perf_output_put(&handle, namespaces_event->event_id);
7705
7706 perf_event__output_id_sample(event, &handle, &sample);
7707
7708 perf_output_end(&handle);
34900ec5
JO
7709out:
7710 namespaces_event->event_id.header.size = header_size;
e4222673
HB
7711}
7712
7713static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
7714 struct task_struct *task,
7715 const struct proc_ns_operations *ns_ops)
7716{
7717 struct path ns_path;
7718 struct inode *ns_inode;
ce623f89 7719 int error;
e4222673
HB
7720
7721 error = ns_get_path(&ns_path, task, ns_ops);
7722 if (!error) {
7723 ns_inode = ns_path.dentry->d_inode;
7724 ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
7725 ns_link_info->ino = ns_inode->i_ino;
0e18dd12 7726 path_put(&ns_path);
e4222673
HB
7727 }
7728}
7729
7730void perf_event_namespaces(struct task_struct *task)
7731{
7732 struct perf_namespaces_event namespaces_event;
7733 struct perf_ns_link_info *ns_link_info;
7734
7735 if (!atomic_read(&nr_namespaces_events))
7736 return;
7737
7738 namespaces_event = (struct perf_namespaces_event){
7739 .task = task,
7740 .event_id = {
7741 .header = {
7742 .type = PERF_RECORD_NAMESPACES,
7743 .misc = 0,
7744 .size = sizeof(namespaces_event.event_id),
7745 },
7746 /* .pid */
7747 /* .tid */
7748 .nr_namespaces = NR_NAMESPACES,
7749 /* .link_info[NR_NAMESPACES] */
7750 },
7751 };
7752
7753 ns_link_info = namespaces_event.event_id.link_info;
7754
7755 perf_fill_ns_link_info(&ns_link_info[MNT_NS_INDEX],
7756 task, &mntns_operations);
7757
7758#ifdef CONFIG_USER_NS
7759 perf_fill_ns_link_info(&ns_link_info[USER_NS_INDEX],
7760 task, &userns_operations);
7761#endif
7762#ifdef CONFIG_NET_NS
7763 perf_fill_ns_link_info(&ns_link_info[NET_NS_INDEX],
7764 task, &netns_operations);
7765#endif
7766#ifdef CONFIG_UTS_NS
7767 perf_fill_ns_link_info(&ns_link_info[UTS_NS_INDEX],
7768 task, &utsns_operations);
7769#endif
7770#ifdef CONFIG_IPC_NS
7771 perf_fill_ns_link_info(&ns_link_info[IPC_NS_INDEX],
7772 task, &ipcns_operations);
7773#endif
7774#ifdef CONFIG_PID_NS
7775 perf_fill_ns_link_info(&ns_link_info[PID_NS_INDEX],
7776 task, &pidns_operations);
7777#endif
7778#ifdef CONFIG_CGROUPS
7779 perf_fill_ns_link_info(&ns_link_info[CGROUP_NS_INDEX],
7780 task, &cgroupns_operations);
7781#endif
7782
7783 perf_iterate_sb(perf_event_namespaces_output,
7784 &namespaces_event,
7785 NULL);
7786}
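The dev/ino pairs that perf_fill_ns_link_info() records refer to the namespace inodes, so user space can cross-check a PERF_RECORD_NAMESPACES entry against stat() on the matching /proc/<pid>/ns/* file. A minimal sketch of that comparison (the reported dev value corresponds modulo the dev_t encoding, so treat it as illustrative rather than byte-exact):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	/* same inode the MNT_NS_INDEX slot of link_info describes */
	if (stat("/proc/self/ns/mnt", &st) == 0)
		printf("mnt ns: dev=%lu ino=%lu\n",
		       (unsigned long)st.st_dev, (unsigned long)st.st_ino);
	return 0;
}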
7787
96aaab68
NK
7788/*
7789 * cgroup tracking
7790 */
7791#ifdef CONFIG_CGROUP_PERF
7792
7793struct perf_cgroup_event {
7794 char *path;
7795 int path_size;
7796 struct {
7797 struct perf_event_header header;
7798 u64 id;
7799 char path[];
7800 } event_id;
7801};
7802
7803static int perf_event_cgroup_match(struct perf_event *event)
7804{
7805 return event->attr.cgroup;
7806}
7807
7808static void perf_event_cgroup_output(struct perf_event *event, void *data)
7809{
7810 struct perf_cgroup_event *cgroup_event = data;
7811 struct perf_output_handle handle;
7812 struct perf_sample_data sample;
7813 u16 header_size = cgroup_event->event_id.header.size;
7814 int ret;
7815
7816 if (!perf_event_cgroup_match(event))
7817 return;
7818
7819 perf_event_header__init_id(&cgroup_event->event_id.header,
7820 &sample, event);
7821 ret = perf_output_begin(&handle, event,
7822 cgroup_event->event_id.header.size);
7823 if (ret)
7824 goto out;
7825
7826 perf_output_put(&handle, cgroup_event->event_id);
7827 __output_copy(&handle, cgroup_event->path, cgroup_event->path_size);
7828
7829 perf_event__output_id_sample(event, &handle, &sample);
7830
7831 perf_output_end(&handle);
7832out:
7833 cgroup_event->event_id.header.size = header_size;
7834}
7835
7836static void perf_event_cgroup(struct cgroup *cgrp)
7837{
7838 struct perf_cgroup_event cgroup_event;
7839 char path_enomem[16] = "//enomem";
7840 char *pathname;
7841 size_t size;
7842
7843 if (!atomic_read(&nr_cgroup_events))
7844 return;
7845
7846 cgroup_event = (struct perf_cgroup_event){
7847 .event_id = {
7848 .header = {
7849 .type = PERF_RECORD_CGROUP,
7850 .misc = 0,
7851 .size = sizeof(cgroup_event.event_id),
7852 },
7853 .id = cgroup_id(cgrp),
7854 },
7855 };
7856
7857 pathname = kmalloc(PATH_MAX, GFP_KERNEL);
7858 if (pathname == NULL) {
7859 cgroup_event.path = path_enomem;
7860 } else {
7861 /* just to be sure to have enough space for alignment */
7862 cgroup_path(cgrp, pathname, PATH_MAX - sizeof(u64));
7863 cgroup_event.path = pathname;
7864 }
7865
7866 /*
7867 * Since our buffer works in 8 byte units we need to align our string
7868 * size to a multiple of 8. However, we must guarantee the tail end is
7869 * zero'd out to avoid leaking random bits to userspace.
7870 */
7871 size = strlen(cgroup_event.path) + 1;
7872 while (!IS_ALIGNED(size, sizeof(u64)))
7873 cgroup_event.path[size++] = '\0';
7874
7875 cgroup_event.event_id.header.size += size;
7876 cgroup_event.path_size = size;
7877
7878 perf_iterate_sb(perf_event_cgroup_output,
7879 &cgroup_event,
7880 NULL);
7881
7882 kfree(pathname);
7883}
7884
7885#endif
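The cgroup path above, like the comm string and mmap file name later in this file, is sized to a multiple of 8 bytes and zero-padded so no stale bytes leak into the ring buffer. A small stand-alone sketch of that padding rule (pad_to_u64() is a made-up helper name, not a kernel function):

#include <stdio.h>
#include <string.h>

static size_t pad_to_u64(char *buf, const char *s)
{
	size_t size = strlen(s) + 1;		/* include the NUL */

	memcpy(buf, s, size);
	while (size % 8)			/* zero-fill up to 8-byte multiple */
		buf[size++] = '\0';
	return size;
}

int main(void)
{
	char buf[32];
	size_t n = pad_to_u64(buf, "/user.slice");	/* 12 -> 16 bytes */

	printf("padded size = %zu\n", n);
	return 0;
}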
7886
0a4a9391
PZ
7887/*
7888 * mmap tracking
7889 */
7890
7891struct perf_mmap_event {
089dd79d
PZ
7892 struct vm_area_struct *vma;
7893
7894 const char *file_name;
7895 int file_size;
13d7a241
SE
7896 int maj, min;
7897 u64 ino;
7898 u64 ino_generation;
f972eb63 7899 u32 prot, flags;
0a4a9391
PZ
7900
7901 struct {
7902 struct perf_event_header header;
7903
7904 u32 pid;
7905 u32 tid;
7906 u64 start;
7907 u64 len;
7908 u64 pgoff;
cdd6c482 7909 } event_id;
0a4a9391
PZ
7910};
7911
67516844
JO
7912static int perf_event_mmap_match(struct perf_event *event,
7913 void *data)
7914{
7915 struct perf_mmap_event *mmap_event = data;
7916 struct vm_area_struct *vma = mmap_event->vma;
7917 int executable = vma->vm_flags & VM_EXEC;
7918
7919 return (!executable && event->attr.mmap_data) ||
13d7a241 7920 (executable && (event->attr.mmap || event->attr.mmap2));
67516844
JO
7921}
7922
cdd6c482 7923static void perf_event_mmap_output(struct perf_event *event,
52d857a8 7924 void *data)
0a4a9391 7925{
52d857a8 7926 struct perf_mmap_event *mmap_event = data;
0a4a9391 7927 struct perf_output_handle handle;
c980d109 7928 struct perf_sample_data sample;
cdd6c482 7929 int size = mmap_event->event_id.header.size;
d9c1bb2f 7930 u32 type = mmap_event->event_id.header.type;
c980d109 7931 int ret;
0a4a9391 7932
67516844
JO
7933 if (!perf_event_mmap_match(event, data))
7934 return;
7935
13d7a241
SE
7936 if (event->attr.mmap2) {
7937 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
7938 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
7939 mmap_event->event_id.header.size += sizeof(mmap_event->min);
7940 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
d008d525 7941 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
f972eb63
PZ
7942 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
7943 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
13d7a241
SE
7944 }
7945
c980d109
ACM
7946 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
7947 ret = perf_output_begin(&handle, event,
a7ac67ea 7948 mmap_event->event_id.header.size);
0a4a9391 7949 if (ret)
c980d109 7950 goto out;
0a4a9391 7951
cdd6c482
IM
7952 mmap_event->event_id.pid = perf_event_pid(event, current);
7953 mmap_event->event_id.tid = perf_event_tid(event, current);
709e50cf 7954
cdd6c482 7955 perf_output_put(&handle, mmap_event->event_id);
13d7a241
SE
7956
7957 if (event->attr.mmap2) {
7958 perf_output_put(&handle, mmap_event->maj);
7959 perf_output_put(&handle, mmap_event->min);
7960 perf_output_put(&handle, mmap_event->ino);
7961 perf_output_put(&handle, mmap_event->ino_generation);
f972eb63
PZ
7962 perf_output_put(&handle, mmap_event->prot);
7963 perf_output_put(&handle, mmap_event->flags);
13d7a241
SE
7964 }
7965
76369139 7966 __output_copy(&handle, mmap_event->file_name,
0a4a9391 7967 mmap_event->file_size);
c980d109
ACM
7968
7969 perf_event__output_id_sample(event, &handle, &sample);
7970
78d613eb 7971 perf_output_end(&handle);
c980d109
ACM
7972out:
7973 mmap_event->event_id.header.size = size;
d9c1bb2f 7974 mmap_event->event_id.header.type = type;
0a4a9391
PZ
7975}
7976
cdd6c482 7977static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
0a4a9391 7978{
089dd79d
PZ
7979 struct vm_area_struct *vma = mmap_event->vma;
7980 struct file *file = vma->vm_file;
13d7a241
SE
7981 int maj = 0, min = 0;
7982 u64 ino = 0, gen = 0;
f972eb63 7983 u32 prot = 0, flags = 0;
0a4a9391
PZ
7984 unsigned int size;
7985 char tmp[16];
7986 char *buf = NULL;
2c42cfbf 7987 char *name;
413ee3b4 7988
0b3589be
PZ
7989 if (vma->vm_flags & VM_READ)
7990 prot |= PROT_READ;
7991 if (vma->vm_flags & VM_WRITE)
7992 prot |= PROT_WRITE;
7993 if (vma->vm_flags & VM_EXEC)
7994 prot |= PROT_EXEC;
7995
7996 if (vma->vm_flags & VM_MAYSHARE)
7997 flags = MAP_SHARED;
7998 else
7999 flags = MAP_PRIVATE;
8000
8001 if (vma->vm_flags & VM_DENYWRITE)
8002 flags |= MAP_DENYWRITE;
8003 if (vma->vm_flags & VM_MAYEXEC)
8004 flags |= MAP_EXECUTABLE;
8005 if (vma->vm_flags & VM_LOCKED)
8006 flags |= MAP_LOCKED;
03911132 8007 if (is_vm_hugetlb_page(vma))
0b3589be
PZ
8008 flags |= MAP_HUGETLB;
8009
0a4a9391 8010 if (file) {
13d7a241
SE
8011 struct inode *inode;
8012 dev_t dev;
3ea2f2b9 8013
2c42cfbf 8014 buf = kmalloc(PATH_MAX, GFP_KERNEL);
0a4a9391 8015 if (!buf) {
c7e548b4
ON
8016 name = "//enomem";
8017 goto cpy_name;
0a4a9391 8018 }
413ee3b4 8019 /*
3ea2f2b9 8020 * d_path() works from the end of the rb backwards, so we
413ee3b4
AB
8021 * need to add enough zero bytes after the string to handle
8022 * the 64bit alignment we do later.
8023 */
9bf39ab2 8024 name = file_path(file, buf, PATH_MAX - sizeof(u64));
0a4a9391 8025 if (IS_ERR(name)) {
c7e548b4
ON
8026 name = "//toolong";
8027 goto cpy_name;
0a4a9391 8028 }
13d7a241
SE
8029 inode = file_inode(vma->vm_file);
8030 dev = inode->i_sb->s_dev;
8031 ino = inode->i_ino;
8032 gen = inode->i_generation;
8033 maj = MAJOR(dev);
8034 min = MINOR(dev);
f972eb63 8035
c7e548b4 8036 goto got_name;
0a4a9391 8037 } else {
fbe26abe
JO
8038 if (vma->vm_ops && vma->vm_ops->name) {
8039 name = (char *) vma->vm_ops->name(vma);
8040 if (name)
8041 goto cpy_name;
8042 }
8043
2c42cfbf 8044 name = (char *)arch_vma_name(vma);
c7e548b4
ON
8045 if (name)
8046 goto cpy_name;
089dd79d 8047
32c5fb7e 8048 if (vma->vm_start <= vma->vm_mm->start_brk &&
3af9e859 8049 vma->vm_end >= vma->vm_mm->brk) {
c7e548b4
ON
8050 name = "[heap]";
8051 goto cpy_name;
32c5fb7e
ON
8052 }
8053 if (vma->vm_start <= vma->vm_mm->start_stack &&
3af9e859 8054 vma->vm_end >= vma->vm_mm->start_stack) {
c7e548b4
ON
8055 name = "[stack]";
8056 goto cpy_name;
089dd79d
PZ
8057 }
8058
c7e548b4
ON
8059 name = "//anon";
8060 goto cpy_name;
0a4a9391
PZ
8061 }
8062
c7e548b4
ON
8063cpy_name:
8064 strlcpy(tmp, name, sizeof(tmp));
8065 name = tmp;
0a4a9391 8066got_name:
2c42cfbf
PZ
8067 /*
8068 * Since our buffer works in 8 byte units we need to align our string
8069 * size to a multiple of 8. However, we must guarantee the tail end is
8070 * zero'd out to avoid leaking random bits to userspace.
8071 */
8072 size = strlen(name)+1;
8073 while (!IS_ALIGNED(size, sizeof(u64)))
8074 name[size++] = '\0';
0a4a9391
PZ
8075
8076 mmap_event->file_name = name;
8077 mmap_event->file_size = size;
13d7a241
SE
8078 mmap_event->maj = maj;
8079 mmap_event->min = min;
8080 mmap_event->ino = ino;
8081 mmap_event->ino_generation = gen;
f972eb63
PZ
8082 mmap_event->prot = prot;
8083 mmap_event->flags = flags;
0a4a9391 8084
2fe85427
SE
8085 if (!(vma->vm_flags & VM_EXEC))
8086 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
8087
cdd6c482 8088 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
0a4a9391 8089
aab5b71e 8090 perf_iterate_sb(perf_event_mmap_output,
52d857a8
JO
8091 mmap_event,
8092 NULL);
665c2142 8093
0a4a9391
PZ
8094 kfree(buf);
8095}
8096
375637bc
AS
8097/*
8098 * Check whether inode and address range match filter criteria.
8099 */
8100static bool perf_addr_filter_match(struct perf_addr_filter *filter,
8101 struct file *file, unsigned long offset,
8102 unsigned long size)
8103{
7f635ff1
MP
8104 /* d_inode(NULL) won't be equal to any mapped user-space file */
8105 if (!filter->path.dentry)
8106 return false;
8107
9511bce9 8108 if (d_inode(filter->path.dentry) != file_inode(file))
375637bc
AS
8109 return false;
8110
8111 if (filter->offset > offset + size)
8112 return false;
8113
8114 if (filter->offset + filter->size < offset)
8115 return false;
8116
8117 return true;
8118}
8119
c60f83b8
AS
8120static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter,
8121 struct vm_area_struct *vma,
8122 struct perf_addr_filter_range *fr)
8123{
8124 unsigned long vma_size = vma->vm_end - vma->vm_start;
8125 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
8126 struct file *file = vma->vm_file;
8127
8128 if (!perf_addr_filter_match(filter, file, off, vma_size))
8129 return false;
8130
8131 if (filter->offset < off) {
8132 fr->start = vma->vm_start;
8133 fr->size = min(vma_size, filter->size - (off - filter->offset));
8134 } else {
8135 fr->start = vma->vm_start + filter->offset - off;
8136 fr->size = min(vma->vm_end - fr->start, filter->size);
8137 }
8138
8139 return true;
8140}
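The interesting part of perf_addr_filter_vma_adjust() is the arithmetic that intersects a filter expressed in file offsets with a VMA that maps some window of that file, yielding a virtual address range. A stand-alone sketch of the same computation with invented example numbers (filter_to_vaddr() is hypothetical, not a kernel function):

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, size; };

static struct range filter_to_vaddr(uint64_t filt_off, uint64_t filt_size,
				    uint64_t vm_start, uint64_t vm_end,
				    uint64_t file_off)
{
	uint64_t vma_size = vm_end - vm_start;
	struct range r;

	if (filt_off < file_off) {
		/* filter starts before the mapped window */
		r.start = vm_start;
		r.size = filt_size - (file_off - filt_off);
		if (r.size > vma_size)
			r.size = vma_size;
	} else {
		/* filter starts inside the mapped window */
		r.start = vm_start + filt_off - file_off;
		r.size = filt_size;
		if (r.size > vm_end - r.start)
			r.size = vm_end - r.start;
	}
	return r;
}

int main(void)
{
	/* filter covers file bytes [0x1000, 0x3000); the vma maps file
	 * offset 0x2000 at address 0x400000 for 0x4000 bytes */
	struct range r = filter_to_vaddr(0x1000, 0x2000,
					 0x400000, 0x404000, 0x2000);
	printf("start=%#llx size=%#llx\n",
	       (unsigned long long)r.start, (unsigned long long)r.size);
	return 0;
}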
8141
375637bc
AS
8142static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
8143{
8144 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
8145 struct vm_area_struct *vma = data;
375637bc
AS
8146 struct perf_addr_filter *filter;
8147 unsigned int restart = 0, count = 0;
c60f83b8 8148 unsigned long flags;
375637bc
AS
8149
8150 if (!has_addr_filter(event))
8151 return;
8152
c60f83b8 8153 if (!vma->vm_file)
375637bc
AS
8154 return;
8155
8156 raw_spin_lock_irqsave(&ifh->lock, flags);
8157 list_for_each_entry(filter, &ifh->list, entry) {
c60f83b8
AS
8158 if (perf_addr_filter_vma_adjust(filter, vma,
8159 &event->addr_filter_ranges[count]))
375637bc 8160 restart++;
375637bc
AS
8161
8162 count++;
8163 }
8164
8165 if (restart)
8166 event->addr_filters_gen++;
8167 raw_spin_unlock_irqrestore(&ifh->lock, flags);
8168
8169 if (restart)
767ae086 8170 perf_event_stop(event, 1);
375637bc
AS
8171}
8172
8173/*
8174 * Adjust all task's events' filters to the new vma
8175 */
8176static void perf_addr_filters_adjust(struct vm_area_struct *vma)
8177{
8178 struct perf_event_context *ctx;
8179 int ctxn;
8180
12b40a23
MP
8181 /*
8182 * Data tracing isn't supported yet and as such there is no need
8183 * to keep track of anything that isn't related to executable code:
8184 */
8185 if (!(vma->vm_flags & VM_EXEC))
8186 return;
8187
375637bc
AS
8188 rcu_read_lock();
8189 for_each_task_context_nr(ctxn) {
8190 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
8191 if (!ctx)
8192 continue;
8193
aab5b71e 8194 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
375637bc
AS
8195 }
8196 rcu_read_unlock();
8197}
8198
3af9e859 8199void perf_event_mmap(struct vm_area_struct *vma)
0a4a9391 8200{
9ee318a7
PZ
8201 struct perf_mmap_event mmap_event;
8202
cdd6c482 8203 if (!atomic_read(&nr_mmap_events))
9ee318a7
PZ
8204 return;
8205
8206 mmap_event = (struct perf_mmap_event){
089dd79d 8207 .vma = vma,
573402db
PZ
8208 /* .file_name */
8209 /* .file_size */
cdd6c482 8210 .event_id = {
573402db 8211 .header = {
cdd6c482 8212 .type = PERF_RECORD_MMAP,
39447b38 8213 .misc = PERF_RECORD_MISC_USER,
573402db
PZ
8214 /* .size */
8215 },
8216 /* .pid */
8217 /* .tid */
089dd79d
PZ
8218 .start = vma->vm_start,
8219 .len = vma->vm_end - vma->vm_start,
3a0304e9 8220 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
0a4a9391 8221 },
13d7a241
SE
8222 /* .maj (attr_mmap2 only) */
8223 /* .min (attr_mmap2 only) */
8224 /* .ino (attr_mmap2 only) */
8225 /* .ino_generation (attr_mmap2 only) */
f972eb63
PZ
8226 /* .prot (attr_mmap2 only) */
8227 /* .flags (attr_mmap2 only) */
0a4a9391
PZ
8228 };
8229
375637bc 8230 perf_addr_filters_adjust(vma);
cdd6c482 8231 perf_event_mmap_event(&mmap_event);
0a4a9391
PZ
8232}
8233
68db7e98
AS
8234void perf_event_aux_event(struct perf_event *event, unsigned long head,
8235 unsigned long size, u64 flags)
8236{
8237 struct perf_output_handle handle;
8238 struct perf_sample_data sample;
8239 struct perf_aux_event {
8240 struct perf_event_header header;
8241 u64 offset;
8242 u64 size;
8243 u64 flags;
8244 } rec = {
8245 .header = {
8246 .type = PERF_RECORD_AUX,
8247 .misc = 0,
8248 .size = sizeof(rec),
8249 },
8250 .offset = head,
8251 .size = size,
8252 .flags = flags,
8253 };
8254 int ret;
8255
8256 perf_event_header__init_id(&rec.header, &sample, event);
8257 ret = perf_output_begin(&handle, event, rec.header.size);
8258
8259 if (ret)
8260 return;
8261
8262 perf_output_put(&handle, rec);
8263 perf_event__output_id_sample(event, &handle, &sample);
8264
8265 perf_output_end(&handle);
8266}
8267
f38b0dbb
KL
8268/*
8269 * Lost/dropped samples logging
8270 */
8271void perf_log_lost_samples(struct perf_event *event, u64 lost)
8272{
8273 struct perf_output_handle handle;
8274 struct perf_sample_data sample;
8275 int ret;
8276
8277 struct {
8278 struct perf_event_header header;
8279 u64 lost;
8280 } lost_samples_event = {
8281 .header = {
8282 .type = PERF_RECORD_LOST_SAMPLES,
8283 .misc = 0,
8284 .size = sizeof(lost_samples_event),
8285 },
8286 .lost = lost,
8287 };
8288
8289 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
8290
8291 ret = perf_output_begin(&handle, event,
8292 lost_samples_event.header.size);
8293 if (ret)
8294 return;
8295
8296 perf_output_put(&handle, lost_samples_event);
8297 perf_event__output_id_sample(event, &handle, &sample);
8298 perf_output_end(&handle);
8299}
8300
45ac1403
AH
8301/*
8302 * context_switch tracking
8303 */
8304
8305struct perf_switch_event {
8306 struct task_struct *task;
8307 struct task_struct *next_prev;
8308
8309 struct {
8310 struct perf_event_header header;
8311 u32 next_prev_pid;
8312 u32 next_prev_tid;
8313 } event_id;
8314};
8315
8316static int perf_event_switch_match(struct perf_event *event)
8317{
8318 return event->attr.context_switch;
8319}
8320
8321static void perf_event_switch_output(struct perf_event *event, void *data)
8322{
8323 struct perf_switch_event *se = data;
8324 struct perf_output_handle handle;
8325 struct perf_sample_data sample;
8326 int ret;
8327
8328 if (!perf_event_switch_match(event))
8329 return;
8330
8331 /* Only CPU-wide events are allowed to see next/prev pid/tid */
8332 if (event->ctx->task) {
8333 se->event_id.header.type = PERF_RECORD_SWITCH;
8334 se->event_id.header.size = sizeof(se->event_id.header);
8335 } else {
8336 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
8337 se->event_id.header.size = sizeof(se->event_id);
8338 se->event_id.next_prev_pid =
8339 perf_event_pid(event, se->next_prev);
8340 se->event_id.next_prev_tid =
8341 perf_event_tid(event, se->next_prev);
8342 }
8343
8344 perf_event_header__init_id(&se->event_id.header, &sample, event);
8345
8346 ret = perf_output_begin(&handle, event, se->event_id.header.size);
8347 if (ret)
8348 return;
8349
8350 if (event->ctx->task)
8351 perf_output_put(&handle, se->event_id.header);
8352 else
8353 perf_output_put(&handle, se->event_id);
8354
8355 perf_event__output_id_sample(event, &handle, &sample);
8356
8357 perf_output_end(&handle);
8358}
8359
8360static void perf_event_switch(struct task_struct *task,
8361 struct task_struct *next_prev, bool sched_in)
8362{
8363 struct perf_switch_event switch_event;
8364
8365 /* N.B. caller checks nr_switch_events != 0 */
8366
8367 switch_event = (struct perf_switch_event){
8368 .task = task,
8369 .next_prev = next_prev,
8370 .event_id = {
8371 .header = {
8372 /* .type */
8373 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
8374 /* .size */
8375 },
8376 /* .next_prev_pid */
8377 /* .next_prev_tid */
8378 },
8379 };
8380
101592b4
AB
8381 if (!sched_in && task->state == TASK_RUNNING)
8382 switch_event.event_id.header.misc |=
8383 PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
8384
aab5b71e 8385 perf_iterate_sb(perf_event_switch_output,
45ac1403
AH
8386 &switch_event,
8387 NULL);
8388}
8389
a78ac325
PZ
8390/*
8391 * IRQ throttle logging
8392 */
8393
cdd6c482 8394static void perf_log_throttle(struct perf_event *event, int enable)
a78ac325
PZ
8395{
8396 struct perf_output_handle handle;
c980d109 8397 struct perf_sample_data sample;
a78ac325
PZ
8398 int ret;
8399
8400 struct {
8401 struct perf_event_header header;
8402 u64 time;
cca3f454 8403 u64 id;
7f453c24 8404 u64 stream_id;
a78ac325
PZ
8405 } throttle_event = {
8406 .header = {
cdd6c482 8407 .type = PERF_RECORD_THROTTLE,
a78ac325
PZ
8408 .misc = 0,
8409 .size = sizeof(throttle_event),
8410 },
34f43927 8411 .time = perf_event_clock(event),
cdd6c482
IM
8412 .id = primary_event_id(event),
8413 .stream_id = event->id,
a78ac325
PZ
8414 };
8415
966ee4d6 8416 if (enable)
cdd6c482 8417 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
966ee4d6 8418
c980d109
ACM
8419 perf_event_header__init_id(&throttle_event.header, &sample, event);
8420
8421 ret = perf_output_begin(&handle, event,
a7ac67ea 8422 throttle_event.header.size);
a78ac325
PZ
8423 if (ret)
8424 return;
8425
8426 perf_output_put(&handle, throttle_event);
c980d109 8427 perf_event__output_id_sample(event, &handle, &sample);
a78ac325
PZ
8428 perf_output_end(&handle);
8429}
8430
76193a94
SL
8431/*
8432 * ksymbol register/unregister tracking
8433 */
8434
8435struct perf_ksymbol_event {
8436 const char *name;
8437 int name_len;
8438 struct {
8439 struct perf_event_header header;
8440 u64 addr;
8441 u32 len;
8442 u16 ksym_type;
8443 u16 flags;
8444 } event_id;
8445};
8446
8447static int perf_event_ksymbol_match(struct perf_event *event)
8448{
8449 return event->attr.ksymbol;
8450}
8451
8452static void perf_event_ksymbol_output(struct perf_event *event, void *data)
8453{
8454 struct perf_ksymbol_event *ksymbol_event = data;
8455 struct perf_output_handle handle;
8456 struct perf_sample_data sample;
8457 int ret;
8458
8459 if (!perf_event_ksymbol_match(event))
8460 return;
8461
8462 perf_event_header__init_id(&ksymbol_event->event_id.header,
8463 &sample, event);
8464 ret = perf_output_begin(&handle, event,
8465 ksymbol_event->event_id.header.size);
8466 if (ret)
8467 return;
8468
8469 perf_output_put(&handle, ksymbol_event->event_id);
8470 __output_copy(&handle, ksymbol_event->name, ksymbol_event->name_len);
8471 perf_event__output_id_sample(event, &handle, &sample);
8472
8473 perf_output_end(&handle);
8474}
8475
8476void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister,
8477 const char *sym)
8478{
8479 struct perf_ksymbol_event ksymbol_event;
8480 char name[KSYM_NAME_LEN];
8481 u16 flags = 0;
8482 int name_len;
8483
8484 if (!atomic_read(&nr_ksymbol_events))
8485 return;
8486
8487 if (ksym_type >= PERF_RECORD_KSYMBOL_TYPE_MAX ||
8488 ksym_type == PERF_RECORD_KSYMBOL_TYPE_UNKNOWN)
8489 goto err;
8490
8491 strlcpy(name, sym, KSYM_NAME_LEN);
8492 name_len = strlen(name) + 1;
8493 while (!IS_ALIGNED(name_len, sizeof(u64)))
8494 name[name_len++] = '\0';
8495 BUILD_BUG_ON(KSYM_NAME_LEN % sizeof(u64));
8496
8497 if (unregister)
8498 flags |= PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER;
8499
8500 ksymbol_event = (struct perf_ksymbol_event){
8501 .name = name,
8502 .name_len = name_len,
8503 .event_id = {
8504 .header = {
8505 .type = PERF_RECORD_KSYMBOL,
8506 .size = sizeof(ksymbol_event.event_id) +
8507 name_len,
8508 },
8509 .addr = addr,
8510 .len = len,
8511 .ksym_type = ksym_type,
8512 .flags = flags,
8513 },
8514 };
8515
8516 perf_iterate_sb(perf_event_ksymbol_output, &ksymbol_event, NULL);
8517 return;
8518err:
8519 WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type);
8520}
8521
6ee52e2a
SL
8522/*
8523 * bpf program load/unload tracking
8524 */
8525
8526struct perf_bpf_event {
8527 struct bpf_prog *prog;
8528 struct {
8529 struct perf_event_header header;
8530 u16 type;
8531 u16 flags;
8532 u32 id;
8533 u8 tag[BPF_TAG_SIZE];
8534 } event_id;
8535};
8536
8537static int perf_event_bpf_match(struct perf_event *event)
8538{
8539 return event->attr.bpf_event;
8540}
8541
8542static void perf_event_bpf_output(struct perf_event *event, void *data)
8543{
8544 struct perf_bpf_event *bpf_event = data;
8545 struct perf_output_handle handle;
8546 struct perf_sample_data sample;
8547 int ret;
8548
8549 if (!perf_event_bpf_match(event))
8550 return;
8551
8552 perf_event_header__init_id(&bpf_event->event_id.header,
8553 &sample, event);
8554 ret = perf_output_begin(&handle, event,
8555 bpf_event->event_id.header.size);
8556 if (ret)
8557 return;
8558
8559 perf_output_put(&handle, bpf_event->event_id);
8560 perf_event__output_id_sample(event, &handle, &sample);
8561
8562 perf_output_end(&handle);
8563}
8564
8565static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
8566 enum perf_bpf_event_type type)
8567{
8568 bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
6ee52e2a
SL
8569 int i;
8570
8571 if (prog->aux->func_cnt == 0) {
6ee52e2a
SL
8572 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
8573 (u64)(unsigned long)prog->bpf_func,
bfea9a85
JO
8574 prog->jited_len, unregister,
8575 prog->aux->ksym.name);
6ee52e2a
SL
8576 } else {
8577 for (i = 0; i < prog->aux->func_cnt; i++) {
8578 struct bpf_prog *subprog = prog->aux->func[i];
8579
6ee52e2a
SL
8580 perf_event_ksymbol(
8581 PERF_RECORD_KSYMBOL_TYPE_BPF,
8582 (u64)(unsigned long)subprog->bpf_func,
bfea9a85
JO
8583 subprog->jited_len, unregister,
8584 prog->aux->ksym.name);
6ee52e2a
SL
8585 }
8586 }
8587}
8588
8589void perf_event_bpf_event(struct bpf_prog *prog,
8590 enum perf_bpf_event_type type,
8591 u16 flags)
8592{
8593 struct perf_bpf_event bpf_event;
8594
8595 if (type <= PERF_BPF_EVENT_UNKNOWN ||
8596 type >= PERF_BPF_EVENT_MAX)
8597 return;
8598
8599 switch (type) {
8600 case PERF_BPF_EVENT_PROG_LOAD:
8601 case PERF_BPF_EVENT_PROG_UNLOAD:
8602 if (atomic_read(&nr_ksymbol_events))
8603 perf_event_bpf_emit_ksymbols(prog, type);
8604 break;
8605 default:
8606 break;
8607 }
8608
8609 if (!atomic_read(&nr_bpf_events))
8610 return;
8611
8612 bpf_event = (struct perf_bpf_event){
8613 .prog = prog,
8614 .event_id = {
8615 .header = {
8616 .type = PERF_RECORD_BPF_EVENT,
8617 .size = sizeof(bpf_event.event_id),
8618 },
8619 .type = type,
8620 .flags = flags,
8621 .id = prog->aux->id,
8622 },
8623 };
8624
8625 BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64));
8626
8627 memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE);
8628 perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL);
8629}
8630
8d4e6c4c
AS
8631void perf_event_itrace_started(struct perf_event *event)
8632{
8633 event->attach_state |= PERF_ATTACH_ITRACE;
8634}
8635
ec0d7729
AS
8636static void perf_log_itrace_start(struct perf_event *event)
8637{
8638 struct perf_output_handle handle;
8639 struct perf_sample_data sample;
8640 struct perf_aux_event {
8641 struct perf_event_header header;
8642 u32 pid;
8643 u32 tid;
8644 } rec;
8645 int ret;
8646
8647 if (event->parent)
8648 event = event->parent;
8649
8650 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
8d4e6c4c 8651 event->attach_state & PERF_ATTACH_ITRACE)
ec0d7729
AS
8652 return;
8653
ec0d7729
AS
8654 rec.header.type = PERF_RECORD_ITRACE_START;
8655 rec.header.misc = 0;
8656 rec.header.size = sizeof(rec);
8657 rec.pid = perf_event_pid(event, current);
8658 rec.tid = perf_event_tid(event, current);
8659
8660 perf_event_header__init_id(&rec.header, &sample, event);
8661 ret = perf_output_begin(&handle, event, rec.header.size);
8662
8663 if (ret)
8664 return;
8665
8666 perf_output_put(&handle, rec);
8667 perf_event__output_id_sample(event, &handle, &sample);
8668
8669 perf_output_end(&handle);
8670}
8671
475113d9
JO
8672static int
8673__perf_event_account_interrupt(struct perf_event *event, int throttle)
f6c7d5fe 8674{
cdd6c482 8675 struct hw_perf_event *hwc = &event->hw;
79f14641 8676 int ret = 0;
475113d9 8677 u64 seq;
96398826 8678
e050e3f0
SE
8679 seq = __this_cpu_read(perf_throttled_seq);
8680 if (seq != hwc->interrupts_seq) {
8681 hwc->interrupts_seq = seq;
8682 hwc->interrupts = 1;
8683 } else {
8684 hwc->interrupts++;
8685 if (unlikely(throttle
8686 && hwc->interrupts >= max_samples_per_tick)) {
8687 __this_cpu_inc(perf_throttled_count);
555e0c1e 8688 tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
163ec435
PZ
8689 hwc->interrupts = MAX_INTERRUPTS;
8690 perf_log_throttle(event, 0);
a78ac325
PZ
8691 ret = 1;
8692 }
e050e3f0 8693 }
60db5e09 8694
cdd6c482 8695 if (event->attr.freq) {
def0a9b2 8696 u64 now = perf_clock();
abd50713 8697 s64 delta = now - hwc->freq_time_stamp;
bd2b5b12 8698
abd50713 8699 hwc->freq_time_stamp = now;
bd2b5b12 8700
abd50713 8701 if (delta > 0 && delta < 2*TICK_NSEC)
f39d47ff 8702 perf_adjust_period(event, delta, hwc->last_period, true);
bd2b5b12
PZ
8703 }
8704
475113d9
JO
8705 return ret;
8706}
8707
8708int perf_event_account_interrupt(struct perf_event *event)
8709{
8710 return __perf_event_account_interrupt(event, 1);
8711}
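The throttling bookkeeping in __perf_event_account_interrupt() boils down to counting interrupts within the current perf_throttled_seq window and cutting the event off once the count reaches the per-tick limit. A toy user-space model of just that part (the limit and the loop are made up; the real code also marks the event with MAX_INTERRUPTS and relies on the tick to unthrottle):

#include <stdbool.h>
#include <stdio.h>

#define MAX_SAMPLES_PER_TICK	3

struct hw_state {
	unsigned long interrupts_seq;
	unsigned long interrupts;
};

/* returns true when this sample should be throttled */
static bool account_interrupt(struct hw_state *hwc, unsigned long seq)
{
	if (seq != hwc->interrupts_seq) {
		hwc->interrupts_seq = seq;	/* new window: restart count */
		hwc->interrupts = 1;
		return false;
	}
	return ++hwc->interrupts >= MAX_SAMPLES_PER_TICK;
}

int main(void)
{
	struct hw_state hwc = { 0 };
	unsigned long seq = 1;			/* one tick window */

	for (int i = 0; i < 5; i++)
		printf("sample %d throttled=%d\n", i,
		       account_interrupt(&hwc, seq));
	return 0;
}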
8712
8713/*
8714 * Generic event overflow handling, sampling.
8715 */
8716
8717static int __perf_event_overflow(struct perf_event *event,
8718 int throttle, struct perf_sample_data *data,
8719 struct pt_regs *regs)
8720{
8721 int events = atomic_read(&event->event_limit);
8722 int ret = 0;
8723
8724 /*
8725 * Non-sampling counters might still use the PMI to fold short
 8726 * hardware counters; ignore those.
8727 */
8728 if (unlikely(!is_sampling_event(event)))
8729 return 0;
8730
8731 ret = __perf_event_account_interrupt(event, throttle);
cc1582c2 8732
2023b359
PZ
8733 /*
8734 * XXX event_limit might not quite work as expected on inherited
cdd6c482 8735 * events
2023b359
PZ
8736 */
8737
cdd6c482
IM
8738 event->pending_kill = POLL_IN;
8739 if (events && atomic_dec_and_test(&event->event_limit)) {
79f14641 8740 ret = 1;
cdd6c482 8741 event->pending_kill = POLL_HUP;
5aab90ce
JO
8742
8743 perf_event_disable_inatomic(event);
79f14641
PZ
8744 }
8745
aa6a5f3c 8746 READ_ONCE(event->overflow_handler)(event, data, regs);
453f19ee 8747
fed66e2c 8748 if (*perf_event_fasync(event) && event->pending_kill) {
a8b0ca17
PZ
8749 event->pending_wakeup = 1;
8750 irq_work_queue(&event->pending);
f506b3dc
PZ
8751 }
8752
79f14641 8753 return ret;
f6c7d5fe
PZ
8754}
8755
a8b0ca17 8756int perf_event_overflow(struct perf_event *event,
5622f295
MM
8757 struct perf_sample_data *data,
8758 struct pt_regs *regs)
850bc73f 8759{
a8b0ca17 8760 return __perf_event_overflow(event, 1, data, regs);
850bc73f
PZ
8761}
8762
15dbf27c 8763/*
cdd6c482 8764 * Generic software event infrastructure
15dbf27c
PZ
8765 */
8766
b28ab83c
PZ
8767struct swevent_htable {
8768 struct swevent_hlist *swevent_hlist;
8769 struct mutex hlist_mutex;
8770 int hlist_refcount;
8771
8772 /* Recursion avoidance in each contexts */
8773 int recursion[PERF_NR_CONTEXTS];
8774};
8775
8776static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
8777
7b4b6658 8778/*
cdd6c482
IM
8779 * We directly increment event->count and keep a second value in
 8780 * event->hw.period_left to count intervals. This period value
7b4b6658
PZ
8781 * is kept in the range [-sample_period, 0] so that we can use the
8782 * sign as trigger.
8783 */
8784
ab573844 8785u64 perf_swevent_set_period(struct perf_event *event)
15dbf27c 8786{
cdd6c482 8787 struct hw_perf_event *hwc = &event->hw;
7b4b6658
PZ
8788 u64 period = hwc->last_period;
8789 u64 nr, offset;
8790 s64 old, val;
8791
8792 hwc->last_period = hwc->sample_period;
15dbf27c
PZ
8793
8794again:
e7850595 8795 old = val = local64_read(&hwc->period_left);
7b4b6658
PZ
8796 if (val < 0)
8797 return 0;
15dbf27c 8798
7b4b6658
PZ
8799 nr = div64_u64(period + val, period);
8800 offset = nr * period;
8801 val -= offset;
e7850595 8802 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7b4b6658 8803 goto again;
15dbf27c 8804
7b4b6658 8805 return nr;
15dbf27c
PZ
8806}
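A worked example of the period arithmetic above, without the atomics: period_left stays in [-sample_period, 0], and once additions push it to zero or above, the number of elapsed periods is (period + val) / period and period_left is pulled back into range. The helper name and numbers below are illustrative only.

#include <stdint.h>
#include <stdio.h>

static uint64_t set_period(int64_t *period_left, uint64_t period)
{
	int64_t val = *period_left;
	uint64_t nr, offset;

	if (val < 0)
		return 0;			/* period not yet complete */

	nr = (period + val) / period;		/* how many periods elapsed */
	offset = nr * period;
	*period_left = val - offset;		/* back into [-period, 0] */
	return nr;
}

int main(void)
{
	int64_t left = -3;			/* 3 counts short of a period */
	uint64_t period = 10;

	left += 25;				/* 25 new events arrive */
	printf("overflows = %llu, left = %lld\n",
	       (unsigned long long)set_period(&left, period), (long long)left);
	return 0;
}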
8807
0cff784a 8808static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
a8b0ca17 8809 struct perf_sample_data *data,
5622f295 8810 struct pt_regs *regs)
15dbf27c 8811{
cdd6c482 8812 struct hw_perf_event *hwc = &event->hw;
850bc73f 8813 int throttle = 0;
15dbf27c 8814
0cff784a
PZ
8815 if (!overflow)
8816 overflow = perf_swevent_set_period(event);
15dbf27c 8817
7b4b6658
PZ
8818 if (hwc->interrupts == MAX_INTERRUPTS)
8819 return;
15dbf27c 8820
7b4b6658 8821 for (; overflow; overflow--) {
a8b0ca17 8822 if (__perf_event_overflow(event, throttle,
5622f295 8823 data, regs)) {
7b4b6658
PZ
8824 /*
8825 * We inhibit the overflow from happening when
8826 * hwc->interrupts == MAX_INTERRUPTS.
8827 */
8828 break;
8829 }
cf450a73 8830 throttle = 1;
7b4b6658 8831 }
15dbf27c
PZ
8832}
8833
a4eaf7f1 8834static void perf_swevent_event(struct perf_event *event, u64 nr,
a8b0ca17 8835 struct perf_sample_data *data,
5622f295 8836 struct pt_regs *regs)
7b4b6658 8837{
cdd6c482 8838 struct hw_perf_event *hwc = &event->hw;
d6d020e9 8839
e7850595 8840 local64_add(nr, &event->count);
d6d020e9 8841
0cff784a
PZ
8842 if (!regs)
8843 return;
8844
6c7e550f 8845 if (!is_sampling_event(event))
7b4b6658 8846 return;
d6d020e9 8847
5d81e5cf
AV
8848 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
8849 data->period = nr;
8850 return perf_swevent_overflow(event, 1, data, regs);
8851 } else
8852 data->period = event->hw.last_period;
8853
0cff784a 8854 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
a8b0ca17 8855 return perf_swevent_overflow(event, 1, data, regs);
0cff784a 8856
e7850595 8857 if (local64_add_negative(nr, &hwc->period_left))
7b4b6658 8858 return;
df1a132b 8859
a8b0ca17 8860 perf_swevent_overflow(event, 0, data, regs);
d6d020e9
PZ
8861}
8862
f5ffe02e
FW
8863static int perf_exclude_event(struct perf_event *event,
8864 struct pt_regs *regs)
8865{
a4eaf7f1 8866 if (event->hw.state & PERF_HES_STOPPED)
91b2f482 8867 return 1;
a4eaf7f1 8868
f5ffe02e
FW
8869 if (regs) {
8870 if (event->attr.exclude_user && user_mode(regs))
8871 return 1;
8872
8873 if (event->attr.exclude_kernel && !user_mode(regs))
8874 return 1;
8875 }
8876
8877 return 0;
8878}
8879
cdd6c482 8880static int perf_swevent_match(struct perf_event *event,
1c432d89 8881 enum perf_type_id type,
6fb2915d
LZ
8882 u32 event_id,
8883 struct perf_sample_data *data,
8884 struct pt_regs *regs)
15dbf27c 8885{
cdd6c482 8886 if (event->attr.type != type)
a21ca2ca 8887 return 0;
f5ffe02e 8888
cdd6c482 8889 if (event->attr.config != event_id)
15dbf27c
PZ
8890 return 0;
8891
f5ffe02e
FW
8892 if (perf_exclude_event(event, regs))
8893 return 0;
15dbf27c
PZ
8894
8895 return 1;
8896}
8897
76e1d904
FW
8898static inline u64 swevent_hash(u64 type, u32 event_id)
8899{
8900 u64 val = event_id | (type << 32);
8901
8902 return hash_64(val, SWEVENT_HLIST_BITS);
8903}
8904
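swevent_hash() packs the 32-bit event id and the event type into one 64-bit key and lets hash_64() pick a bucket. A sketch of that bucket selection, approximating hash_64() with a multiplicative (Fibonacci) hash; the constant and the 8-bit table size are assumptions for illustration, not necessarily what this kernel configuration uses.

#include <stdint.h>
#include <stdio.h>

#define HLIST_BITS	8	/* assumed table size: 256 buckets */

static unsigned int swevent_bucket(uint64_t type, uint32_t event_id)
{
	uint64_t val = (uint64_t)event_id | (type << 32);

	/* multiplicative hash, top HLIST_BITS bits select the bucket */
	return (unsigned int)((val * 0x61C8864680B583EBULL) >> (64 - HLIST_BITS));
}

int main(void)
{
	/* e.g. PERF_TYPE_SOFTWARE (1), PERF_COUNT_SW_PAGE_FAULTS (2) */
	printf("bucket = %u\n", swevent_bucket(1, 2));
	return 0;
}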
49f135ed
FW
8905static inline struct hlist_head *
8906__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
76e1d904 8907{
49f135ed
FW
8908 u64 hash = swevent_hash(type, event_id);
8909
8910 return &hlist->heads[hash];
8911}
76e1d904 8912
49f135ed
FW
8913/* For the read side: events when they trigger */
8914static inline struct hlist_head *
b28ab83c 8915find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
49f135ed
FW
8916{
8917 struct swevent_hlist *hlist;
76e1d904 8918
b28ab83c 8919 hlist = rcu_dereference(swhash->swevent_hlist);
76e1d904
FW
8920 if (!hlist)
8921 return NULL;
8922
49f135ed
FW
8923 return __find_swevent_head(hlist, type, event_id);
8924}
8925
8926/* For the event head insertion and removal in the hlist */
8927static inline struct hlist_head *
b28ab83c 8928find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
49f135ed
FW
8929{
8930 struct swevent_hlist *hlist;
8931 u32 event_id = event->attr.config;
8932 u64 type = event->attr.type;
8933
8934 /*
8935 * Event scheduling is always serialized against hlist allocation
 8936 * and release, which makes the protected version suitable here.
8937 * The context lock guarantees that.
8938 */
b28ab83c 8939 hlist = rcu_dereference_protected(swhash->swevent_hlist,
49f135ed
FW
8940 lockdep_is_held(&event->ctx->lock));
8941 if (!hlist)
8942 return NULL;
8943
8944 return __find_swevent_head(hlist, type, event_id);
76e1d904
FW
8945}
8946
8947static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
a8b0ca17 8948 u64 nr,
76e1d904
FW
8949 struct perf_sample_data *data,
8950 struct pt_regs *regs)
15dbf27c 8951{
4a32fea9 8952 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
cdd6c482 8953 struct perf_event *event;
76e1d904 8954 struct hlist_head *head;
15dbf27c 8955
76e1d904 8956 rcu_read_lock();
b28ab83c 8957 head = find_swevent_head_rcu(swhash, type, event_id);
76e1d904
FW
8958 if (!head)
8959 goto end;
8960
b67bfe0d 8961 hlist_for_each_entry_rcu(event, head, hlist_entry) {
6fb2915d 8962 if (perf_swevent_match(event, type, event_id, data, regs))
a8b0ca17 8963 perf_swevent_event(event, nr, data, regs);
15dbf27c 8964 }
76e1d904
FW
8965end:
8966 rcu_read_unlock();
15dbf27c
PZ
8967}
8968
86038c5e
PZI
8969DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
8970
4ed7c92d 8971int perf_swevent_get_recursion_context(void)
96f6d444 8972{
4a32fea9 8973 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
96f6d444 8974
b28ab83c 8975 return get_recursion_context(swhash->recursion);
96f6d444 8976}
645e8cc0 8977EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
96f6d444 8978
98b5c2c6 8979void perf_swevent_put_recursion_context(int rctx)
15dbf27c 8980{
4a32fea9 8981 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
927c7a9e 8982
b28ab83c 8983 put_recursion_context(swhash->recursion, rctx);
ce71b9df 8984}
15dbf27c 8985
86038c5e 8986void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
b8e83514 8987{
a4234bfc 8988 struct perf_sample_data data;
4ed7c92d 8989
86038c5e 8990 if (WARN_ON_ONCE(!regs))
4ed7c92d 8991 return;
a4234bfc 8992
fd0d000b 8993 perf_sample_data_init(&data, addr, 0);
a8b0ca17 8994 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
86038c5e
PZI
8995}
8996
8997void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
8998{
8999 int rctx;
9000
9001 preempt_disable_notrace();
9002 rctx = perf_swevent_get_recursion_context();
9003 if (unlikely(rctx < 0))
9004 goto fail;
9005
9006 ___perf_sw_event(event_id, nr, regs, addr);
4ed7c92d
PZ
9007
9008 perf_swevent_put_recursion_context(rctx);
86038c5e 9009fail:
1c024eca 9010 preempt_enable_notrace();
b8e83514
PZ
9011}
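/*
 * Illustrative sketch (not part of this file): callers normally use the
 * perf_sw_event() wrapper from <linux/perf_event.h>, which tests the
 * perf_swevent_enabled static key before falling through to
 * __perf_sw_event() above. A page-fault handler, for instance, might count
 * faults roughly like this (the surrounding handler name is hypothetical):
 *
 *	void hypothetical_fault_handler(struct pt_regs *regs, unsigned long address)
 *	{
 *		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *		... resolve the fault ...
 *	}
 */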
9012
cdd6c482 9013static void perf_swevent_read(struct perf_event *event)
15dbf27c 9014{
15dbf27c
PZ
9015}
9016
a4eaf7f1 9017static int perf_swevent_add(struct perf_event *event, int flags)
15dbf27c 9018{
4a32fea9 9019 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
cdd6c482 9020 struct hw_perf_event *hwc = &event->hw;
76e1d904
FW
9021 struct hlist_head *head;
9022
6c7e550f 9023 if (is_sampling_event(event)) {
7b4b6658 9024 hwc->last_period = hwc->sample_period;
cdd6c482 9025 perf_swevent_set_period(event);
7b4b6658 9026 }
76e1d904 9027
a4eaf7f1
PZ
9028 hwc->state = !(flags & PERF_EF_START);
9029
b28ab83c 9030 head = find_swevent_head(swhash, event);
12ca6ad2 9031 if (WARN_ON_ONCE(!head))
76e1d904
FW
9032 return -EINVAL;
9033
9034 hlist_add_head_rcu(&event->hlist_entry, head);
6a694a60 9035 perf_event_update_userpage(event);
76e1d904 9036
15dbf27c
PZ
9037 return 0;
9038}
9039
a4eaf7f1 9040static void perf_swevent_del(struct perf_event *event, int flags)
15dbf27c 9041{
76e1d904 9042 hlist_del_rcu(&event->hlist_entry);
15dbf27c
PZ
9043}
9044
a4eaf7f1 9045static void perf_swevent_start(struct perf_event *event, int flags)
5c92d124 9046{
a4eaf7f1 9047 event->hw.state = 0;
d6d020e9 9048}
aa9c4c0f 9049
a4eaf7f1 9050static void perf_swevent_stop(struct perf_event *event, int flags)
d6d020e9 9051{
a4eaf7f1 9052 event->hw.state = PERF_HES_STOPPED;
bae43c99
IM
9053}
9054
49f135ed
FW
9055/* Deref the hlist from the update side */
9056static inline struct swevent_hlist *
b28ab83c 9057swevent_hlist_deref(struct swevent_htable *swhash)
49f135ed 9058{
b28ab83c
PZ
9059 return rcu_dereference_protected(swhash->swevent_hlist,
9060 lockdep_is_held(&swhash->hlist_mutex));
49f135ed
FW
9061}
9062
b28ab83c 9063static void swevent_hlist_release(struct swevent_htable *swhash)
76e1d904 9064{
b28ab83c 9065 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
76e1d904 9066
49f135ed 9067 if (!hlist)
76e1d904
FW
9068 return;
9069
70691d4a 9070 RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
fa4bbc4c 9071 kfree_rcu(hlist, rcu_head);
76e1d904
FW
9072}
9073
3b364d7b 9074static void swevent_hlist_put_cpu(int cpu)
76e1d904 9075{
b28ab83c 9076 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904 9077
b28ab83c 9078 mutex_lock(&swhash->hlist_mutex);
76e1d904 9079
b28ab83c
PZ
9080 if (!--swhash->hlist_refcount)
9081 swevent_hlist_release(swhash);
76e1d904 9082
b28ab83c 9083 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
9084}
9085
3b364d7b 9086static void swevent_hlist_put(void)
76e1d904
FW
9087{
9088 int cpu;
9089
76e1d904 9090 for_each_possible_cpu(cpu)
3b364d7b 9091 swevent_hlist_put_cpu(cpu);
76e1d904
FW
9092}
9093
3b364d7b 9094static int swevent_hlist_get_cpu(int cpu)
76e1d904 9095{
b28ab83c 9096 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
76e1d904
FW
9097 int err = 0;
9098
b28ab83c 9099 mutex_lock(&swhash->hlist_mutex);
a63fbed7
TG
9100 if (!swevent_hlist_deref(swhash) &&
9101 cpumask_test_cpu(cpu, perf_online_mask)) {
76e1d904
FW
9102 struct swevent_hlist *hlist;
9103
9104 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
9105 if (!hlist) {
9106 err = -ENOMEM;
9107 goto exit;
9108 }
b28ab83c 9109 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 9110 }
b28ab83c 9111 swhash->hlist_refcount++;
9ed6060d 9112exit:
b28ab83c 9113 mutex_unlock(&swhash->hlist_mutex);
76e1d904
FW
9114
9115 return err;
9116}
9117
3b364d7b 9118static int swevent_hlist_get(void)
76e1d904 9119{
3b364d7b 9120 int err, cpu, failed_cpu;
76e1d904 9121
a63fbed7 9122 mutex_lock(&pmus_lock);
76e1d904 9123 for_each_possible_cpu(cpu) {
3b364d7b 9124 err = swevent_hlist_get_cpu(cpu);
76e1d904
FW
9125 if (err) {
9126 failed_cpu = cpu;
9127 goto fail;
9128 }
9129 }
a63fbed7 9130 mutex_unlock(&pmus_lock);
76e1d904 9131 return 0;
9ed6060d 9132fail:
76e1d904
FW
9133 for_each_possible_cpu(cpu) {
9134 if (cpu == failed_cpu)
9135 break;
3b364d7b 9136 swevent_hlist_put_cpu(cpu);
76e1d904 9137 }
a63fbed7 9138 mutex_unlock(&pmus_lock);
76e1d904
FW
9139 return err;
9140}
9141
c5905afb 9142struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
95476b64 9143
b0a873eb
PZ
9144static void sw_perf_event_destroy(struct perf_event *event)
9145{
9146 u64 event_id = event->attr.config;
95476b64 9147
b0a873eb
PZ
9148 WARN_ON(event->parent);
9149
c5905afb 9150 static_key_slow_dec(&perf_swevent_enabled[event_id]);
3b364d7b 9151 swevent_hlist_put();
b0a873eb
PZ
9152}
9153
9154static int perf_swevent_init(struct perf_event *event)
9155{
8176cced 9156 u64 event_id = event->attr.config;
b0a873eb
PZ
9157
9158 if (event->attr.type != PERF_TYPE_SOFTWARE)
9159 return -ENOENT;
9160
2481c5fa
SE
9161 /*
9162 * no branch sampling for software events
9163 */
9164 if (has_branch_stack(event))
9165 return -EOPNOTSUPP;
9166
b0a873eb
PZ
9167 switch (event_id) {
9168 case PERF_COUNT_SW_CPU_CLOCK:
9169 case PERF_COUNT_SW_TASK_CLOCK:
9170 return -ENOENT;
9171
9172 default:
9173 break;
9174 }
9175
ce677831 9176 if (event_id >= PERF_COUNT_SW_MAX)
b0a873eb
PZ
9177 return -ENOENT;
9178
9179 if (!event->parent) {
9180 int err;
9181
3b364d7b 9182 err = swevent_hlist_get();
b0a873eb
PZ
9183 if (err)
9184 return err;
9185
c5905afb 9186 static_key_slow_inc(&perf_swevent_enabled[event_id]);
b0a873eb
PZ
9187 event->destroy = sw_perf_event_destroy;
9188 }
9189
9190 return 0;
9191}
9192
9193static struct pmu perf_swevent = {
89a1e187 9194 .task_ctx_nr = perf_sw_context,
95476b64 9195
34f43927
PZ
9196 .capabilities = PERF_PMU_CAP_NO_NMI,
9197
b0a873eb 9198 .event_init = perf_swevent_init,
a4eaf7f1
PZ
9199 .add = perf_swevent_add,
9200 .del = perf_swevent_del,
9201 .start = perf_swevent_start,
9202 .stop = perf_swevent_stop,
1c024eca 9203 .read = perf_swevent_read,
1c024eca
PZ
9204};
9205
b0a873eb
PZ
9206#ifdef CONFIG_EVENT_TRACING
9207
1c024eca
PZ
9208static int perf_tp_filter_match(struct perf_event *event,
9209 struct perf_sample_data *data)
9210{
7e3f977e 9211 void *record = data->raw->frag.data;
1c024eca 9212
b71b437e
PZ
9213 /* only top level events have filters set */
9214 if (event->parent)
9215 event = event->parent;
9216
1c024eca
PZ
9217 if (likely(!event->filter) || filter_match_preds(event->filter, record))
9218 return 1;
9219 return 0;
9220}
9221
9222static int perf_tp_event_match(struct perf_event *event,
9223 struct perf_sample_data *data,
9224 struct pt_regs *regs)
9225{
a0f7d0f7
FW
9226 if (event->hw.state & PERF_HES_STOPPED)
9227 return 0;
580d607c 9228 /*
9fd2e48b 9229 * If exclude_kernel, only trace user-space tracepoints (uprobes)
580d607c 9230 */
9fd2e48b 9231 if (event->attr.exclude_kernel && !user_mode(regs))
1c024eca
PZ
9232 return 0;
9233
9234 if (!perf_tp_filter_match(event, data))
9235 return 0;
9236
9237 return 1;
9238}
9239
85b67bcb
AS
9240void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
9241 struct trace_event_call *call, u64 count,
9242 struct pt_regs *regs, struct hlist_head *head,
9243 struct task_struct *task)
9244{
e87c6bc3 9245 if (bpf_prog_array_valid(call)) {
85b67bcb 9246 *(struct pt_regs **)raw_data = regs;
e87c6bc3 9247 if (!trace_call_bpf(call, raw_data) || hlist_empty(head)) {
85b67bcb
AS
9248 perf_swevent_put_recursion_context(rctx);
9249 return;
9250 }
9251 }
9252 perf_tp_event(call->event.type, count, raw_data, size, regs, head,
8fd0fbbe 9253 rctx, task);
85b67bcb
AS
9254}
9255EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
9256
1e1dcd93 9257void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
e6dab5ff 9258 struct pt_regs *regs, struct hlist_head *head, int rctx,
8fd0fbbe 9259 struct task_struct *task)
95476b64
FW
9260{
9261 struct perf_sample_data data;
8fd0fbbe 9262 struct perf_event *event;
1c024eca 9263
95476b64 9264 struct perf_raw_record raw = {
7e3f977e
DB
9265 .frag = {
9266 .size = entry_size,
9267 .data = record,
9268 },
95476b64
FW
9269 };
9270
1e1dcd93 9271 perf_sample_data_init(&data, 0, 0);
95476b64
FW
9272 data.raw = &raw;
9273
1e1dcd93
AS
9274 perf_trace_buf_update(record, event_type);
9275
8fd0fbbe 9276 hlist_for_each_entry_rcu(event, head, hlist_entry) {
1c024eca 9277 if (perf_tp_event_match(event, &data, regs))
a8b0ca17 9278 perf_swevent_event(event, count, &data, regs);
4f41c013 9279 }
ecc55f84 9280
e6dab5ff
AV
9281 /*
9282 * If we were given a target task, also iterate its context and
9283 * deliver this event there too.
9284 */
9285 if (task && task != current) {
9286 struct perf_event_context *ctx;
9287 struct trace_entry *entry = record;
9288
9289 rcu_read_lock();
9290 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
9291 if (!ctx)
9292 goto unlock;
9293
9294 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cd6fb677
JO
9295 if (event->cpu != smp_processor_id())
9296 continue;
e6dab5ff
AV
9297 if (event->attr.type != PERF_TYPE_TRACEPOINT)
9298 continue;
9299 if (event->attr.config != entry->type)
9300 continue;
9301 if (perf_tp_event_match(event, &data, regs))
9302 perf_swevent_event(event, count, &data, regs);
9303 }
9304unlock:
9305 rcu_read_unlock();
9306 }
9307
ecc55f84 9308 perf_swevent_put_recursion_context(rctx);
95476b64
FW
9309}
9310EXPORT_SYMBOL_GPL(perf_tp_event);
9311
cdd6c482 9312static void tp_perf_event_destroy(struct perf_event *event)
e077df4f 9313{
1c024eca 9314 perf_trace_destroy(event);
e077df4f
PZ
9315}
9316
b0a873eb 9317static int perf_tp_event_init(struct perf_event *event)
e077df4f 9318{
76e1d904
FW
9319 int err;
9320
b0a873eb
PZ
9321 if (event->attr.type != PERF_TYPE_TRACEPOINT)
9322 return -ENOENT;
9323
2481c5fa
SE
9324 /*
9325 * no branch sampling for tracepoint events
9326 */
9327 if (has_branch_stack(event))
9328 return -EOPNOTSUPP;
9329
1c024eca
PZ
9330 err = perf_trace_init(event);
9331 if (err)
b0a873eb 9332 return err;
e077df4f 9333
cdd6c482 9334 event->destroy = tp_perf_event_destroy;
e077df4f 9335
b0a873eb
PZ
9336 return 0;
9337}
9338
9339static struct pmu perf_tracepoint = {
89a1e187
PZ
9340 .task_ctx_nr = perf_sw_context,
9341
b0a873eb 9342 .event_init = perf_tp_event_init,
a4eaf7f1
PZ
9343 .add = perf_trace_add,
9344 .del = perf_trace_del,
9345 .start = perf_swevent_start,
9346 .stop = perf_swevent_stop,
b0a873eb 9347 .read = perf_swevent_read,
b0a873eb
PZ
9348};
9349
33ea4b24 9350#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
e12f03d7
SL
9351/*
9352 * Flags in config, used by dynamic PMU kprobe and uprobe
9353 * The flags should match following PMU_FORMAT_ATTR().
9354 *
9355 * PERF_PROBE_CONFIG_IS_RETPROBE if set, create kretprobe/uretprobe
9356 * if not set, create kprobe/uprobe
a6ca88b2
SL
9357 *
9358 * The following values specify a reference counter (or semaphore, in the
9359 * terminology of tools like dtrace, systemtap, etc.) for Userspace Statically
9360 * Defined Tracepoints (USDT). Currently, we use 32 bits of config for the offset.
9361 *
9362 * PERF_UPROBE_REF_CTR_OFFSET_BITS # of bits in config used as the offset
9363 * PERF_UPROBE_REF_CTR_OFFSET_SHIFT # of bits to shift left
e12f03d7
SL
9364 */
9365enum perf_probe_config {
9366 PERF_PROBE_CONFIG_IS_RETPROBE = 1U << 0, /* [k,u]retprobe */
a6ca88b2
SL
9367 PERF_UPROBE_REF_CTR_OFFSET_BITS = 32,
9368 PERF_UPROBE_REF_CTR_OFFSET_SHIFT = 64 - PERF_UPROBE_REF_CTR_OFFSET_BITS,
e12f03d7
SL
9369};
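/*
 * Illustrative sketch (not part of this file): user space would typically
 * encode perf_event_attr::config for these dynamic PMUs according to the
 * bit layout above and the PMU_FORMAT_ATTR() entries ("config:0" for
 * retprobe, "config:32-63" for the uprobe reference counter offset):
 *
 *	u64 cfg = 0;
 *	cfg |= PERF_PROBE_CONFIG_IS_RETPROBE;			(request a [k,u]retprobe)
 *	cfg |= ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;	(uprobes only)
 *	attr.config = cfg;
 *
 * where ref_ctr_off is the USDT semaphore offset within the object file and
 * attr.type is the dynamically allocated PMU type read from sysfs (the
 * "kprobe"/"uprobe" devices registered in perf_tp_register() below).
 */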
9370
9371PMU_FORMAT_ATTR(retprobe, "config:0");
a6ca88b2 9372#endif
e12f03d7 9373
a6ca88b2
SL
9374#ifdef CONFIG_KPROBE_EVENTS
9375static struct attribute *kprobe_attrs[] = {
e12f03d7
SL
9376 &format_attr_retprobe.attr,
9377 NULL,
9378};
9379
a6ca88b2 9380static struct attribute_group kprobe_format_group = {
e12f03d7 9381 .name = "format",
a6ca88b2 9382 .attrs = kprobe_attrs,
e12f03d7
SL
9383};
9384
a6ca88b2
SL
9385static const struct attribute_group *kprobe_attr_groups[] = {
9386 &kprobe_format_group,
e12f03d7
SL
9387 NULL,
9388};
9389
9390static int perf_kprobe_event_init(struct perf_event *event);
9391static struct pmu perf_kprobe = {
9392 .task_ctx_nr = perf_sw_context,
9393 .event_init = perf_kprobe_event_init,
9394 .add = perf_trace_add,
9395 .del = perf_trace_del,
9396 .start = perf_swevent_start,
9397 .stop = perf_swevent_stop,
9398 .read = perf_swevent_read,
a6ca88b2 9399 .attr_groups = kprobe_attr_groups,
e12f03d7
SL
9400};
9401
9402static int perf_kprobe_event_init(struct perf_event *event)
9403{
9404 int err;
9405 bool is_retprobe;
9406
9407 if (event->attr.type != perf_kprobe.type)
9408 return -ENOENT;
32e6e967 9409
c9e0924e 9410 if (!perfmon_capable())
32e6e967
SL
9411 return -EACCES;
9412
e12f03d7
SL
9413 /*
9414 * no branch sampling for probe events
9415 */
9416 if (has_branch_stack(event))
9417 return -EOPNOTSUPP;
9418
9419 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
9420 err = perf_kprobe_init(event, is_retprobe);
9421 if (err)
9422 return err;
9423
9424 event->destroy = perf_kprobe_destroy;
9425
9426 return 0;
9427}
9428#endif /* CONFIG_KPROBE_EVENTS */
9429
33ea4b24 9430#ifdef CONFIG_UPROBE_EVENTS
a6ca88b2
SL
9431PMU_FORMAT_ATTR(ref_ctr_offset, "config:32-63");
9432
9433static struct attribute *uprobe_attrs[] = {
9434 &format_attr_retprobe.attr,
9435 &format_attr_ref_ctr_offset.attr,
9436 NULL,
9437};
9438
9439static struct attribute_group uprobe_format_group = {
9440 .name = "format",
9441 .attrs = uprobe_attrs,
9442};
9443
9444static const struct attribute_group *uprobe_attr_groups[] = {
9445 &uprobe_format_group,
9446 NULL,
9447};
9448
33ea4b24
SL
9449static int perf_uprobe_event_init(struct perf_event *event);
9450static struct pmu perf_uprobe = {
9451 .task_ctx_nr = perf_sw_context,
9452 .event_init = perf_uprobe_event_init,
9453 .add = perf_trace_add,
9454 .del = perf_trace_del,
9455 .start = perf_swevent_start,
9456 .stop = perf_swevent_stop,
9457 .read = perf_swevent_read,
a6ca88b2 9458 .attr_groups = uprobe_attr_groups,
33ea4b24
SL
9459};
9460
9461static int perf_uprobe_event_init(struct perf_event *event)
9462{
9463 int err;
a6ca88b2 9464 unsigned long ref_ctr_offset;
33ea4b24
SL
9465 bool is_retprobe;
9466
9467 if (event->attr.type != perf_uprobe.type)
9468 return -ENOENT;
32e6e967 9469
c9e0924e 9470 if (!perfmon_capable())
32e6e967
SL
9471 return -EACCES;
9472
33ea4b24
SL
9473 /*
9474 * no branch sampling for probe events
9475 */
9476 if (has_branch_stack(event))
9477 return -EOPNOTSUPP;
9478
9479 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE;
a6ca88b2
SL
9480 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
9481 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe);
33ea4b24
SL
9482 if (err)
9483 return err;
9484
9485 event->destroy = perf_uprobe_destroy;
9486
9487 return 0;
9488}
9489#endif /* CONFIG_UPROBE_EVENTS */
9490
b0a873eb
PZ
9491static inline void perf_tp_register(void)
9492{
2e80a82a 9493 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
e12f03d7
SL
9494#ifdef CONFIG_KPROBE_EVENTS
9495 perf_pmu_register(&perf_kprobe, "kprobe", -1);
9496#endif
33ea4b24
SL
9497#ifdef CONFIG_UPROBE_EVENTS
9498 perf_pmu_register(&perf_uprobe, "uprobe", -1);
9499#endif
e077df4f 9500}
6fb2915d 9501
6fb2915d
LZ
9502static void perf_event_free_filter(struct perf_event *event)
9503{
9504 ftrace_profile_free_filter(event);
9505}
9506
aa6a5f3c
AS
9507#ifdef CONFIG_BPF_SYSCALL
9508static void bpf_overflow_handler(struct perf_event *event,
9509 struct perf_sample_data *data,
9510 struct pt_regs *regs)
9511{
9512 struct bpf_perf_event_data_kern ctx = {
9513 .data = data,
7d9285e8 9514 .event = event,
aa6a5f3c
AS
9515 };
9516 int ret = 0;
9517
c895f6f7 9518 ctx.regs = perf_arch_bpf_user_pt_regs(regs);
aa6a5f3c
AS
9519 if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
9520 goto out;
9521 rcu_read_lock();
88575199 9522 ret = BPF_PROG_RUN(event->prog, &ctx);
aa6a5f3c
AS
9523 rcu_read_unlock();
9524out:
9525 __this_cpu_dec(bpf_prog_active);
aa6a5f3c
AS
9526 if (!ret)
9527 return;
9528
9529 event->orig_overflow_handler(event, data, regs);
9530}
9531
9532static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
9533{
9534 struct bpf_prog *prog;
9535
9536 if (event->overflow_handler_context)
9537 /* hw breakpoint or kernel counter */
9538 return -EINVAL;
9539
9540 if (event->prog)
9541 return -EEXIST;
9542
9543 prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_PERF_EVENT);
9544 if (IS_ERR(prog))
9545 return PTR_ERR(prog);
9546
9547 event->prog = prog;
9548 event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
9549 WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
9550 return 0;
9551}
9552
9553static void perf_event_free_bpf_handler(struct perf_event *event)
9554{
9555 struct bpf_prog *prog = event->prog;
9556
9557 if (!prog)
9558 return;
9559
9560 WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
9561 event->prog = NULL;
9562 bpf_prog_put(prog);
9563}
9564#else
9565static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd)
9566{
9567 return -EOPNOTSUPP;
9568}
9569static void perf_event_free_bpf_handler(struct perf_event *event)
9570{
9571}
9572#endif
9573
e12f03d7
SL
9574/*
9575 * returns true if the event is a tracepoint, or a kprobe/uprobe created
9576 * with perf_event_open()
9577 */
9578static inline bool perf_event_is_tracing(struct perf_event *event)
9579{
9580 if (event->pmu == &perf_tracepoint)
9581 return true;
9582#ifdef CONFIG_KPROBE_EVENTS
9583 if (event->pmu == &perf_kprobe)
9584 return true;
33ea4b24
SL
9585#endif
9586#ifdef CONFIG_UPROBE_EVENTS
9587 if (event->pmu == &perf_uprobe)
9588 return true;
e12f03d7
SL
9589#endif
9590 return false;
9591}
9592
2541517c
AS
9593static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
9594{
cf5f5cea 9595 bool is_kprobe, is_tracepoint, is_syscall_tp;
2541517c 9596 struct bpf_prog *prog;
e87c6bc3 9597 int ret;
2541517c 9598
e12f03d7 9599 if (!perf_event_is_tracing(event))
f91840a3 9600 return perf_event_set_bpf_handler(event, prog_fd);
2541517c 9601
98b5c2c6
AS
9602 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
9603 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
cf5f5cea
YS
9604 is_syscall_tp = is_syscall_trace_event(event->tp_event);
9605 if (!is_kprobe && !is_tracepoint && !is_syscall_tp)
98b5c2c6 9606 /* bpf programs can only be attached to u/kprobe or tracepoint */
2541517c
AS
9607 return -EINVAL;
9608
9609 prog = bpf_prog_get(prog_fd);
9610 if (IS_ERR(prog))
9611 return PTR_ERR(prog);
9612
98b5c2c6 9613 if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
cf5f5cea
YS
9614 (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) ||
9615 (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
2541517c
AS
9616 /* valid fd, but invalid bpf program type */
9617 bpf_prog_put(prog);
9618 return -EINVAL;
9619 }
9620
9802d865
JB
9621 /* Kprobe override only works for kprobes, not uprobes. */
9622 if (prog->kprobe_override &&
9623 !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) {
9624 bpf_prog_put(prog);
9625 return -EINVAL;
9626 }
9627
cf5f5cea 9628 if (is_tracepoint || is_syscall_tp) {
32bbe007
AS
9629 int off = trace_event_get_offsets(event->tp_event);
9630
9631 if (prog->aux->max_ctx_offset > off) {
9632 bpf_prog_put(prog);
9633 return -EACCES;
9634 }
9635 }
2541517c 9636
e87c6bc3
YS
9637 ret = perf_event_attach_bpf_prog(event, prog);
9638 if (ret)
9639 bpf_prog_put(prog);
9640 return ret;
2541517c
AS
9641}
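/*
 * Illustrative sketch (not part of this file): user space normally reaches
 * perf_event_set_bpf_prog() via the PERF_EVENT_IOC_SET_BPF ioctl on a perf
 * event file descriptor, passing the fd of an already loaded BPF program.
 * Roughly, with error handling omitted and load_attr/attr assumed to be
 * filled in appropriately:
 *
 *	int bpf_fd  = syscall(__NR_bpf, BPF_PROG_LOAD, &load_attr, sizeof(load_attr));
 *	int perf_fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
 *	ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, bpf_fd);
 */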
9642
9643static void perf_event_free_bpf_prog(struct perf_event *event)
9644{
e12f03d7 9645 if (!perf_event_is_tracing(event)) {
0b4c6841 9646 perf_event_free_bpf_handler(event);
2541517c 9647 return;
2541517c 9648 }
e87c6bc3 9649 perf_event_detach_bpf_prog(event);
2541517c
AS
9650}
9651
e077df4f 9652#else
6fb2915d 9653
b0a873eb 9654static inline void perf_tp_register(void)
e077df4f 9655{
e077df4f 9656}
6fb2915d 9657
6fb2915d
LZ
9658static void perf_event_free_filter(struct perf_event *event)
9659{
9660}
9661
2541517c
AS
9662static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
9663{
9664 return -ENOENT;
9665}
9666
9667static void perf_event_free_bpf_prog(struct perf_event *event)
9668{
9669}
07b139c8 9670#endif /* CONFIG_EVENT_TRACING */
e077df4f 9671
24f1e32c 9672#ifdef CONFIG_HAVE_HW_BREAKPOINT
f5ffe02e 9673void perf_bp_event(struct perf_event *bp, void *data)
24f1e32c 9674{
f5ffe02e
FW
9675 struct perf_sample_data sample;
9676 struct pt_regs *regs = data;
9677
fd0d000b 9678 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
f5ffe02e 9679
a4eaf7f1 9680 if (!bp->hw.state && !perf_exclude_event(bp, regs))
a8b0ca17 9681 perf_swevent_event(bp, 1, &sample, regs);
24f1e32c
FW
9682}
9683#endif
9684
375637bc
AS
9685/*
9686 * Allocate a new address filter
9687 */
9688static struct perf_addr_filter *
9689perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
9690{
9691 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
9692 struct perf_addr_filter *filter;
9693
9694 filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
9695 if (!filter)
9696 return NULL;
9697
9698 INIT_LIST_HEAD(&filter->entry);
9699 list_add_tail(&filter->entry, filters);
9700
9701 return filter;
9702}
9703
9704static void free_filters_list(struct list_head *filters)
9705{
9706 struct perf_addr_filter *filter, *iter;
9707
9708 list_for_each_entry_safe(filter, iter, filters, entry) {
9511bce9 9709 path_put(&filter->path);
375637bc
AS
9710 list_del(&filter->entry);
9711 kfree(filter);
9712 }
9713}
9714
9715/*
9716 * Free existing address filters and optionally install new ones
9717 */
9718static void perf_addr_filters_splice(struct perf_event *event,
9719 struct list_head *head)
9720{
9721 unsigned long flags;
9722 LIST_HEAD(list);
9723
9724 if (!has_addr_filter(event))
9725 return;
9726
9727 /* don't bother with children, they don't have their own filters */
9728 if (event->parent)
9729 return;
9730
9731 raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
9732
9733 list_splice_init(&event->addr_filters.list, &list);
9734 if (head)
9735 list_splice(head, &event->addr_filters.list);
9736
9737 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
9738
9739 free_filters_list(&list);
9740}
9741
9742/*
9743 * Scan through mm's vmas and see if one of them matches the
9744 * @filter; if so, adjust filter's address range.
c1e8d7c6 9745 * Called with mm::mmap_lock down for reading.
375637bc 9746 */
c60f83b8
AS
9747static void perf_addr_filter_apply(struct perf_addr_filter *filter,
9748 struct mm_struct *mm,
9749 struct perf_addr_filter_range *fr)
375637bc
AS
9750{
9751 struct vm_area_struct *vma;
9752
9753 for (vma = mm->mmap; vma; vma = vma->vm_next) {
c60f83b8 9754 if (!vma->vm_file)
375637bc
AS
9755 continue;
9756
c60f83b8
AS
9757 if (perf_addr_filter_vma_adjust(filter, vma, fr))
9758 return;
375637bc 9759 }
375637bc
AS
9760}
9761
9762/*
9763 * Update event's address range filters based on the
9764 * task's existing mappings, if any.
9765 */
9766static void perf_event_addr_filters_apply(struct perf_event *event)
9767{
9768 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
9769 struct task_struct *task = READ_ONCE(event->ctx->task);
9770 struct perf_addr_filter *filter;
9771 struct mm_struct *mm = NULL;
9772 unsigned int count = 0;
9773 unsigned long flags;
9774
9775 /*
9776 * We may observe TASK_TOMBSTONE, which means that the event tear-down
9777 * will stop on the parent's child_mutex that our caller is also holding
9778 */
9779 if (task == TASK_TOMBSTONE)
9780 return;
9781
52a44f83
AS
9782 if (ifh->nr_file_filters) {
9783 mm = get_task_mm(event->ctx->task);
9784 if (!mm)
9785 goto restart;
375637bc 9786
d8ed45c5 9787 mmap_read_lock(mm);
52a44f83 9788 }
375637bc
AS
9789
9790 raw_spin_lock_irqsave(&ifh->lock, flags);
9791 list_for_each_entry(filter, &ifh->list, entry) {
52a44f83
AS
9792 if (filter->path.dentry) {
9793 /*
9794 * Adjust base offset if the filter is associated with a
9795 * binary that needs to be mapped:
9796 */
9797 event->addr_filter_ranges[count].start = 0;
9798 event->addr_filter_ranges[count].size = 0;
375637bc 9799
c60f83b8 9800 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
52a44f83
AS
9801 } else {
9802 event->addr_filter_ranges[count].start = filter->offset;
9803 event->addr_filter_ranges[count].size = filter->size;
9804 }
375637bc
AS
9805
9806 count++;
9807 }
9808
9809 event->addr_filters_gen++;
9810 raw_spin_unlock_irqrestore(&ifh->lock, flags);
9811
52a44f83 9812 if (ifh->nr_file_filters) {
d8ed45c5 9813 mmap_read_unlock(mm);
375637bc 9814
52a44f83
AS
9815 mmput(mm);
9816 }
375637bc
AS
9817
9818restart:
767ae086 9819 perf_event_stop(event, 1);
375637bc
AS
9820}
9821
9822/*
9823 * Address range filtering: limiting the data to certain
9824 * instruction address ranges. Filters are ioctl()ed to us from
9825 * userspace as ascii strings.
9826 *
9827 * Filter string format:
9828 *
9829 * ACTION RANGE_SPEC
9830 * where ACTION is one of the
9831 * * "filter": limit the trace to this region
9832 * * "start": start tracing from this address
9833 * * "stop": stop tracing at this address/region;
9834 * RANGE_SPEC is
9835 * * for kernel addresses: <start address>[/<size>]
9836 * * for object files: <start address>[/<size>]@</path/to/object/file>
9837 *
6ed70cf3
AS
9838 * if <size> is not specified or is zero, the range is treated as a single
9839 * address; not valid for ACTION=="filter".
375637bc
AS
9840 */
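/*
 * Illustrative examples (not part of this file) of filter strings matching
 * the grammar above, as they might be passed in via the
 * PERF_EVENT_IOC_SET_FILTER ioctl (paths and addresses are made up):
 *
 *	"filter 0x1000/0x2000@/usr/bin/myapp"	trace only this range of the object
 *	"start 0x400000@/usr/bin/myapp"		start tracing at this address
 *	"stop 0xffffffff81234000/0x100"		stop tracing within this kernel range
 */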
9841enum {
e96271f3 9842 IF_ACT_NONE = -1,
375637bc
AS
9843 IF_ACT_FILTER,
9844 IF_ACT_START,
9845 IF_ACT_STOP,
9846 IF_SRC_FILE,
9847 IF_SRC_KERNEL,
9848 IF_SRC_FILEADDR,
9849 IF_SRC_KERNELADDR,
9850};
9851
9852enum {
9853 IF_STATE_ACTION = 0,
9854 IF_STATE_SOURCE,
9855 IF_STATE_END,
9856};
9857
9858static const match_table_t if_tokens = {
9859 { IF_ACT_FILTER, "filter" },
9860 { IF_ACT_START, "start" },
9861 { IF_ACT_STOP, "stop" },
9862 { IF_SRC_FILE, "%u/%u@%s" },
9863 { IF_SRC_KERNEL, "%u/%u" },
9864 { IF_SRC_FILEADDR, "%u@%s" },
9865 { IF_SRC_KERNELADDR, "%u" },
e96271f3 9866 { IF_ACT_NONE, NULL },
375637bc
AS
9867};
9868
9869/*
9870 * Address filter string parser
9871 */
9872static int
9873perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
9874 struct list_head *filters)
9875{
9876 struct perf_addr_filter *filter = NULL;
9877 char *start, *orig, *filename = NULL;
375637bc
AS
9878 substring_t args[MAX_OPT_ARGS];
9879 int state = IF_STATE_ACTION, token;
9880 unsigned int kernel = 0;
9881 int ret = -EINVAL;
9882
9883 orig = fstr = kstrdup(fstr, GFP_KERNEL);
9884 if (!fstr)
9885 return -ENOMEM;
9886
9887 while ((start = strsep(&fstr, " ,\n")) != NULL) {
6ed70cf3
AS
9888 static const enum perf_addr_filter_action_t actions[] = {
9889 [IF_ACT_FILTER] = PERF_ADDR_FILTER_ACTION_FILTER,
9890 [IF_ACT_START] = PERF_ADDR_FILTER_ACTION_START,
9891 [IF_ACT_STOP] = PERF_ADDR_FILTER_ACTION_STOP,
9892 };
375637bc
AS
9893 ret = -EINVAL;
9894
9895 if (!*start)
9896 continue;
9897
9898 /* filter definition begins */
9899 if (state == IF_STATE_ACTION) {
9900 filter = perf_addr_filter_new(event, filters);
9901 if (!filter)
9902 goto fail;
9903 }
9904
9905 token = match_token(start, if_tokens, args);
9906 switch (token) {
9907 case IF_ACT_FILTER:
9908 case IF_ACT_START:
375637bc
AS
9909 case IF_ACT_STOP:
9910 if (state != IF_STATE_ACTION)
9911 goto fail;
9912
6ed70cf3 9913 filter->action = actions[token];
375637bc
AS
9914 state = IF_STATE_SOURCE;
9915 break;
9916
9917 case IF_SRC_KERNELADDR:
9918 case IF_SRC_KERNEL:
9919 kernel = 1;
10c3405f 9920 /* fall through */
375637bc
AS
9921
9922 case IF_SRC_FILEADDR:
9923 case IF_SRC_FILE:
9924 if (state != IF_STATE_SOURCE)
9925 goto fail;
9926
375637bc
AS
9927 *args[0].to = 0;
9928 ret = kstrtoul(args[0].from, 0, &filter->offset);
9929 if (ret)
9930 goto fail;
9931
6ed70cf3 9932 if (token == IF_SRC_KERNEL || token == IF_SRC_FILE) {
375637bc
AS
9933 *args[1].to = 0;
9934 ret = kstrtoul(args[1].from, 0, &filter->size);
9935 if (ret)
9936 goto fail;
9937 }
9938
4059ffd0 9939 if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
6ed70cf3 9940 int fpos = token == IF_SRC_FILE ? 2 : 1;
4059ffd0
MP
9941
9942 filename = match_strdup(&args[fpos]);
375637bc
AS
9943 if (!filename) {
9944 ret = -ENOMEM;
9945 goto fail;
9946 }
9947 }
9948
9949 state = IF_STATE_END;
9950 break;
9951
9952 default:
9953 goto fail;
9954 }
9955
9956 /*
9957 * Filter definition is fully parsed, validate and install it.
9958 * Make sure that it doesn't contradict itself or the event's
9959 * attribute.
9960 */
9961 if (state == IF_STATE_END) {
9ccbfbb1 9962 ret = -EINVAL;
375637bc
AS
9963 if (kernel && event->attr.exclude_kernel)
9964 goto fail;
9965
6ed70cf3
AS
9966 /*
9967 * ACTION "filter" must have a non-zero length region
9968 * specified.
9969 */
9970 if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER &&
9971 !filter->size)
9972 goto fail;
9973
375637bc
AS
9974 if (!kernel) {
9975 if (!filename)
9976 goto fail;
9977
6ce77bfd
AS
9978 /*
9979 * For now, we only support file-based filters
9980 * in per-task events; doing so for CPU-wide
9981 * events requires additional context switching
9982 * trickery, since same object code will be
9983 * mapped at different virtual addresses in
9984 * different processes.
9985 */
9986 ret = -EOPNOTSUPP;
9987 if (!event->ctx->task)
9988 goto fail_free_name;
9989
375637bc 9990 /* look up the path and grab its inode */
9511bce9
SL
9991 ret = kern_path(filename, LOOKUP_FOLLOW,
9992 &filter->path);
375637bc
AS
9993 if (ret)
9994 goto fail_free_name;
9995
375637bc
AS
9996 kfree(filename);
9997 filename = NULL;
9998
9999 ret = -EINVAL;
9511bce9
SL
10000 if (!filter->path.dentry ||
10001 !S_ISREG(d_inode(filter->path.dentry)
10002 ->i_mode))
375637bc 10003 goto fail;
6ce77bfd
AS
10004
10005 event->addr_filters.nr_file_filters++;
375637bc
AS
10006 }
10007
10008 /* ready to consume more filters */
10009 state = IF_STATE_ACTION;
10010 filter = NULL;
10011 }
10012 }
10013
10014 if (state != IF_STATE_ACTION)
10015 goto fail;
10016
10017 kfree(orig);
10018
10019 return 0;
10020
10021fail_free_name:
10022 kfree(filename);
10023fail:
10024 free_filters_list(filters);
10025 kfree(orig);
10026
10027 return ret;
10028}
10029
10030static int
10031perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
10032{
10033 LIST_HEAD(filters);
10034 int ret;
10035
10036 /*
10037 * Since this is called in perf_ioctl() path, we're already holding
10038 * ctx::mutex.
10039 */
10040 lockdep_assert_held(&event->ctx->mutex);
10041
10042 if (WARN_ON_ONCE(event->parent))
10043 return -EINVAL;
10044
375637bc
AS
10045 ret = perf_event_parse_addr_filter(event, filter_str, &filters);
10046 if (ret)
6ce77bfd 10047 goto fail_clear_files;
375637bc
AS
10048
10049 ret = event->pmu->addr_filters_validate(&filters);
6ce77bfd
AS
10050 if (ret)
10051 goto fail_free_filters;
375637bc
AS
10052
10053 /* remove existing filters, if any */
10054 perf_addr_filters_splice(event, &filters);
10055
10056 /* install new filters */
10057 perf_event_for_each_child(event, perf_event_addr_filters_apply);
10058
6ce77bfd
AS
10059 return ret;
10060
10061fail_free_filters:
10062 free_filters_list(&filters);
10063
10064fail_clear_files:
10065 event->addr_filters.nr_file_filters = 0;
10066
375637bc
AS
10067 return ret;
10068}
10069
c796bbbe
AS
10070static int perf_event_set_filter(struct perf_event *event, void __user *arg)
10071{
c796bbbe 10072 int ret = -EINVAL;
e12f03d7 10073 char *filter_str;
c796bbbe
AS
10074
10075 filter_str = strndup_user(arg, PAGE_SIZE);
10076 if (IS_ERR(filter_str))
10077 return PTR_ERR(filter_str);
10078
e12f03d7
SL
10079#ifdef CONFIG_EVENT_TRACING
10080 if (perf_event_is_tracing(event)) {
10081 struct perf_event_context *ctx = event->ctx;
10082
10083 /*
10084 * Beware, here be dragons!!
10085 *
10086 * the tracepoint muck will deadlock against ctx->mutex, but
10087 * the tracepoint stuff does not actually need it. So
10088 * temporarily drop ctx->mutex. As per perf_event_ctx_lock() we
10089 * already have a reference on ctx.
10090 *
10091 * This can result in event getting moved to a different ctx,
10092 * but that does not affect the tracepoint state.
10093 */
10094 mutex_unlock(&ctx->mutex);
10095 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
10096 mutex_lock(&ctx->mutex);
10097 } else
10098#endif
10099 if (has_addr_filter(event))
375637bc 10100 ret = perf_event_set_addr_filter(event, filter_str);
c796bbbe
AS
10101
10102 kfree(filter_str);
10103 return ret;
10104}
10105
b0a873eb
PZ
10106/*
10107 * hrtimer based swevent callback
10108 */
f29ac756 10109
b0a873eb 10110static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
f29ac756 10111{
b0a873eb
PZ
10112 enum hrtimer_restart ret = HRTIMER_RESTART;
10113 struct perf_sample_data data;
10114 struct pt_regs *regs;
10115 struct perf_event *event;
10116 u64 period;
f29ac756 10117
b0a873eb 10118 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
ba3dd36c
PZ
10119
10120 if (event->state != PERF_EVENT_STATE_ACTIVE)
10121 return HRTIMER_NORESTART;
10122
b0a873eb 10123 event->pmu->read(event);
f344011c 10124
fd0d000b 10125 perf_sample_data_init(&data, 0, event->hw.last_period);
b0a873eb
PZ
10126 regs = get_irq_regs();
10127
10128 if (regs && !perf_exclude_event(event, regs)) {
77aeeebd 10129 if (!(event->attr.exclude_idle && is_idle_task(current)))
33b07b8b 10130 if (__perf_event_overflow(event, 1, &data, regs))
b0a873eb
PZ
10131 ret = HRTIMER_NORESTART;
10132 }
24f1e32c 10133
b0a873eb
PZ
10134 period = max_t(u64, 10000, event->hw.sample_period);
10135 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
24f1e32c 10136
b0a873eb 10137 return ret;
f29ac756
PZ
10138}
10139
b0a873eb 10140static void perf_swevent_start_hrtimer(struct perf_event *event)
5c92d124 10141{
b0a873eb 10142 struct hw_perf_event *hwc = &event->hw;
5d508e82
FBH
10143 s64 period;
10144
10145 if (!is_sampling_event(event))
10146 return;
f5ffe02e 10147
5d508e82
FBH
10148 period = local64_read(&hwc->period_left);
10149 if (period) {
10150 if (period < 0)
10151 period = 10000;
fa407f35 10152
5d508e82
FBH
10153 local64_set(&hwc->period_left, 0);
10154 } else {
10155 period = max_t(u64, 10000, hwc->sample_period);
10156 }
3497d206 10157 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
30f9028b 10158 HRTIMER_MODE_REL_PINNED_HARD);
24f1e32c 10159}
b0a873eb
PZ
10160
10161static void perf_swevent_cancel_hrtimer(struct perf_event *event)
24f1e32c 10162{
b0a873eb
PZ
10163 struct hw_perf_event *hwc = &event->hw;
10164
6c7e550f 10165 if (is_sampling_event(event)) {
b0a873eb 10166 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
fa407f35 10167 local64_set(&hwc->period_left, ktime_to_ns(remaining));
b0a873eb
PZ
10168
10169 hrtimer_cancel(&hwc->hrtimer);
10170 }
24f1e32c
FW
10171}
10172
ba3dd36c
PZ
10173static void perf_swevent_init_hrtimer(struct perf_event *event)
10174{
10175 struct hw_perf_event *hwc = &event->hw;
10176
10177 if (!is_sampling_event(event))
10178 return;
10179
30f9028b 10180 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
ba3dd36c
PZ
10181 hwc->hrtimer.function = perf_swevent_hrtimer;
10182
10183 /*
10184 * Since hrtimers have a fixed rate, we can do a static freq->period
10185 * mapping and avoid the whole period adjust feedback stuff.
10186 */
10187 if (event->attr.freq) {
10188 long freq = event->attr.sample_freq;
10189
10190 event->attr.sample_period = NSEC_PER_SEC / freq;
10191 hwc->sample_period = event->attr.sample_period;
10192 local64_set(&hwc->period_left, hwc->sample_period);
778141e3 10193 hwc->last_period = hwc->sample_period;
ba3dd36c
PZ
10194 event->attr.freq = 0;
10195 }
10196}
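/*
 * Worked example (illustrative): with attr.freq = 1 and attr.sample_freq =
 * 4000, the static mapping above gives
 *
 *	sample_period = NSEC_PER_SEC / 4000 = 250000 ns
 *
 * so the hrtimer for this event fires every 250 microseconds.
 */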
10197
b0a873eb
PZ
10198/*
10199 * Software event: cpu wall time clock
10200 */
10201
10202static void cpu_clock_event_update(struct perf_event *event)
24f1e32c 10203{
b0a873eb
PZ
10204 s64 prev;
10205 u64 now;
10206
a4eaf7f1 10207 now = local_clock();
b0a873eb
PZ
10208 prev = local64_xchg(&event->hw.prev_count, now);
10209 local64_add(now - prev, &event->count);
24f1e32c 10210}
24f1e32c 10211
a4eaf7f1 10212static void cpu_clock_event_start(struct perf_event *event, int flags)
b0a873eb 10213{
a4eaf7f1 10214 local64_set(&event->hw.prev_count, local_clock());
b0a873eb 10215 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
10216}
10217
a4eaf7f1 10218static void cpu_clock_event_stop(struct perf_event *event, int flags)
f29ac756 10219{
b0a873eb
PZ
10220 perf_swevent_cancel_hrtimer(event);
10221 cpu_clock_event_update(event);
10222}
f29ac756 10223
a4eaf7f1
PZ
10224static int cpu_clock_event_add(struct perf_event *event, int flags)
10225{
10226 if (flags & PERF_EF_START)
10227 cpu_clock_event_start(event, flags);
6a694a60 10228 perf_event_update_userpage(event);
a4eaf7f1
PZ
10229
10230 return 0;
10231}
10232
10233static void cpu_clock_event_del(struct perf_event *event, int flags)
10234{
10235 cpu_clock_event_stop(event, flags);
10236}
10237
b0a873eb
PZ
10238static void cpu_clock_event_read(struct perf_event *event)
10239{
10240 cpu_clock_event_update(event);
10241}
f344011c 10242
b0a873eb
PZ
10243static int cpu_clock_event_init(struct perf_event *event)
10244{
10245 if (event->attr.type != PERF_TYPE_SOFTWARE)
10246 return -ENOENT;
10247
10248 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
10249 return -ENOENT;
10250
2481c5fa
SE
10251 /*
10252 * no branch sampling for software events
10253 */
10254 if (has_branch_stack(event))
10255 return -EOPNOTSUPP;
10256
ba3dd36c
PZ
10257 perf_swevent_init_hrtimer(event);
10258
b0a873eb 10259 return 0;
f29ac756
PZ
10260}
10261
b0a873eb 10262static struct pmu perf_cpu_clock = {
89a1e187
PZ
10263 .task_ctx_nr = perf_sw_context,
10264
34f43927
PZ
10265 .capabilities = PERF_PMU_CAP_NO_NMI,
10266
b0a873eb 10267 .event_init = cpu_clock_event_init,
a4eaf7f1
PZ
10268 .add = cpu_clock_event_add,
10269 .del = cpu_clock_event_del,
10270 .start = cpu_clock_event_start,
10271 .stop = cpu_clock_event_stop,
b0a873eb
PZ
10272 .read = cpu_clock_event_read,
10273};
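/*
 * Illustrative sketch (not part of this file): user space can count CPU wall
 * clock time with this software PMU through perf_event_open(), roughly:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *		.size	= sizeof(attr),
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * A subsequent read() of fd returns the elapsed time in nanoseconds, as
 * accumulated by cpu_clock_event_update() above.
 */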
10274
10275/*
10276 * Software event: task time clock
10277 */
10278
10279static void task_clock_event_update(struct perf_event *event, u64 now)
5c92d124 10280{
b0a873eb
PZ
10281 u64 prev;
10282 s64 delta;
5c92d124 10283
b0a873eb
PZ
10284 prev = local64_xchg(&event->hw.prev_count, now);
10285 delta = now - prev;
10286 local64_add(delta, &event->count);
10287}
5c92d124 10288
a4eaf7f1 10289static void task_clock_event_start(struct perf_event *event, int flags)
b0a873eb 10290{
a4eaf7f1 10291 local64_set(&event->hw.prev_count, event->ctx->time);
b0a873eb 10292 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
10293}
10294
a4eaf7f1 10295static void task_clock_event_stop(struct perf_event *event, int flags)
b0a873eb
PZ
10296{
10297 perf_swevent_cancel_hrtimer(event);
10298 task_clock_event_update(event, event->ctx->time);
a4eaf7f1
PZ
10299}
10300
10301static int task_clock_event_add(struct perf_event *event, int flags)
10302{
10303 if (flags & PERF_EF_START)
10304 task_clock_event_start(event, flags);
6a694a60 10305 perf_event_update_userpage(event);
b0a873eb 10306
a4eaf7f1
PZ
10307 return 0;
10308}
10309
10310static void task_clock_event_del(struct perf_event *event, int flags)
10311{
10312 task_clock_event_stop(event, PERF_EF_UPDATE);
b0a873eb
PZ
10313}
10314
10315static void task_clock_event_read(struct perf_event *event)
10316{
768a06e2
PZ
10317 u64 now = perf_clock();
10318 u64 delta = now - event->ctx->timestamp;
10319 u64 time = event->ctx->time + delta;
b0a873eb
PZ
10320
10321 task_clock_event_update(event, time);
10322}
10323
10324static int task_clock_event_init(struct perf_event *event)
6fb2915d 10325{
b0a873eb
PZ
10326 if (event->attr.type != PERF_TYPE_SOFTWARE)
10327 return -ENOENT;
10328
10329 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
10330 return -ENOENT;
10331
2481c5fa
SE
10332 /*
10333 * no branch sampling for software events
10334 */
10335 if (has_branch_stack(event))
10336 return -EOPNOTSUPP;
10337
ba3dd36c
PZ
10338 perf_swevent_init_hrtimer(event);
10339
b0a873eb 10340 return 0;
6fb2915d
LZ
10341}
10342
b0a873eb 10343static struct pmu perf_task_clock = {
89a1e187
PZ
10344 .task_ctx_nr = perf_sw_context,
10345
34f43927
PZ
10346 .capabilities = PERF_PMU_CAP_NO_NMI,
10347
b0a873eb 10348 .event_init = task_clock_event_init,
a4eaf7f1
PZ
10349 .add = task_clock_event_add,
10350 .del = task_clock_event_del,
10351 .start = task_clock_event_start,
10352 .stop = task_clock_event_stop,
b0a873eb
PZ
10353 .read = task_clock_event_read,
10354};
6fb2915d 10355
ad5133b7 10356static void perf_pmu_nop_void(struct pmu *pmu)
e077df4f 10357{
e077df4f 10358}
6fb2915d 10359
fbbe0701
SB
10360static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
10361{
10362}
10363
ad5133b7 10364static int perf_pmu_nop_int(struct pmu *pmu)
6fb2915d 10365{
ad5133b7 10366 return 0;
6fb2915d
LZ
10367}
10368
81ec3f3c
JO
10369static int perf_event_nop_int(struct perf_event *event, u64 value)
10370{
10371 return 0;
10372}
10373
18ab2cd3 10374static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
fbbe0701
SB
10375
10376static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
6fb2915d 10377{
fbbe0701
SB
10378 __this_cpu_write(nop_txn_flags, flags);
10379
10380 if (flags & ~PERF_PMU_TXN_ADD)
10381 return;
10382
ad5133b7 10383 perf_pmu_disable(pmu);
6fb2915d
LZ
10384}
10385
ad5133b7
PZ
10386static int perf_pmu_commit_txn(struct pmu *pmu)
10387{
fbbe0701
SB
10388 unsigned int flags = __this_cpu_read(nop_txn_flags);
10389
10390 __this_cpu_write(nop_txn_flags, 0);
10391
10392 if (flags & ~PERF_PMU_TXN_ADD)
10393 return 0;
10394
ad5133b7
PZ
10395 perf_pmu_enable(pmu);
10396 return 0;
10397}
e077df4f 10398
ad5133b7 10399static void perf_pmu_cancel_txn(struct pmu *pmu)
24f1e32c 10400{
fbbe0701
SB
10401 unsigned int flags = __this_cpu_read(nop_txn_flags);
10402
10403 __this_cpu_write(nop_txn_flags, 0);
10404
10405 if (flags & ~PERF_PMU_TXN_ADD)
10406 return;
10407
ad5133b7 10408 perf_pmu_enable(pmu);
24f1e32c
FW
10409}
10410
35edc2a5
PZ
10411static int perf_event_idx_default(struct perf_event *event)
10412{
c719f560 10413 return 0;
35edc2a5
PZ
10414}
10415
8dc85d54
PZ
10416/*
10417 * Ensures all contexts with the same task_ctx_nr have the same
10418 * pmu_cpu_context too.
10419 */
9e317041 10420static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
24f1e32c 10421{
8dc85d54 10422 struct pmu *pmu;
b326e956 10423
8dc85d54
PZ
10424 if (ctxn < 0)
10425 return NULL;
24f1e32c 10426
8dc85d54
PZ
10427 list_for_each_entry(pmu, &pmus, entry) {
10428 if (pmu->task_ctx_nr == ctxn)
10429 return pmu->pmu_cpu_context;
10430 }
24f1e32c 10431
8dc85d54 10432 return NULL;
24f1e32c
FW
10433}
10434
51676957
PZ
10435static void free_pmu_context(struct pmu *pmu)
10436{
df0062b2
WD
10437 /*
10438 * Static contexts such as perf_sw_context have a global lifetime
10439 * and may be shared between different PMUs. Avoid freeing them
10440 * when a single PMU is going away.
10441 */
10442 if (pmu->task_ctx_nr > perf_invalid_context)
10443 return;
10444
51676957 10445 free_percpu(pmu->pmu_cpu_context);
24f1e32c 10446}
6e855cd4
AS
10447
10448/*
10449 * Let userspace know that this PMU supports address range filtering:
10450 */
10451static ssize_t nr_addr_filters_show(struct device *dev,
10452 struct device_attribute *attr,
10453 char *page)
10454{
10455 struct pmu *pmu = dev_get_drvdata(dev);
10456
10457 return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
10458}
10459DEVICE_ATTR_RO(nr_addr_filters);
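/*
 * Illustrative sketch (not part of this file): since PMU devices hang off
 * the "event_source" bus defined below, this attribute is typically visible
 * to user space as, e.g.:
 *
 *	$ cat /sys/bus/event_source/devices/intel_pt/nr_addr_filters
 *	4
 *
 * The "intel_pt" PMU name and the value shown are made-up examples.
 */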
10460
2e80a82a 10461static struct idr pmu_idr;
d6d020e9 10462
abe43400
PZ
10463static ssize_t
10464type_show(struct device *dev, struct device_attribute *attr, char *page)
10465{
10466 struct pmu *pmu = dev_get_drvdata(dev);
10467
10468 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
10469}
90826ca7 10470static DEVICE_ATTR_RO(type);
abe43400 10471
62b85639
SE
10472static ssize_t
10473perf_event_mux_interval_ms_show(struct device *dev,
10474 struct device_attribute *attr,
10475 char *page)
10476{
10477 struct pmu *pmu = dev_get_drvdata(dev);
10478
10479 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
10480}
10481
272325c4
PZ
10482static DEFINE_MUTEX(mux_interval_mutex);
10483
62b85639
SE
10484static ssize_t
10485perf_event_mux_interval_ms_store(struct device *dev,
10486 struct device_attribute *attr,
10487 const char *buf, size_t count)
10488{
10489 struct pmu *pmu = dev_get_drvdata(dev);
10490 int timer, cpu, ret;
10491
10492 ret = kstrtoint(buf, 0, &timer);
10493 if (ret)
10494 return ret;
10495
10496 if (timer < 1)
10497 return -EINVAL;
10498
10499 /* same value, nothing to do */
10500 if (timer == pmu->hrtimer_interval_ms)
10501 return count;
10502
272325c4 10503 mutex_lock(&mux_interval_mutex);
62b85639
SE
10504 pmu->hrtimer_interval_ms = timer;
10505
10506 /* update all cpuctx for this PMU */
a63fbed7 10507 cpus_read_lock();
272325c4 10508 for_each_online_cpu(cpu) {
62b85639
SE
10509 struct perf_cpu_context *cpuctx;
10510 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
10511 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
10512
272325c4
PZ
10513 cpu_function_call(cpu,
10514 (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
62b85639 10515 }
a63fbed7 10516 cpus_read_unlock();
272325c4 10517 mutex_unlock(&mux_interval_mutex);
62b85639
SE
10518
10519 return count;
10520}
90826ca7 10521static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
62b85639 10522
90826ca7
GKH
10523static struct attribute *pmu_dev_attrs[] = {
10524 &dev_attr_type.attr,
10525 &dev_attr_perf_event_mux_interval_ms.attr,
10526 NULL,
abe43400 10527};
90826ca7 10528ATTRIBUTE_GROUPS(pmu_dev);
abe43400
PZ
10529
10530static int pmu_bus_running;
10531static struct bus_type pmu_bus = {
10532 .name = "event_source",
90826ca7 10533 .dev_groups = pmu_dev_groups,
abe43400
PZ
10534};
10535
10536static void pmu_dev_release(struct device *dev)
10537{
10538 kfree(dev);
10539}
10540
10541static int pmu_dev_alloc(struct pmu *pmu)
10542{
10543 int ret = -ENOMEM;
10544
10545 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
10546 if (!pmu->dev)
10547 goto out;
10548
0c9d42ed 10549 pmu->dev->groups = pmu->attr_groups;
abe43400
PZ
10550 device_initialize(pmu->dev);
10551 ret = dev_set_name(pmu->dev, "%s", pmu->name);
10552 if (ret)
10553 goto free_dev;
10554
10555 dev_set_drvdata(pmu->dev, pmu);
10556 pmu->dev->bus = &pmu_bus;
10557 pmu->dev->release = pmu_dev_release;
10558 ret = device_add(pmu->dev);
10559 if (ret)
10560 goto free_dev;
10561
6e855cd4
AS
10562 /* For PMUs with address filters, throw in an extra attribute: */
10563 if (pmu->nr_addr_filters)
10564 ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
10565
10566 if (ret)
10567 goto del_dev;
10568
f3a3a825
JO
10569 if (pmu->attr_update)
10570 ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update);
10571
10572 if (ret)
10573 goto del_dev;
10574
abe43400
PZ
10575out:
10576 return ret;
10577
6e855cd4
AS
10578del_dev:
10579 device_del(pmu->dev);
10580
abe43400
PZ
10581free_dev:
10582 put_device(pmu->dev);
10583 goto out;
10584}
10585
547e9fd7 10586static struct lock_class_key cpuctx_mutex;
facc4307 10587static struct lock_class_key cpuctx_lock;
547e9fd7 10588
03d8e80b 10589int perf_pmu_register(struct pmu *pmu, const char *name, int type)
24f1e32c 10590{
66d258c5 10591 int cpu, ret, max = PERF_TYPE_MAX;
24f1e32c 10592
b0a873eb 10593 mutex_lock(&pmus_lock);
33696fc0
PZ
10594 ret = -ENOMEM;
10595 pmu->pmu_disable_count = alloc_percpu(int);
10596 if (!pmu->pmu_disable_count)
10597 goto unlock;
f29ac756 10598
2e80a82a
PZ
10599 pmu->type = -1;
10600 if (!name)
10601 goto skip_type;
10602 pmu->name = name;
10603
66d258c5
PZ
10604 if (type != PERF_TYPE_SOFTWARE) {
10605 if (type >= 0)
10606 max = type;
10607
10608 ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL);
10609 if (ret < 0)
2e80a82a 10610 goto free_pdc;
66d258c5
PZ
10611
10612 WARN_ON(type >= 0 && ret != type);
10613
10614 type = ret;
2e80a82a
PZ
10615 }
10616 pmu->type = type;
10617
abe43400
PZ
10618 if (pmu_bus_running) {
10619 ret = pmu_dev_alloc(pmu);
10620 if (ret)
10621 goto free_idr;
10622 }
10623
2e80a82a 10624skip_type:
26657848
PZ
10625 if (pmu->task_ctx_nr == perf_hw_context) {
10626 static int hw_context_taken = 0;
10627
5101ef20
MR
10628 /*
10629 * Other than systems with heterogeneous CPUs, it never makes
10630 * sense for two PMUs to share perf_hw_context. PMUs which are
10631 * uncore must use perf_invalid_context.
10632 */
10633 if (WARN_ON_ONCE(hw_context_taken &&
10634 !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS)))
26657848
PZ
10635 pmu->task_ctx_nr = perf_invalid_context;
10636
10637 hw_context_taken = 1;
10638 }
10639
8dc85d54
PZ
10640 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
10641 if (pmu->pmu_cpu_context)
10642 goto got_cpu_context;
f29ac756 10643
c4814202 10644 ret = -ENOMEM;
108b02cf
PZ
10645 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
10646 if (!pmu->pmu_cpu_context)
abe43400 10647 goto free_dev;
f344011c 10648
108b02cf
PZ
10649 for_each_possible_cpu(cpu) {
10650 struct perf_cpu_context *cpuctx;
10651
10652 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
eb184479 10653 __perf_event_init_context(&cpuctx->ctx);
547e9fd7 10654 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
facc4307 10655 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
108b02cf 10656 cpuctx->ctx.pmu = pmu;
a63fbed7 10657 cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
9e630205 10658
272325c4 10659 __perf_mux_hrtimer_init(cpuctx, cpu);
836196be
IR
10660
10661 cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default);
10662 cpuctx->heap = cpuctx->heap_default;
108b02cf 10663 }
76e1d904 10664
8dc85d54 10665got_cpu_context:
ad5133b7
PZ
10666 if (!pmu->start_txn) {
10667 if (pmu->pmu_enable) {
10668 /*
10669 * If we have pmu_enable/pmu_disable calls, install
10670 * transaction stubs that use that to try and batch
10671 * hardware accesses.
10672 */
10673 pmu->start_txn = perf_pmu_start_txn;
10674 pmu->commit_txn = perf_pmu_commit_txn;
10675 pmu->cancel_txn = perf_pmu_cancel_txn;
10676 } else {
fbbe0701 10677 pmu->start_txn = perf_pmu_nop_txn;
ad5133b7
PZ
10678 pmu->commit_txn = perf_pmu_nop_int;
10679 pmu->cancel_txn = perf_pmu_nop_void;
f344011c 10680 }
5c92d124 10681 }
15dbf27c 10682
ad5133b7
PZ
10683 if (!pmu->pmu_enable) {
10684 pmu->pmu_enable = perf_pmu_nop_void;
10685 pmu->pmu_disable = perf_pmu_nop_void;
10686 }
10687
81ec3f3c
JO
10688 if (!pmu->check_period)
10689 pmu->check_period = perf_event_nop_int;
10690
35edc2a5
PZ
10691 if (!pmu->event_idx)
10692 pmu->event_idx = perf_event_idx_default;
10693
d44f821b
LK
10694 /*
10695 * Ensure the TYPE_SOFTWARE PMUs are at the head of the list,
10696 * since these cannot be in the IDR. This keeps the linear search
10697 * fast when a valid software event is supplied.
10698 */
10699 if (type == PERF_TYPE_SOFTWARE || !name)
10700 list_add_rcu(&pmu->entry, &pmus);
10701 else
10702 list_add_tail_rcu(&pmu->entry, &pmus);
10703
bed5b25a 10704 atomic_set(&pmu->exclusive_cnt, 0);
33696fc0
PZ
10705 ret = 0;
10706unlock:
b0a873eb
PZ
10707 mutex_unlock(&pmus_lock);
10708
33696fc0 10709 return ret;
108b02cf 10710
abe43400
PZ
10711free_dev:
10712 device_del(pmu->dev);
10713 put_device(pmu->dev);
10714
2e80a82a 10715free_idr:
66d258c5 10716 if (pmu->type != PERF_TYPE_SOFTWARE)
2e80a82a
PZ
10717 idr_remove(&pmu_idr, pmu->type);
10718
108b02cf
PZ
10719free_pdc:
10720 free_percpu(pmu->pmu_disable_count);
10721 goto unlock;
f29ac756 10722}
c464c76e 10723EXPORT_SYMBOL_GPL(perf_pmu_register);
f29ac756 10724
b0a873eb 10725void perf_pmu_unregister(struct pmu *pmu)
5c92d124 10726{
b0a873eb
PZ
10727 mutex_lock(&pmus_lock);
10728 list_del_rcu(&pmu->entry);
5c92d124 10729
0475f9ea 10730 /*
cde8e884
PZ
10731 * We dereference the pmu list under both SRCU and regular RCU, so
10732 * synchronize against both of those.
0475f9ea 10733 */
b0a873eb 10734 synchronize_srcu(&pmus_srcu);
cde8e884 10735 synchronize_rcu();
d6d020e9 10736
33696fc0 10737 free_percpu(pmu->pmu_disable_count);
66d258c5 10738 if (pmu->type != PERF_TYPE_SOFTWARE)
2e80a82a 10739 idr_remove(&pmu_idr, pmu->type);
a9f97721 10740 if (pmu_bus_running) {
0933840a
JO
10741 if (pmu->nr_addr_filters)
10742 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
10743 device_del(pmu->dev);
10744 put_device(pmu->dev);
10745 }
51676957 10746 free_pmu_context(pmu);
a9f97721 10747 mutex_unlock(&pmus_lock);
b0a873eb 10748}
c464c76e 10749EXPORT_SYMBOL_GPL(perf_pmu_unregister);
d6d020e9 10750
e321d02d
KL
10751static inline bool has_extended_regs(struct perf_event *event)
10752{
10753 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) ||
10754 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK);
10755}
10756
cc34b98b
MR
10757static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
10758{
ccd41c86 10759 struct perf_event_context *ctx = NULL;
cc34b98b
MR
10760 int ret;
10761
10762 if (!try_module_get(pmu->module))
10763 return -ENODEV;
ccd41c86 10764
0c7296ca
PZ
10765 /*
10766 * A number of pmu->event_init() methods iterate the sibling_list to,
10767 * for example, validate if the group fits on the PMU. Therefore,
10768 * if this is a sibling event, acquire the ctx->mutex to protect
10769 * the sibling_list.
10770 */
10771 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) {
8b10c5e2
PZ
10772 /*
10773 * This ctx->mutex can nest when we're called through
10774 * inheritance. See the perf_event_ctx_lock_nested() comment.
10775 */
10776 ctx = perf_event_ctx_lock_nested(event->group_leader,
10777 SINGLE_DEPTH_NESTING);
ccd41c86
PZ
10778 BUG_ON(!ctx);
10779 }
10780
cc34b98b
MR
10781 event->pmu = pmu;
10782 ret = pmu->event_init(event);
ccd41c86
PZ
10783
10784 if (ctx)
10785 perf_event_ctx_unlock(event->group_leader, ctx);
10786
cc6795ae 10787 if (!ret) {
e321d02d
KL
10788 if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) &&
10789 has_extended_regs(event))
10790 ret = -EOPNOTSUPP;
10791
cc6795ae 10792 if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE &&
e321d02d 10793 event_has_any_exclude_flag(event))
cc6795ae 10794 ret = -EINVAL;
e321d02d
KL
10795
10796 if (ret && event->destroy)
10797 event->destroy(event);
cc6795ae
AM
10798 }
10799
cc34b98b
MR
10800 if (ret)
10801 module_put(pmu->module);
10802
10803 return ret;
10804}
10805
18ab2cd3 10806static struct pmu *perf_init_event(struct perf_event *event)
b0a873eb 10807{
66d258c5 10808 int idx, type, ret;
85c617ab 10809 struct pmu *pmu;
b0a873eb
PZ
10810
10811 idx = srcu_read_lock(&pmus_srcu);
2e80a82a 10812
40999312
KL
10813 /* Try parent's PMU first: */
10814 if (event->parent && event->parent->pmu) {
10815 pmu = event->parent->pmu;
10816 ret = perf_try_init_event(pmu, event);
10817 if (!ret)
10818 goto unlock;
10819 }
10820
66d258c5
PZ
10821 /*
10822 * PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
10823 * are often aliases for PERF_TYPE_RAW.
10824 */
10825 type = event->attr.type;
10826 if (type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE)
10827 type = PERF_TYPE_RAW;
10828
10829again:
2e80a82a 10830 rcu_read_lock();
66d258c5 10831 pmu = idr_find(&pmu_idr, type);
2e80a82a 10832 rcu_read_unlock();
940c5b29 10833 if (pmu) {
cc34b98b 10834 ret = perf_try_init_event(pmu, event);
66d258c5
PZ
10835 if (ret == -ENOENT && event->attr.type != type) {
10836 type = event->attr.type;
10837 goto again;
10838 }
10839
940c5b29
LM
10840 if (ret)
10841 pmu = ERR_PTR(ret);
66d258c5 10842
2e80a82a 10843 goto unlock;
940c5b29 10844 }
2e80a82a 10845
9f0bff11 10846 list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) {
cc34b98b 10847 ret = perf_try_init_event(pmu, event);
b0a873eb 10848 if (!ret)
e5f4d339 10849 goto unlock;
76e1d904 10850
b0a873eb
PZ
10851 if (ret != -ENOENT) {
10852 pmu = ERR_PTR(ret);
e5f4d339 10853 goto unlock;
f344011c 10854 }
5c92d124 10855 }
e5f4d339
PZ
10856 pmu = ERR_PTR(-ENOENT);
10857unlock:
b0a873eb 10858 srcu_read_unlock(&pmus_srcu, idx);
15dbf27c 10859
4aeb0b42 10860 return pmu;
5c92d124
IM
10861}
10862
f2fb6bef
KL
10863static void attach_sb_event(struct perf_event *event)
10864{
10865 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
10866
10867 raw_spin_lock(&pel->lock);
10868 list_add_rcu(&event->sb_list, &pel->list);
10869 raw_spin_unlock(&pel->lock);
10870}
10871
aab5b71e
PZ
10872/*
10873 * We keep a list of all !task (and therefore per-cpu) events
10874 * that need to receive side-band records.
10875 *
10876 * This avoids having to scan all the various PMU per-cpu contexts
10877 * looking for them.
10878 */
f2fb6bef
KL
10879static void account_pmu_sb_event(struct perf_event *event)
10880{
a4f144eb 10881 if (is_sb_event(event))
f2fb6bef
KL
10882 attach_sb_event(event);
10883}
10884
4beb31f3
FW
10885static void account_event_cpu(struct perf_event *event, int cpu)
10886{
10887 if (event->parent)
10888 return;
10889
4beb31f3
FW
10890 if (is_cgroup_event(event))
10891 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
10892}
10893
555e0c1e
FW
10894/* Freq events need the tick to stay alive (see perf_event_task_tick). */
10895static void account_freq_event_nohz(void)
10896{
10897#ifdef CONFIG_NO_HZ_FULL
10898 /* Lock so we don't race with concurrent unaccount */
10899 spin_lock(&nr_freq_lock);
10900 if (atomic_inc_return(&nr_freq_events) == 1)
10901 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
10902 spin_unlock(&nr_freq_lock);
10903#endif
10904}
10905
10906static void account_freq_event(void)
10907{
10908 if (tick_nohz_full_enabled())
10909 account_freq_event_nohz();
10910 else
10911 atomic_inc(&nr_freq_events);
10912}
10913
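
/*
 * What makes an event a "freq event" (hedged userspace sketch, not part of
 * this file): sampling by frequency instead of by a fixed period.  The
 * kernel then re-adjusts the period from the tick, which is why such
 * events keep the tick alive under NO_HZ_FULL (see the comment above).
 */
#include <linux/perf_event.h>

static void attr_sample_at_4khz(struct perf_event_attr *attr)
{
        attr->freq = 1;                 /* interpret the union as sample_freq */
        attr->sample_freq = 4000;       /* aim for roughly 4000 samples/sec */
}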
10914
766d6c07
FW
10915static void account_event(struct perf_event *event)
10916{
25432ae9
PZ
10917 bool inc = false;
10918
4beb31f3
FW
10919 if (event->parent)
10920 return;
10921
766d6c07 10922 if (event->attach_state & PERF_ATTACH_TASK)
25432ae9 10923 inc = true;
766d6c07
FW
10924 if (event->attr.mmap || event->attr.mmap_data)
10925 atomic_inc(&nr_mmap_events);
10926 if (event->attr.comm)
10927 atomic_inc(&nr_comm_events);
e4222673
HB
10928 if (event->attr.namespaces)
10929 atomic_inc(&nr_namespaces_events);
96aaab68
NK
10930 if (event->attr.cgroup)
10931 atomic_inc(&nr_cgroup_events);
766d6c07
FW
10932 if (event->attr.task)
10933 atomic_inc(&nr_task_events);
555e0c1e
FW
10934 if (event->attr.freq)
10935 account_freq_event();
45ac1403
AH
10936 if (event->attr.context_switch) {
10937 atomic_inc(&nr_switch_events);
25432ae9 10938 inc = true;
45ac1403 10939 }
4beb31f3 10940 if (has_branch_stack(event))
25432ae9 10941 inc = true;
4beb31f3 10942 if (is_cgroup_event(event))
25432ae9 10943 inc = true;
76193a94
SL
10944 if (event->attr.ksymbol)
10945 atomic_inc(&nr_ksymbol_events);
6ee52e2a
SL
10946 if (event->attr.bpf_event)
10947 atomic_inc(&nr_bpf_events);
25432ae9 10948
9107c89e 10949 if (inc) {
5bce9db1
AS
10950 /*
10951 * We need the mutex here because static_branch_enable()
10952 * must complete *before* the perf_sched_count increment
10953 * becomes visible.
10954 */
9107c89e
PZ
10955 if (atomic_inc_not_zero(&perf_sched_count))
10956 goto enabled;
10957
10958 mutex_lock(&perf_sched_mutex);
10959 if (!atomic_read(&perf_sched_count)) {
10960 static_branch_enable(&perf_sched_events);
10961 /*
10962 * Guarantee that all CPUs observe the key change and
10963 * call the perf scheduling hooks before proceeding to
10964 * install events that need them.
10965 */
0809d954 10966 synchronize_rcu();
9107c89e
PZ
10967 }
10968 /*
10969 * Now that the synchronize_rcu() above has completed, allow further
10970 * increments to bypass the mutex.
10971 */
10972 atomic_inc(&perf_sched_count);
10973 mutex_unlock(&perf_sched_mutex);
10974 }
10975enabled:
4beb31f3
FW
10976
10977 account_event_cpu(event, event->cpu);
f2fb6bef
KL
10978
10979 account_pmu_sb_event(event);
766d6c07
FW
10980}
10981
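
/*
 * account_event() above uses a common "enable once, then bypass the mutex"
 * pattern for the perf_sched_events static key: the first caller takes
 * perf_sched_mutex, flips the key and waits for every CPU to observe it,
 * while later callers only bump perf_sched_count.  A self-contained sketch
 * of that pattern with hypothetical names (not part of this file):
 */
static DEFINE_STATIC_KEY_FALSE(my_feature_key);
static DEFINE_MUTEX(my_feature_mutex);
static atomic_t my_feature_count = ATOMIC_INIT(0);

static void my_feature_get(void)
{
        /* Fast path: somebody already enabled the key and bumped the count. */
        if (atomic_inc_not_zero(&my_feature_count))
                return;

        mutex_lock(&my_feature_mutex);
        if (!atomic_read(&my_feature_count)) {
                static_branch_enable(&my_feature_key);
                /* Every CPU must observe the key before the count goes non-zero. */
                synchronize_rcu();
        }
        atomic_inc(&my_feature_count);
        mutex_unlock(&my_feature_mutex);
}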
0793a61d 10982/*
788faab7 10983 * Allocate and initialize an event structure
0793a61d 10984 */
cdd6c482 10985static struct perf_event *
c3f00c70 10986perf_event_alloc(struct perf_event_attr *attr, int cpu,
d580ff86
PZ
10987 struct task_struct *task,
10988 struct perf_event *group_leader,
10989 struct perf_event *parent_event,
4dc0da86 10990 perf_overflow_handler_t overflow_handler,
79dff51e 10991 void *context, int cgroup_fd)
0793a61d 10992{
51b0fe39 10993 struct pmu *pmu;
cdd6c482
IM
10994 struct perf_event *event;
10995 struct hw_perf_event *hwc;
90983b16 10996 long err = -EINVAL;
0793a61d 10997
66832eb4
ON
10998 if ((unsigned)cpu >= nr_cpu_ids) {
10999 if (!task || cpu != -1)
11000 return ERR_PTR(-EINVAL);
11001 }
11002
c3f00c70 11003 event = kzalloc(sizeof(*event), GFP_KERNEL);
cdd6c482 11004 if (!event)
d5d2bc0d 11005 return ERR_PTR(-ENOMEM);
0793a61d 11006
04289bb9 11007 /*
cdd6c482 11008 * Single events are their own group leaders, with an
04289bb9
IM
11009 * empty sibling list:
11010 */
11011 if (!group_leader)
cdd6c482 11012 group_leader = event;
04289bb9 11013
cdd6c482
IM
11014 mutex_init(&event->child_mutex);
11015 INIT_LIST_HEAD(&event->child_list);
fccc714b 11016
cdd6c482
IM
11017 INIT_LIST_HEAD(&event->event_entry);
11018 INIT_LIST_HEAD(&event->sibling_list);
6668128a 11019 INIT_LIST_HEAD(&event->active_list);
8e1a2031 11020 init_event_group(event);
10c6db11 11021 INIT_LIST_HEAD(&event->rb_entry);
71ad88ef 11022 INIT_LIST_HEAD(&event->active_entry);
375637bc 11023 INIT_LIST_HEAD(&event->addr_filters.list);
f3ae75de
SE
11024 INIT_HLIST_NODE(&event->hlist_entry);
11025
10c6db11 11026
cdd6c482 11027 init_waitqueue_head(&event->waitq);
1d54ad94 11028 event->pending_disable = -1;
e360adbe 11029 init_irq_work(&event->pending, perf_pending_event);
0793a61d 11030
cdd6c482 11031 mutex_init(&event->mmap_mutex);
375637bc 11032 raw_spin_lock_init(&event->addr_filters.lock);
7b732a75 11033
a6fa941d 11034 atomic_long_set(&event->refcount, 1);
cdd6c482
IM
11035 event->cpu = cpu;
11036 event->attr = *attr;
11037 event->group_leader = group_leader;
11038 event->pmu = NULL;
cdd6c482 11039 event->oncpu = -1;
a96bbc16 11040
cdd6c482 11041 event->parent = parent_event;
b84fbc9f 11042
17cf22c3 11043 event->ns = get_pid_ns(task_active_pid_ns(current));
cdd6c482 11044 event->id = atomic64_inc_return(&perf_event_id);
a96bbc16 11045
cdd6c482 11046 event->state = PERF_EVENT_STATE_INACTIVE;
329d876d 11047
d580ff86
PZ
11048 if (task) {
11049 event->attach_state = PERF_ATTACH_TASK;
d580ff86 11050 /*
50f16a8b
PZ
11051 * XXX pmu::event_init needs to know what task to account to
11052 * and we cannot use the ctx information because we need the
11053 * pmu before we get a ctx.
d580ff86 11054 */
7b3c92b8 11055 event->hw.target = get_task_struct(task);
d580ff86
PZ
11056 }
11057
34f43927
PZ
11058 event->clock = &local_clock;
11059 if (parent_event)
11060 event->clock = parent_event->clock;
11061
4dc0da86 11062 if (!overflow_handler && parent_event) {
b326e956 11063 overflow_handler = parent_event->overflow_handler;
4dc0da86 11064 context = parent_event->overflow_handler_context;
f1e4ba5b 11065#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
aa6a5f3c 11066 if (overflow_handler == bpf_overflow_handler) {
85192dbf 11067 struct bpf_prog *prog = parent_event->prog;
aa6a5f3c 11068
85192dbf 11069 bpf_prog_inc(prog);
aa6a5f3c
AS
11070 event->prog = prog;
11071 event->orig_overflow_handler =
11072 parent_event->orig_overflow_handler;
11073 }
11074#endif
4dc0da86 11075 }
66832eb4 11076
1879445d
WN
11077 if (overflow_handler) {
11078 event->overflow_handler = overflow_handler;
11079 event->overflow_handler_context = context;
9ecda41a
WN
11080 } else if (is_write_backward(event)){
11081 event->overflow_handler = perf_event_output_backward;
11082 event->overflow_handler_context = NULL;
1879445d 11083 } else {
9ecda41a 11084 event->overflow_handler = perf_event_output_forward;
1879445d
WN
11085 event->overflow_handler_context = NULL;
11086 }
97eaf530 11087
0231bb53 11088 perf_event__state_init(event);
a86ed508 11089
4aeb0b42 11090 pmu = NULL;
b8e83514 11091
cdd6c482 11092 hwc = &event->hw;
bd2b5b12 11093 hwc->sample_period = attr->sample_period;
0d48696f 11094 if (attr->freq && attr->sample_freq)
bd2b5b12 11095 hwc->sample_period = 1;
eced1dfc 11096 hwc->last_period = hwc->sample_period;
bd2b5b12 11097
e7850595 11098 local64_set(&hwc->period_left, hwc->sample_period);
60db5e09 11099
2023b359 11100 /*
ba5213ae
PZ
11101 * We currently do not support PERF_SAMPLE_READ on inherited events.
11102 * See perf_output_read().
2023b359 11103 */
ba5213ae 11104 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
90983b16 11105 goto err_ns;
a46a2300
YZ
11106
11107 if (!has_branch_stack(event))
11108 event->attr.branch_sample_type = 0;
2023b359 11109
b0a873eb 11110 pmu = perf_init_event(event);
85c617ab 11111 if (IS_ERR(pmu)) {
4aeb0b42 11112 err = PTR_ERR(pmu);
90983b16 11113 goto err_ns;
621a01ea 11114 }
d5d2bc0d 11115
09f4e8f0
PZ
11116 /*
11117 * Disallow uncore-cgroup events, they don't make sense as the cgroup will
11118 * be different on other CPUs in the uncore mask.
11119 */
11120 if (pmu->task_ctx_nr == perf_invalid_context && cgroup_fd != -1) {
11121 err = -EINVAL;
11122 goto err_pmu;
11123 }
11124
ab43762e
AS
11125 if (event->attr.aux_output &&
11126 !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) {
11127 err = -EOPNOTSUPP;
11128 goto err_pmu;
11129 }
11130
98add2af
PZ
11131 if (cgroup_fd != -1) {
11132 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
11133 if (err)
11134 goto err_pmu;
11135 }
11136
bed5b25a
AS
11137 err = exclusive_event_init(event);
11138 if (err)
11139 goto err_pmu;
11140
375637bc 11141 if (has_addr_filter(event)) {
c60f83b8
AS
11142 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
11143 sizeof(struct perf_addr_filter_range),
11144 GFP_KERNEL);
11145 if (!event->addr_filter_ranges) {
36cc2b92 11146 err = -ENOMEM;
375637bc 11147 goto err_per_task;
36cc2b92 11148 }
375637bc 11149
18736eef
AS
11150 /*
11151 * Clone the parent's vma offsets: they are valid until exec()
11152 * even if the mm is not shared with the parent.
11153 */
11154 if (event->parent) {
11155 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
11156
11157 raw_spin_lock_irq(&ifh->lock);
c60f83b8
AS
11158 memcpy(event->addr_filter_ranges,
11159 event->parent->addr_filter_ranges,
11160 pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range));
18736eef
AS
11161 raw_spin_unlock_irq(&ifh->lock);
11162 }
11163
375637bc
AS
11164 /* force hw sync on the address filters */
11165 event->addr_filters_gen = 1;
11166 }
11167
cdd6c482 11168 if (!event->parent) {
927c7a9e 11169 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
97c79a38 11170 err = get_callchain_buffers(attr->sample_max_stack);
90983b16 11171 if (err)
375637bc 11172 goto err_addr_filters;
d010b332 11173 }
f344011c 11174 }
9ee318a7 11175
da97e184
JFG
11176 err = security_perf_event_alloc(event);
11177 if (err)
11178 goto err_callchain_buffer;
11179
927a5570
AS
11180 /* symmetric to unaccount_event() in _free_event() */
11181 account_event(event);
11182
cdd6c482 11183 return event;
90983b16 11184
da97e184
JFG
11185err_callchain_buffer:
11186 if (!event->parent) {
11187 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
11188 put_callchain_buffers();
11189 }
375637bc 11190err_addr_filters:
c60f83b8 11191 kfree(event->addr_filter_ranges);
375637bc 11192
bed5b25a
AS
11193err_per_task:
11194 exclusive_event_destroy(event);
11195
90983b16 11196err_pmu:
98add2af
PZ
11197 if (is_cgroup_event(event))
11198 perf_detach_cgroup(event);
90983b16
FW
11199 if (event->destroy)
11200 event->destroy(event);
c464c76e 11201 module_put(pmu->module);
90983b16
FW
11202err_ns:
11203 if (event->ns)
11204 put_pid_ns(event->ns);
621b6d2e
PB
11205 if (event->hw.target)
11206 put_task_struct(event->hw.target);
90983b16
FW
11207 kfree(event);
11208
11209 return ERR_PTR(err);
0793a61d
TG
11210}
11211
cdd6c482
IM
11212static int perf_copy_attr(struct perf_event_attr __user *uattr,
11213 struct perf_event_attr *attr)
974802ea 11214{
974802ea 11215 u32 size;
cdf8073d 11216 int ret;
974802ea 11217
c2ba8f41 11218 /* Zero the full structure, so that a short copy leaves the rest zeroed. */
974802ea
PZ
11219 memset(attr, 0, sizeof(*attr));
11220
11221 ret = get_user(size, &uattr->size);
11222 if (ret)
11223 return ret;
11224
c2ba8f41
AS
11225 /* ABI compatibility quirk: */
11226 if (!size)
974802ea 11227 size = PERF_ATTR_SIZE_VER0;
c2ba8f41 11228 if (size < PERF_ATTR_SIZE_VER0 || size > PAGE_SIZE)
974802ea
PZ
11229 goto err_size;
11230
c2ba8f41
AS
11231 ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
11232 if (ret) {
11233 if (ret == -E2BIG)
11234 goto err_size;
11235 return ret;
974802ea
PZ
11236 }
11237
f12f42ac
MX
11238 attr->size = size;
11239
a4faf00d 11240 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
974802ea
PZ
11241 return -EINVAL;
11242
11243 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
11244 return -EINVAL;
11245
11246 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
11247 return -EINVAL;
11248
bce38cd5
SE
11249 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
11250 u64 mask = attr->branch_sample_type;
11251
11252 /* only using defined bits */
11253 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
11254 return -EINVAL;
11255
11256 /* at least one branch bit must be set */
11257 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
11258 return -EINVAL;
11259
bce38cd5
SE
11260 /* propagate priv level, when not set for branch */
11261 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
11262
11263 /* exclude_kernel checked on syscall entry */
11264 if (!attr->exclude_kernel)
11265 mask |= PERF_SAMPLE_BRANCH_KERNEL;
11266
11267 if (!attr->exclude_user)
11268 mask |= PERF_SAMPLE_BRANCH_USER;
11269
11270 if (!attr->exclude_hv)
11271 mask |= PERF_SAMPLE_BRANCH_HV;
11272 /*
11273 * adjust user setting (for HW filter setup)
11274 */
11275 attr->branch_sample_type = mask;
11276 }
e712209a 11277 /* privileged levels capture (kernel, hv): check permissions */
da97e184
JFG
11278 if (mask & PERF_SAMPLE_BRANCH_PERM_PLM) {
11279 ret = perf_allow_kernel(attr);
11280 if (ret)
11281 return ret;
11282 }
bce38cd5 11283 }
4018994f 11284
c5ebcedb 11285 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
4018994f 11286 ret = perf_reg_validate(attr->sample_regs_user);
c5ebcedb
JO
11287 if (ret)
11288 return ret;
11289 }
11290
11291 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
11292 if (!arch_perf_have_user_stack_dump())
11293 return -ENOSYS;
11294
11295 /*
11296 * We have __u32 type for the size, but so far
11297 * we can only use __u16 as maximum due to the
11298 * __u16 sample size limit.
11299 */
11300 if (attr->sample_stack_user >= USHRT_MAX)
78b562fb 11301 return -EINVAL;
c5ebcedb 11302 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
78b562fb 11303 return -EINVAL;
c5ebcedb 11304 }
4018994f 11305
5f970521
JO
11306 if (!attr->sample_max_stack)
11307 attr->sample_max_stack = sysctl_perf_event_max_stack;
11308
60e2364e
SE
11309 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
11310 ret = perf_reg_validate(attr->sample_regs_intr);
6546b19f
NK
11311
11312#ifndef CONFIG_CGROUP_PERF
11313 if (attr->sample_type & PERF_SAMPLE_CGROUP)
11314 return -EINVAL;
11315#endif
11316
974802ea
PZ
11317out:
11318 return ret;
11319
11320err_size:
11321 put_user(sizeof(*attr), &uattr->size);
11322 ret = -E2BIG;
11323 goto out;
11324}
11325
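
/*
 * Userspace side of the size handshake handled by perf_copy_attr() above
 * (a hedged sketch, not part of this file): callers zero the whole
 * structure and set attr.size to the size they were compiled against.  A
 * smaller size is zero-extended by the kernel; a larger one is accepted as
 * long as the fields the kernel does not know about stay zero, otherwise
 * the syscall fails with E2BIG and the kernel writes the size it supports
 * back into attr.size (see err_size above).
 */
#include <linux/perf_event.h>
#include <string.h>

static void init_attr_cpu_clock(struct perf_event_attr *attr)
{
        memset(attr, 0, sizeof(*attr));         /* unknown fields must stay zero */
        attr->size = sizeof(*attr);
        attr->type = PERF_TYPE_SOFTWARE;
        attr->config = PERF_COUNT_SW_CPU_CLOCK;
        attr->disabled = 1;
        attr->exclude_kernel = 1;
}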
ac9721f3
PZ
11326static int
11327perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
a4be7c27 11328{
56de4e8f 11329 struct perf_buffer *rb = NULL;
a4be7c27
PZ
11330 int ret = -EINVAL;
11331
ac9721f3 11332 if (!output_event)
a4be7c27
PZ
11333 goto set;
11334
ac9721f3
PZ
11335 /* don't allow circular references */
11336 if (event == output_event)
a4be7c27
PZ
11337 goto out;
11338
0f139300
PZ
11339 /*
11340 * Don't allow cross-cpu buffers
11341 */
11342 if (output_event->cpu != event->cpu)
11343 goto out;
11344
11345 /*
76369139 11346 * If it's not a per-cpu rb, it must be the same task.
0f139300
PZ
11347 */
11348 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
11349 goto out;
11350
34f43927
PZ
11351 /*
11352 * Mixing clocks in the same buffer is trouble you don't need.
11353 */
11354 if (output_event->clock != event->clock)
11355 goto out;
11356
9ecda41a
WN
11357 /*
11358 * Either writing ring buffer from beginning or from end.
11359 * Mixing is not allowed.
11360 */
11361 if (is_write_backward(output_event) != is_write_backward(event))
11362 goto out;
11363
45bfb2e5
PZ
11364 /*
11365 * If both events generate aux data, they must be on the same PMU
11366 */
11367 if (has_aux(event) && has_aux(output_event) &&
11368 event->pmu != output_event->pmu)
11369 goto out;
11370
a4be7c27 11371set:
cdd6c482 11372 mutex_lock(&event->mmap_mutex);
ac9721f3
PZ
11373 /* Can't redirect output if we've got an active mmap() */
11374 if (atomic_read(&event->mmap_count))
11375 goto unlock;
a4be7c27 11376
ac9721f3 11377 if (output_event) {
76369139
FW
11378 /* get the rb we want to redirect to */
11379 rb = ring_buffer_get(output_event);
11380 if (!rb)
ac9721f3 11381 goto unlock;
a4be7c27
PZ
11382 }
11383
b69cf536 11384 ring_buffer_attach(event, rb);
9bb5d40c 11385
a4be7c27 11386 ret = 0;
ac9721f3
PZ
11387unlock:
11388 mutex_unlock(&event->mmap_mutex);
11389
a4be7c27 11390out:
a4be7c27
PZ
11391 return ret;
11392}
11393
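
/*
 * perf_event_set_output() above is also reachable from the
 * PERF_EVENT_IOC_SET_OUTPUT ioctl.  A hedged userspace sketch (not part of
 * this file) that points a second event's output at the first event's ring
 * buffer; per the checks above both events must be on the same CPU (or, for
 * cpu == -1, in the same task context), use the same clock and the same
 * write direction:
 */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stddef.h>

static void *share_ring_buffer(int fd_leader, int fd_other, size_t data_pages)
{
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        size_t len = (data_pages + 1) * page;   /* metadata page + 2^n data pages */
        void *rb = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd_leader, 0);

        if (rb == MAP_FAILED)
                return NULL;

        /* Redirect fd_other's records into fd_leader's buffer. */
        if (ioctl(fd_other, PERF_EVENT_IOC_SET_OUTPUT, fd_leader) < 0) {
                munmap(rb, len);
                return NULL;
        }
        return rb;
}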
f63a8daa
PZ
11394static void mutex_lock_double(struct mutex *a, struct mutex *b)
11395{
11396 if (b < a)
11397 swap(a, b);
11398
11399 mutex_lock(a);
11400 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
11401}
11402
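
/*
 * mutex_lock_double() above avoids ABBA deadlocks by always taking the two
 * mutexes in a fixed global order (lowest address first); the
 * mutex_lock_nested() annotation only informs lockdep.  The same idiom in
 * portable userspace code (hedged sketch, hypothetical helper name):
 */
#include <pthread.h>

static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
        if (b < a) {                    /* order by address, lowest first */
                pthread_mutex_t *tmp = a;

                a = b;
                b = tmp;
        }
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);
}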
34f43927
PZ
11403static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
11404{
11405 bool nmi_safe = false;
11406
11407 switch (clk_id) {
11408 case CLOCK_MONOTONIC:
11409 event->clock = &ktime_get_mono_fast_ns;
11410 nmi_safe = true;
11411 break;
11412
11413 case CLOCK_MONOTONIC_RAW:
11414 event->clock = &ktime_get_raw_fast_ns;
11415 nmi_safe = true;
11416 break;
11417
11418 case CLOCK_REALTIME:
11419 event->clock = &ktime_get_real_ns;
11420 break;
11421
11422 case CLOCK_BOOTTIME:
9285ec4c 11423 event->clock = &ktime_get_boottime_ns;
34f43927
PZ
11424 break;
11425
11426 case CLOCK_TAI:
9285ec4c 11427 event->clock = &ktime_get_clocktai_ns;
34f43927
PZ
11428 break;
11429
11430 default:
11431 return -EINVAL;
11432 }
11433
11434 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
11435 return -EINVAL;
11436
11437 return 0;
11438}
11439
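
/*
 * Userspace side of the clock selection handled by perf_event_set_clock()
 * above (hedged sketch, not part of this file): with use_clockid set, the
 * timestamps in sample records come from the chosen clock and are directly
 * comparable with the tool's own clock_gettime() readings.
 */
#include <linux/perf_event.h>
#include <time.h>

static void attr_use_monotonic_raw(struct perf_event_attr *attr)
{
        attr->use_clockid = 1;
        attr->clockid = CLOCK_MONOTONIC_RAW;    /* mapped to ktime_get_raw_fast_ns above */
}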
321027c1
PZ
11440/*
11441 * Variation on perf_event_ctx_lock_nested(), except we take two context
11442 * mutexes.
11443 */
11444static struct perf_event_context *
11445__perf_event_ctx_lock_double(struct perf_event *group_leader,
11446 struct perf_event_context *ctx)
11447{
11448 struct perf_event_context *gctx;
11449
11450again:
11451 rcu_read_lock();
11452 gctx = READ_ONCE(group_leader->ctx);
8c94abbb 11453 if (!refcount_inc_not_zero(&gctx->refcount)) {
321027c1
PZ
11454 rcu_read_unlock();
11455 goto again;
11456 }
11457 rcu_read_unlock();
11458
11459 mutex_lock_double(&gctx->mutex, &ctx->mutex);
11460
11461 if (group_leader->ctx != gctx) {
11462 mutex_unlock(&ctx->mutex);
11463 mutex_unlock(&gctx->mutex);
11464 put_ctx(gctx);
11465 goto again;
11466 }
11467
11468 return gctx;
11469}
11470
0793a61d 11471/**
cdd6c482 11472 * sys_perf_event_open - open a performance event, associate it to a task/cpu
9f66a381 11473 *
cdd6c482 11474 * @attr_uptr: event_id type attributes for monitoring/sampling
0793a61d 11475 * @pid: target pid
9f66a381 11476 * @cpu: target cpu
cdd6c482 11477 * @group_fd: group leader event fd
0793a61d 11478 */
cdd6c482
IM
11479SYSCALL_DEFINE5(perf_event_open,
11480 struct perf_event_attr __user *, attr_uptr,
2743a5b0 11481 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
0793a61d 11482{
b04243ef
PZ
11483 struct perf_event *group_leader = NULL, *output_event = NULL;
11484 struct perf_event *event, *sibling;
cdd6c482 11485 struct perf_event_attr attr;
f63a8daa 11486 struct perf_event_context *ctx, *uninitialized_var(gctx);
cdd6c482 11487 struct file *event_file = NULL;
2903ff01 11488 struct fd group = {NULL, 0};
38a81da2 11489 struct task_struct *task = NULL;
89a1e187 11490 struct pmu *pmu;
ea635c64 11491 int event_fd;
b04243ef 11492 int move_group = 0;
dc86cabe 11493 int err;
a21b0b35 11494 int f_flags = O_RDWR;
79dff51e 11495 int cgroup_fd = -1;
0793a61d 11496
2743a5b0 11497 /* for future expandability... */
e5d1367f 11498 if (flags & ~PERF_FLAG_ALL)
2743a5b0
PM
11499 return -EINVAL;
11500
da97e184
JFG
11501 /* Do we allow access to perf_event_open(2) ? */
11502 err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
11503 if (err)
11504 return err;
11505
dc86cabe
IM
11506 err = perf_copy_attr(attr_uptr, &attr);
11507 if (err)
11508 return err;
eab656ae 11509
0764771d 11510 if (!attr.exclude_kernel) {
da97e184
JFG
11511 err = perf_allow_kernel(&attr);
11512 if (err)
11513 return err;
0764771d
PZ
11514 }
11515
e4222673 11516 if (attr.namespaces) {
18aa1856 11517 if (!perfmon_capable())
e4222673
HB
11518 return -EACCES;
11519 }
11520
df58ab24 11521 if (attr.freq) {
cdd6c482 11522 if (attr.sample_freq > sysctl_perf_event_sample_rate)
df58ab24 11523 return -EINVAL;
0819b2e3
PZ
11524 } else {
11525 if (attr.sample_period & (1ULL << 63))
11526 return -EINVAL;
df58ab24
PZ
11527 }
11528
fc7ce9c7 11529 /* Only privileged users can get physical addresses */
da97e184
JFG
11530 if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR)) {
11531 err = perf_allow_kernel(&attr);
11532 if (err)
11533 return err;
11534 }
fc7ce9c7 11535
b0c8fdc7
DH
11536 err = security_locked_down(LOCKDOWN_PERF);
11537 if (err && (attr.sample_type & PERF_SAMPLE_REGS_INTR))
11538 /* REGS_INTR can leak data, lockdown must prevent this */
11539 return err;
11540
11541 err = 0;
11542
e5d1367f
SE
11543 /*
11544 * In cgroup mode, the pid argument is used to pass the fd
11545 * opened to the cgroup directory in cgroupfs. The cpu argument
11546 * designates the cpu on which to monitor threads from that
11547 * cgroup.
11548 */
11549 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
11550 return -EINVAL;
11551
a21b0b35
YD
11552 if (flags & PERF_FLAG_FD_CLOEXEC)
11553 f_flags |= O_CLOEXEC;
11554
11555 event_fd = get_unused_fd_flags(f_flags);
ea635c64
AV
11556 if (event_fd < 0)
11557 return event_fd;
11558
ac9721f3 11559 if (group_fd != -1) {
2903ff01
AV
11560 err = perf_fget_light(group_fd, &group);
11561 if (err)
d14b12d7 11562 goto err_fd;
2903ff01 11563 group_leader = group.file->private_data;
ac9721f3
PZ
11564 if (flags & PERF_FLAG_FD_OUTPUT)
11565 output_event = group_leader;
11566 if (flags & PERF_FLAG_FD_NO_GROUP)
11567 group_leader = NULL;
11568 }
11569
e5d1367f 11570 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
c6be5a5c
PZ
11571 task = find_lively_task_by_vpid(pid);
11572 if (IS_ERR(task)) {
11573 err = PTR_ERR(task);
11574 goto err_group_fd;
11575 }
11576 }
11577
1f4ee503
PZ
11578 if (task && group_leader &&
11579 group_leader->attr.inherit != attr.inherit) {
11580 err = -EINVAL;
11581 goto err_task;
11582 }
11583
79c9ce57 11584 if (task) {
69143038 11585 err = mutex_lock_interruptible(&task->signal->exec_update_mutex);
79c9ce57 11586 if (err)
e5aeee51 11587 goto err_task;
79c9ce57
PZ
11588
11589 /*
11590 * Reuse ptrace permission checks for now.
11591 *
69143038 11592 * We must hold exec_update_mutex across this and any potential
79c9ce57
PZ
11593 * perf_install_in_context() call for this new event to
11594 * serialize against exec() altering our credentials (and the
11595 * perf_event_exit_task() that could imply).
11596 */
11597 err = -EACCES;
11598 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
11599 goto err_cred;
11600 }
11601
79dff51e
MF
11602 if (flags & PERF_FLAG_PID_CGROUP)
11603 cgroup_fd = pid;
11604
4dc0da86 11605 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
79dff51e 11606 NULL, NULL, cgroup_fd);
d14b12d7
SE
11607 if (IS_ERR(event)) {
11608 err = PTR_ERR(event);
79c9ce57 11609 goto err_cred;
d14b12d7
SE
11610 }
11611
53b25335
VW
11612 if (is_sampling_event(event)) {
11613 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
a1396555 11614 err = -EOPNOTSUPP;
53b25335
VW
11615 goto err_alloc;
11616 }
11617 }
11618
89a1e187
PZ
11619 /*
11620 * Special case software events and allow them to be part of
11621 * any hardware group.
11622 */
11623 pmu = event->pmu;
b04243ef 11624
34f43927
PZ
11625 if (attr.use_clockid) {
11626 err = perf_event_set_clock(event, attr.clockid);
11627 if (err)
11628 goto err_alloc;
11629 }
11630
4ff6a8de
DCC
11631 if (pmu->task_ctx_nr == perf_sw_context)
11632 event->event_caps |= PERF_EV_CAP_SOFTWARE;
11633
a1150c20
SL
11634 if (group_leader) {
11635 if (is_software_event(event) &&
11636 !in_software_context(group_leader)) {
b04243ef 11637 /*
a1150c20
SL
11638 * If the event is a sw event, but the group_leader
11639 * is on hw context.
b04243ef 11640 *
a1150c20
SL
11641 * Allow the addition of software events to hw
11642 * groups; this is safe because software events
11643 * never fail to schedule.
b04243ef 11644 */
a1150c20
SL
11645 pmu = group_leader->ctx->pmu;
11646 } else if (!is_software_event(event) &&
11647 is_software_event(group_leader) &&
4ff6a8de 11648 (group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
b04243ef
PZ
11649 /*
11650 * In case the group is a pure software group, and we
11651 * try to add a hardware event, move the whole group to
11652 * the hardware context.
11653 */
11654 move_group = 1;
11655 }
11656 }
89a1e187
PZ
11657
11658 /*
11659 * Get the target context (task or percpu):
11660 */
4af57ef2 11661 ctx = find_get_context(pmu, task, event);
89a1e187
PZ
11662 if (IS_ERR(ctx)) {
11663 err = PTR_ERR(ctx);
c6be5a5c 11664 goto err_alloc;
89a1e187
PZ
11665 }
11666
ccff286d 11667 /*
cdd6c482 11668 * Look up the group leader (we will attach this event to it):
04289bb9 11669 */
ac9721f3 11670 if (group_leader) {
dc86cabe 11671 err = -EINVAL;
04289bb9 11672
04289bb9 11673 /*
ccff286d
IM
11674 * Do not allow a recursive hierarchy (this new sibling
11675 * becoming part of another group-sibling):
11676 */
11677 if (group_leader->group_leader != group_leader)
c3f00c70 11678 goto err_context;
34f43927
PZ
11679
11680 /* All events in a group should have the same clock */
11681 if (group_leader->clock != event->clock)
11682 goto err_context;
11683
ccff286d 11684 /*
64aee2a9
MR
11685 * Make sure we're both events for the same CPU;
11686 * grouping events for different CPUs is broken; since
11687 * you can never concurrently schedule them anyhow.
04289bb9 11688 */
64aee2a9
MR
11689 if (group_leader->cpu != event->cpu)
11690 goto err_context;
c3c87e77 11691
64aee2a9
MR
11692 /*
11693 * Make sure we're both on the same task, or both
11694 * per-CPU events.
11695 */
11696 if (group_leader->ctx->task != ctx->task)
11697 goto err_context;
11698
11699 /*
11700 * Do not allow to attach to a group in a different task
11701 * or CPU context. If we're moving SW events, we'll fix
11702 * this up later, so allow that.
11703 */
11704 if (!move_group && group_leader->ctx != ctx)
11705 goto err_context;
b04243ef 11706
3b6f9e5c
PM
11707 /*
11708 * Only a group leader can be exclusive or pinned
11709 */
0d48696f 11710 if (attr.exclusive || attr.pinned)
c3f00c70 11711 goto err_context;
ac9721f3
PZ
11712 }
11713
11714 if (output_event) {
11715 err = perf_event_set_output(event, output_event);
11716 if (err)
c3f00c70 11717 goto err_context;
ac9721f3 11718 }
0793a61d 11719
a21b0b35
YD
11720 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
11721 f_flags);
ea635c64
AV
11722 if (IS_ERR(event_file)) {
11723 err = PTR_ERR(event_file);
201c2f85 11724 event_file = NULL;
c3f00c70 11725 goto err_context;
ea635c64 11726 }
9b51f66d 11727
b04243ef 11728 if (move_group) {
321027c1
PZ
11729 gctx = __perf_event_ctx_lock_double(group_leader, ctx);
11730
84c4e620
PZ
11731 if (gctx->task == TASK_TOMBSTONE) {
11732 err = -ESRCH;
11733 goto err_locked;
11734 }
321027c1
PZ
11735
11736 /*
11737 * Check if we raced against another sys_perf_event_open() call
11738 * moving the software group underneath us.
11739 */
11740 if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
11741 /*
11742 * If someone moved the group out from under us, check
11743 * if this new event wound up on the same ctx; if so,
11744 * it's the regular !move_group case, otherwise fail.
11745 */
11746 if (gctx != ctx) {
11747 err = -EINVAL;
11748 goto err_locked;
11749 } else {
11750 perf_event_ctx_unlock(group_leader, gctx);
11751 move_group = 0;
11752 }
11753 }
8a58ddae
AS
11754
11755 /*
11756 * Failure to create exclusive events returns -EBUSY.
11757 */
11758 err = -EBUSY;
11759 if (!exclusive_event_installable(group_leader, ctx))
11760 goto err_locked;
11761
11762 for_each_sibling_event(sibling, group_leader) {
11763 if (!exclusive_event_installable(sibling, ctx))
11764 goto err_locked;
11765 }
f55fc2a5
PZ
11766 } else {
11767 mutex_lock(&ctx->mutex);
11768 }
11769
84c4e620
PZ
11770 if (ctx->task == TASK_TOMBSTONE) {
11771 err = -ESRCH;
11772 goto err_locked;
11773 }
11774
a723968c
PZ
11775 if (!perf_event_validate_size(event)) {
11776 err = -E2BIG;
11777 goto err_locked;
11778 }
11779
a63fbed7
TG
11780 if (!task) {
11781 /*
11782 * Check if the @cpu we're creating an event for is online.
11783 *
11784 * We use the perf_cpu_context::ctx::mutex to serialize against
11785 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
11786 */
11787 struct perf_cpu_context *cpuctx =
11788 container_of(ctx, struct perf_cpu_context, ctx);
11789
11790 if (!cpuctx->online) {
11791 err = -ENODEV;
11792 goto err_locked;
11793 }
11794 }
11795
da9ec3d3
MR
11796 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) {
11797 err = -EINVAL;
ab43762e 11798 goto err_locked;
da9ec3d3 11799 }
a63fbed7 11800
f55fc2a5
PZ
11801 /*
11802 * Must be under the same ctx::mutex as perf_install_in_context(),
11803 * because we need to serialize with concurrent event creation.
11804 */
11805 if (!exclusive_event_installable(event, ctx)) {
f55fc2a5
PZ
11806 err = -EBUSY;
11807 goto err_locked;
11808 }
f63a8daa 11809
f55fc2a5
PZ
11810 WARN_ON_ONCE(ctx->parent_ctx);
11811
79c9ce57
PZ
11812 /*
11813 * This is the point of no return; we cannot fail hereafter. This is
11814 * where we start modifying current state.
11815 */
11816
f55fc2a5 11817 if (move_group) {
f63a8daa
PZ
11818 /*
11819 * See perf_event_ctx_lock() for comments on the details
11820 * of swizzling perf_event::ctx.
11821 */
45a0e07a 11822 perf_remove_from_context(group_leader, 0);
279b5165 11823 put_ctx(gctx);
0231bb53 11824
edb39592 11825 for_each_sibling_event(sibling, group_leader) {
45a0e07a 11826 perf_remove_from_context(sibling, 0);
b04243ef
PZ
11827 put_ctx(gctx);
11828 }
b04243ef 11829
f63a8daa
PZ
11830 /*
11831 * Wait for everybody to stop referencing the events through
11832 * the old lists, before installing it on new lists.
11833 */
0cda4c02 11834 synchronize_rcu();
f63a8daa 11835
8f95b435
PZI
11836 /*
11837 * Install the group siblings before the group leader.
11838 *
11839 * Because a group leader will try and install the entire group
11840 * (through the sibling list, which is still intact), we can
11841 * end up with siblings installed in the wrong context.
11842 *
11843 * By installing siblings first we NO-OP because they're not
11844 * reachable through the group lists.
11845 */
edb39592 11846 for_each_sibling_event(sibling, group_leader) {
8f95b435 11847 perf_event__state_init(sibling);
9fc81d87 11848 perf_install_in_context(ctx, sibling, sibling->cpu);
b04243ef
PZ
11849 get_ctx(ctx);
11850 }
8f95b435
PZI
11851
11852 /*
11853 * Removing from the context ends up with a disabled
11854 * event. What we want here is an event in the initial
11855 * startup state, ready to be added into the new context.
11856 */
11857 perf_event__state_init(group_leader);
11858 perf_install_in_context(ctx, group_leader, group_leader->cpu);
11859 get_ctx(ctx);
bed5b25a
AS
11860 }
11861
f73e22ab
PZ
11862 /*
11863 * Precalculate sample_data sizes; do while holding ctx::mutex such
11864 * that we're serialized against further additions and before
11865 * perf_install_in_context() which is the point the event is active and
11866 * can use these values.
11867 */
11868 perf_event__header_size(event);
11869 perf_event__id_header_size(event);
11870
78cd2c74
PZ
11871 event->owner = current;
11872
e2d37cd2 11873 perf_install_in_context(ctx, event, event->cpu);
fe4b04fa 11874 perf_unpin_context(ctx);
f63a8daa 11875
f55fc2a5 11876 if (move_group)
321027c1 11877 perf_event_ctx_unlock(group_leader, gctx);
d859e29f 11878 mutex_unlock(&ctx->mutex);
9b51f66d 11879
79c9ce57 11880 if (task) {
69143038 11881 mutex_unlock(&task->signal->exec_update_mutex);
79c9ce57
PZ
11882 put_task_struct(task);
11883 }
11884
cdd6c482
IM
11885 mutex_lock(&current->perf_event_mutex);
11886 list_add_tail(&event->owner_entry, &current->perf_event_list);
11887 mutex_unlock(&current->perf_event_mutex);
082ff5a2 11888
8a49542c
PZ
11889 /*
11890 * Drop the reference on the group_event after placing the
11891 * new event on the sibling_list. This ensures destruction
11892 * of the group leader will find the pointer to itself in
11893 * perf_group_detach().
11894 */
2903ff01 11895 fdput(group);
ea635c64
AV
11896 fd_install(event_fd, event_file);
11897 return event_fd;
0793a61d 11898
f55fc2a5
PZ
11899err_locked:
11900 if (move_group)
321027c1 11901 perf_event_ctx_unlock(group_leader, gctx);
f55fc2a5
PZ
11902 mutex_unlock(&ctx->mutex);
11903/* err_file: */
11904 fput(event_file);
c3f00c70 11905err_context:
fe4b04fa 11906 perf_unpin_context(ctx);
ea635c64 11907 put_ctx(ctx);
c6be5a5c 11908err_alloc:
13005627
PZ
11909 /*
11910 * If event_file is set, the fput() above will have called ->release()
11911 * and that will take care of freeing the event.
11912 */
11913 if (!event_file)
11914 free_event(event);
79c9ce57
PZ
11915err_cred:
11916 if (task)
69143038 11917 mutex_unlock(&task->signal->exec_update_mutex);
1f4ee503 11918err_task:
e7d0bc04
PZ
11919 if (task)
11920 put_task_struct(task);
89a1e187 11921err_group_fd:
2903ff01 11922 fdput(group);
ea635c64
AV
11923err_fd:
11924 put_unused_fd(event_fd);
dc86cabe 11925 return err;
0793a61d
TG
11926}
11927
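
/*
 * A minimal userspace example of this syscall (hedged sketch, not part of
 * this file; error handling omitted): a disabled group leader counting
 * cycles, an instructions counter added to the same group, a measured
 * workload, and a read of both counts.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static int perf_open(struct perf_event_attr *attr, pid_t pid, int cpu, int group_fd)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd,
                       PERF_FLAG_FD_CLOEXEC);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t cycles, instructions;
        int leader, member;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);               /* see perf_copy_attr() above */
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.disabled = 1;                      /* only the leader starts disabled */
        attr.exclude_kernel = 1;
        leader = perf_open(&attr, 0, -1, -1);   /* this thread, any CPU, new group */

        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 0;                      /* members follow the leader */
        member = perf_open(&attr, 0, -1, leader);

        ioctl(leader, PERF_EVENT_IOC_RESET, 0);
        ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);
        /* ... workload under measurement ... */
        ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);

        read(leader, &cycles, sizeof(cycles));
        read(member, &instructions, sizeof(instructions));
        printf("cycles=%llu instructions=%llu\n",
               (unsigned long long)cycles, (unsigned long long)instructions);
        return 0;
}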
fb0459d7
AV
11928/**
11929 * perf_event_create_kernel_counter
11930 *
11931 * @attr: attributes of the counter to create
11932 * @cpu: cpu in which the counter is bound
38a81da2 11933 * @task: task to profile (NULL for percpu)
fb0459d7
AV
11934 */
11935struct perf_event *
11936perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
38a81da2 11937 struct task_struct *task,
4dc0da86
AK
11938 perf_overflow_handler_t overflow_handler,
11939 void *context)
fb0459d7 11940{
fb0459d7 11941 struct perf_event_context *ctx;
c3f00c70 11942 struct perf_event *event;
fb0459d7 11943 int err;
d859e29f 11944
dce5affb
AS
11945 /*
11946 * Grouping is not supported for kernel events, and neither is 'AUX';
11947 * make sure the caller's intentions are adjusted.
11948 */
11949 if (attr->aux_output)
11950 return ERR_PTR(-EINVAL);
11951
4dc0da86 11952 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
79dff51e 11953 overflow_handler, context, -1);
c3f00c70
PZ
11954 if (IS_ERR(event)) {
11955 err = PTR_ERR(event);
11956 goto err;
11957 }
d859e29f 11958
f8697762 11959 /* Mark owner so we could distinguish it from user events. */
63b6da39 11960 event->owner = TASK_TOMBSTONE;
f8697762 11961
f25d8ba9
AS
11962 /*
11963 * Get the target context (task or percpu):
11964 */
4af57ef2 11965 ctx = find_get_context(event->pmu, task, event);
c6567f64
FW
11966 if (IS_ERR(ctx)) {
11967 err = PTR_ERR(ctx);
c3f00c70 11968 goto err_free;
d859e29f 11969 }
fb0459d7 11970
fb0459d7
AV
11971 WARN_ON_ONCE(ctx->parent_ctx);
11972 mutex_lock(&ctx->mutex);
84c4e620
PZ
11973 if (ctx->task == TASK_TOMBSTONE) {
11974 err = -ESRCH;
11975 goto err_unlock;
11976 }
11977
a63fbed7
TG
11978 if (!task) {
11979 /*
11980 * Check if the @cpu we're creating an event for is online.
11981 *
11982 * We use the perf_cpu_context::ctx::mutex to serialize against
11983 * the hotplug notifiers. See perf_event_{init,exit}_cpu().
11984 */
11985 struct perf_cpu_context *cpuctx =
11986 container_of(ctx, struct perf_cpu_context, ctx);
11987 if (!cpuctx->online) {
11988 err = -ENODEV;
11989 goto err_unlock;
11990 }
11991 }
11992
bed5b25a 11993 if (!exclusive_event_installable(event, ctx)) {
bed5b25a 11994 err = -EBUSY;
84c4e620 11995 goto err_unlock;
bed5b25a
AS
11996 }
11997
4ce54af8 11998 perf_install_in_context(ctx, event, event->cpu);
fe4b04fa 11999 perf_unpin_context(ctx);
fb0459d7
AV
12000 mutex_unlock(&ctx->mutex);
12001
fb0459d7
AV
12002 return event;
12003
84c4e620
PZ
12004err_unlock:
12005 mutex_unlock(&ctx->mutex);
12006 perf_unpin_context(ctx);
12007 put_ctx(ctx);
c3f00c70
PZ
12008err_free:
12009 free_event(event);
12010err:
c6567f64 12011 return ERR_PTR(err);
9b51f66d 12012}
fb0459d7 12013EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
9b51f66d 12014
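
/*
 * In-kernel usage sketch for the export above (hedged, hypothetical names):
 * a pinned per-CPU cycle counter with an overflow callback, roughly the
 * shape of what the hardlockup watchdog does with this interface.  The
 * event is freed again with perf_event_release_kernel().
 */
static atomic_t my_overflows = ATOMIC_INIT(0);

static void my_cycles_overflow(struct perf_event *event,
                               struct perf_sample_data *data,
                               struct pt_regs *regs)
{
        /* May run in NMI context, so keep the handler NMI-safe. */
        atomic_inc(&my_overflows);
}

static struct perf_event *my_create_cycle_counter(int cpu)
{
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_HARDWARE,
                .config         = PERF_COUNT_HW_CPU_CYCLES,
                .size           = sizeof(struct perf_event_attr),
                .pinned         = 1,
                .sample_period  = 1000 * 1000 * 1000,
        };

        return perf_event_create_kernel_counter(&attr, cpu, NULL,
                                                my_cycles_overflow, NULL);
}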
0cda4c02
YZ
12015void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
12016{
12017 struct perf_event_context *src_ctx;
12018 struct perf_event_context *dst_ctx;
12019 struct perf_event *event, *tmp;
12020 LIST_HEAD(events);
12021
12022 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
12023 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
12024
f63a8daa
PZ
12025 /*
12026 * See perf_event_ctx_lock() for comments on the details
12027 * of swizzling perf_event::ctx.
12028 */
12029 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
0cda4c02
YZ
12030 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
12031 event_entry) {
45a0e07a 12032 perf_remove_from_context(event, 0);
9a545de0 12033 unaccount_event_cpu(event, src_cpu);
0cda4c02 12034 put_ctx(src_ctx);
9886167d 12035 list_add(&event->migrate_entry, &events);
0cda4c02 12036 }
0cda4c02 12037
8f95b435
PZI
12038 /*
12039 * Wait for the events to quiesce before re-instating them.
12040 */
0cda4c02
YZ
12041 synchronize_rcu();
12042
8f95b435
PZI
12043 /*
12044 * Re-instate events in 2 passes.
12045 *
12046 * Skip over group leaders and only install siblings on this first
12047 * pass; siblings will not get enabled without a leader, however a
12048 * leader will enable its siblings, even if those are still on the old
12049 * context.
12050 */
12051 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
12052 if (event->group_leader == event)
12053 continue;
12054
12055 list_del(&event->migrate_entry);
12056 if (event->state >= PERF_EVENT_STATE_OFF)
12057 event->state = PERF_EVENT_STATE_INACTIVE;
12058 account_event_cpu(event, dst_cpu);
12059 perf_install_in_context(dst_ctx, event, dst_cpu);
12060 get_ctx(dst_ctx);
12061 }
12062
12063 /*
12064 * Once all the siblings are setup properly, install the group leaders
12065 * to make it go.
12066 */
9886167d
PZ
12067 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
12068 list_del(&event->migrate_entry);
0cda4c02
YZ
12069 if (event->state >= PERF_EVENT_STATE_OFF)
12070 event->state = PERF_EVENT_STATE_INACTIVE;
9a545de0 12071 account_event_cpu(event, dst_cpu);
0cda4c02
YZ
12072 perf_install_in_context(dst_ctx, event, dst_cpu);
12073 get_ctx(dst_ctx);
12074 }
12075 mutex_unlock(&dst_ctx->mutex);
f63a8daa 12076 mutex_unlock(&src_ctx->mutex);
0cda4c02
YZ
12077}
12078EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
12079
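
/*
 * Typical caller sketch for the export above (hedged, hypothetical driver
 * names): an uncore-style PMU driver that keeps its events on one "owner"
 * CPU moves them to a surviving CPU from its hotplug-offline callback.
 */
struct my_uncore_box {                  /* hypothetical driver state */
        struct pmu      pmu;
        unsigned int    cpu;            /* CPU currently carrying the events */
};

static int my_uncore_cpu_offline(unsigned int cpu, struct my_uncore_box *box)
{
        unsigned int target;

        if (box->cpu != cpu)
                return 0;

        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                return 0;

        perf_pmu_migrate_context(&box->pmu, cpu, target);
        box->cpu = target;
        return 0;
}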
cdd6c482 12080static void sync_child_event(struct perf_event *child_event,
38b200d6 12081 struct task_struct *child)
d859e29f 12082{
cdd6c482 12083 struct perf_event *parent_event = child_event->parent;
8bc20959 12084 u64 child_val;
d859e29f 12085
cdd6c482
IM
12086 if (child_event->attr.inherit_stat)
12087 perf_event_read_event(child_event, child);
38b200d6 12088
b5e58793 12089 child_val = perf_event_count(child_event);
d859e29f
PM
12090
12091 /*
12092 * Add back the child's count to the parent's count:
12093 */
a6e6dea6 12094 atomic64_add(child_val, &parent_event->child_count);
cdd6c482
IM
12095 atomic64_add(child_event->total_time_enabled,
12096 &parent_event->child_total_time_enabled);
12097 atomic64_add(child_event->total_time_running,
12098 &parent_event->child_total_time_running);
d859e29f
PM
12099}
12100
9b51f66d 12101static void
8ba289b8
PZ
12102perf_event_exit_event(struct perf_event *child_event,
12103 struct perf_event_context *child_ctx,
12104 struct task_struct *child)
9b51f66d 12105{
8ba289b8
PZ
12106 struct perf_event *parent_event = child_event->parent;
12107
1903d50c
PZ
12108 /*
12109 * Do not destroy the 'original' grouping; because of the context
12110 * switch optimization the original events could've ended up in a
12111 * random child task.
12112 *
12113 * If we were to destroy the original group, all group related
12114 * operations would cease to function properly after this random
12115 * child dies.
12116 *
12117 * Do destroy all inherited groups, we don't care about those
12118 * and being thorough is better.
12119 */
32132a3d
PZ
12120 raw_spin_lock_irq(&child_ctx->lock);
12121 WARN_ON_ONCE(child_ctx->is_active);
12122
8ba289b8 12123 if (parent_event)
32132a3d
PZ
12124 perf_group_detach(child_event);
12125 list_del_event(child_event, child_ctx);
0d3d73aa 12126 perf_event_set_state(child_event, PERF_EVENT_STATE_EXIT); /* is_event_hup() */
32132a3d 12127 raw_spin_unlock_irq(&child_ctx->lock);
0cc0c027 12128
9b51f66d 12129 /*
8ba289b8 12130 * Parent events are governed by their filedesc, retain them.
9b51f66d 12131 */
8ba289b8 12132 if (!parent_event) {
179033b3 12133 perf_event_wakeup(child_event);
8ba289b8 12134 return;
4bcf349a 12135 }
8ba289b8
PZ
12136 /*
12137 * Child events can be cleaned up.
12138 */
12139
12140 sync_child_event(child_event, child);
12141
12142 /*
12143 * Remove this event from the parent's list
12144 */
12145 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
12146 mutex_lock(&parent_event->child_mutex);
12147 list_del_init(&child_event->child_list);
12148 mutex_unlock(&parent_event->child_mutex);
12149
12150 /*
12151 * Kick perf_poll() for is_event_hup().
12152 */
12153 perf_event_wakeup(parent_event);
12154 free_event(child_event);
12155 put_event(parent_event);
9b51f66d
IM
12156}
12157
8dc85d54 12158static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9b51f66d 12159{
211de6eb 12160 struct perf_event_context *child_ctx, *clone_ctx = NULL;
63b6da39 12161 struct perf_event *child_event, *next;
63b6da39
PZ
12162
12163 WARN_ON_ONCE(child != current);
9b51f66d 12164
6a3351b6 12165 child_ctx = perf_pin_task_context(child, ctxn);
63b6da39 12166 if (!child_ctx)
9b51f66d
IM
12167 return;
12168
ad3a37de 12169 /*
6a3351b6
PZ
12170 * In order to reduce the amount of trickery in ctx tear-down, we hold
12171 * ctx::mutex over the entire thing. This serializes against almost
12172 * everything that wants to access the ctx.
12173 *
12174 * The exception is sys_perf_event_open() /
12175 * perf_event_create_kernel_counter() which does find_get_context()
12176 * without ctx::mutex (it cannot because of the move_group double mutex
12177 * lock thing). See the comments in perf_install_in_context().
ad3a37de 12178 */
6a3351b6 12179 mutex_lock(&child_ctx->mutex);
c93f7669
PM
12180
12181 /*
6a3351b6
PZ
12182 * In a single ctx::lock section, de-schedule the events and detach the
12183 * context from the task such that we cannot ever get it scheduled back
12184 * in.
c93f7669 12185 */
6a3351b6 12186 raw_spin_lock_irq(&child_ctx->lock);
487f05e1 12187 task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx, EVENT_ALL);
4a1c0f26 12188
71a851b4 12189 /*
63b6da39
PZ
12190 * Now that the context is inactive, destroy the task <-> ctx relation
12191 * and mark the context dead.
71a851b4 12192 */
63b6da39
PZ
12193 RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
12194 put_ctx(child_ctx); /* cannot be last */
12195 WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
12196 put_task_struct(current); /* cannot be last */
4a1c0f26 12197
211de6eb 12198 clone_ctx = unclone_ctx(child_ctx);
6a3351b6 12199 raw_spin_unlock_irq(&child_ctx->lock);
9f498cc5 12200
211de6eb
PZ
12201 if (clone_ctx)
12202 put_ctx(clone_ctx);
4a1c0f26 12203
9f498cc5 12204 /*
cdd6c482
IM
12205 * Report the task dead after unscheduling the events so that we
12206 * won't get any samples after PERF_RECORD_EXIT. We can however still
12207 * get a few PERF_RECORD_READ events.
9f498cc5 12208 */
cdd6c482 12209 perf_event_task(child, child_ctx, 0);
a63eaf34 12210
ebf905fc 12211 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
8ba289b8 12212 perf_event_exit_event(child_event, child_ctx, child);
8bc20959 12213
a63eaf34
PM
12214 mutex_unlock(&child_ctx->mutex);
12215
12216 put_ctx(child_ctx);
9b51f66d
IM
12217}
12218
8dc85d54
PZ
12219/*
12220 * When a child task exits, feed back event values to parent events.
79c9ce57 12221 *
69143038 12222 * Can be called with exec_update_mutex held when called from
96ecee29 12223 * setup_new_exec().
8dc85d54
PZ
12224 */
12225void perf_event_exit_task(struct task_struct *child)
12226{
8882135b 12227 struct perf_event *event, *tmp;
8dc85d54
PZ
12228 int ctxn;
12229
8882135b
PZ
12230 mutex_lock(&child->perf_event_mutex);
12231 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
12232 owner_entry) {
12233 list_del_init(&event->owner_entry);
12234
12235 /*
12236 * Ensure the list deletion is visible before we clear
12237 * the owner; this closes a race against perf_release() where
12238 * we need to serialize on the owner->perf_event_mutex.
12239 */
f47c02c0 12240 smp_store_release(&event->owner, NULL);
8882135b
PZ
12241 }
12242 mutex_unlock(&child->perf_event_mutex);
12243
8dc85d54
PZ
12244 for_each_task_context_nr(ctxn)
12245 perf_event_exit_task_context(child, ctxn);
4e93ad60
JO
12246
12247 /*
12248 * The perf_event_exit_task_context calls perf_event_task
12249 * with child's task_ctx, which generates EXIT events for
12250 * child contexts and sets child->perf_event_ctxp[] to NULL.
12251 * At this point we need to send EXIT events to cpu contexts.
12252 */
12253 perf_event_task(child, NULL, 0);
8dc85d54
PZ
12254}
12255
889ff015
FW
12256static void perf_free_event(struct perf_event *event,
12257 struct perf_event_context *ctx)
12258{
12259 struct perf_event *parent = event->parent;
12260
12261 if (WARN_ON_ONCE(!parent))
12262 return;
12263
12264 mutex_lock(&parent->child_mutex);
12265 list_del_init(&event->child_list);
12266 mutex_unlock(&parent->child_mutex);
12267
a6fa941d 12268 put_event(parent);
889ff015 12269
652884fe 12270 raw_spin_lock_irq(&ctx->lock);
8a49542c 12271 perf_group_detach(event);
889ff015 12272 list_del_event(event, ctx);
652884fe 12273 raw_spin_unlock_irq(&ctx->lock);
889ff015
FW
12274 free_event(event);
12275}
12276
bbbee908 12277/*
1cf8dfe8
PZ
12278 * Free a context as created by inheritance by perf_event_init_task() below,
12279 * used by fork() in case of fail.
652884fe 12280 *
1cf8dfe8
PZ
12281 * Even though the task has never lived, the context and events have been
12282 * exposed through the child_list, so we must take care tearing it all down.
bbbee908 12283 */
cdd6c482 12284void perf_event_free_task(struct task_struct *task)
bbbee908 12285{
8dc85d54 12286 struct perf_event_context *ctx;
cdd6c482 12287 struct perf_event *event, *tmp;
8dc85d54 12288 int ctxn;
bbbee908 12289
8dc85d54
PZ
12290 for_each_task_context_nr(ctxn) {
12291 ctx = task->perf_event_ctxp[ctxn];
12292 if (!ctx)
12293 continue;
bbbee908 12294
8dc85d54 12295 mutex_lock(&ctx->mutex);
e552a838
PZ
12296 raw_spin_lock_irq(&ctx->lock);
12297 /*
12298 * Destroy the task <-> ctx relation and mark the context dead.
12299 *
12300 * This is important because even though the task hasn't been
12301 * exposed yet the context has been (through child_list).
12302 */
12303 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
12304 WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
12305 put_task_struct(task); /* cannot be last */
12306 raw_spin_unlock_irq(&ctx->lock);
bbbee908 12307
15121c78 12308 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry)
8dc85d54 12309 perf_free_event(event, ctx);
bbbee908 12310
8dc85d54 12311 mutex_unlock(&ctx->mutex);
1cf8dfe8
PZ
12312
12313 /*
12314 * perf_event_release_kernel() could've stolen some of our
12315 * child events and still have them on its free_list. In that
12316 * case we must wait for these events to have been freed (in
12317 * particular all their references to this task must've been
12318 * dropped).
12319 *
12320 * Without this copy_process() will unconditionally free this
12321 * task (irrespective of its reference count) and
12322 * _free_event()'s put_task_struct(event->hw.target) will be a
12323 * use-after-free.
12324 *
12325 * Wait for all events to drop their context reference.
12326 */
12327 wait_var_event(&ctx->refcount, refcount_read(&ctx->refcount) == 1);
12328 put_ctx(ctx); /* must be last */
8dc85d54 12329 }
889ff015
FW
12330}
12331
4e231c79
PZ
12332void perf_event_delayed_put(struct task_struct *task)
12333{
12334 int ctxn;
12335
12336 for_each_task_context_nr(ctxn)
12337 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
12338}
12339
e03e7ee3 12340struct file *perf_event_get(unsigned int fd)
ffe8690c 12341{
02e5ad97 12342 struct file *file = fget(fd);
e03e7ee3
AS
12343 if (!file)
12344 return ERR_PTR(-EBADF);
ffe8690c 12345
e03e7ee3
AS
12346 if (file->f_op != &perf_fops) {
12347 fput(file);
12348 return ERR_PTR(-EBADF);
12349 }
ffe8690c 12350
e03e7ee3 12351 return file;
ffe8690c
KX
12352}
12353
f8d959a5
YS
12354const struct perf_event *perf_get_event(struct file *file)
12355{
12356 if (file->f_op != &perf_fops)
12357 return ERR_PTR(-EINVAL);
12358
12359 return file->private_data;
12360}
12361
ffe8690c
KX
12362const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
12363{
12364 if (!event)
12365 return ERR_PTR(-EINVAL);
12366
12367 return &event->attr;
12368}
12369
97dee4f3 12370/*
788faab7 12371 * Inherit an event from parent task to child task.
d8a8cfc7
PZ
12372 *
12373 * Returns:
12374 * - valid pointer on success
12375 * - NULL for orphaned events
12376 * - IS_ERR() on error
97dee4f3
PZ
12377 */
12378static struct perf_event *
12379inherit_event(struct perf_event *parent_event,
12380 struct task_struct *parent,
12381 struct perf_event_context *parent_ctx,
12382 struct task_struct *child,
12383 struct perf_event *group_leader,
12384 struct perf_event_context *child_ctx)
12385{
8ca2bd41 12386 enum perf_event_state parent_state = parent_event->state;
97dee4f3 12387 struct perf_event *child_event;
cee010ec 12388 unsigned long flags;
97dee4f3
PZ
12389
12390 /*
12391 * Instead of creating recursive hierarchies of events,
12392 * we link inherited events back to the original parent,
12393 * which has a filp for sure, which we use as the reference
12394 * count:
12395 */
12396 if (parent_event->parent)
12397 parent_event = parent_event->parent;
12398
12399 child_event = perf_event_alloc(&parent_event->attr,
12400 parent_event->cpu,
d580ff86 12401 child,
97dee4f3 12402 group_leader, parent_event,
79dff51e 12403 NULL, NULL, -1);
97dee4f3
PZ
12404 if (IS_ERR(child_event))
12405 return child_event;
a6fa941d 12406
313ccb96
JO
12407
12408 if ((child_event->attach_state & PERF_ATTACH_TASK_DATA) &&
12409 !child_ctx->task_ctx_data) {
12410 struct pmu *pmu = child_event->pmu;
12411
12412 child_ctx->task_ctx_data = kzalloc(pmu->task_ctx_size,
12413 GFP_KERNEL);
12414 if (!child_ctx->task_ctx_data) {
12415 free_event(child_event);
697d8778 12416 return ERR_PTR(-ENOMEM);
313ccb96
JO
12417 }
12418 }
12419
c6e5b732
PZ
12420 /*
12421 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
12422 * must be under the same lock in order to serialize against
12423 * perf_event_release_kernel(), such that either we must observe
12424 * is_orphaned_event() or they will observe us on the child_list.
12425 */
12426 mutex_lock(&parent_event->child_mutex);
fadfe7be
JO
12427 if (is_orphaned_event(parent_event) ||
12428 !atomic_long_inc_not_zero(&parent_event->refcount)) {
c6e5b732 12429 mutex_unlock(&parent_event->child_mutex);
313ccb96 12430 /* task_ctx_data is freed with child_ctx */
a6fa941d
AV
12431 free_event(child_event);
12432 return NULL;
12433 }
12434
97dee4f3
PZ
12435 get_ctx(child_ctx);
12436
12437 /*
12438 * Make the child state follow the state of the parent event,
12439 * not its attr.disabled bit. We hold the parent's mutex,
12440 * so we won't race with perf_event_{en, dis}able_family.
12441 */
1929def9 12442 if (parent_state >= PERF_EVENT_STATE_INACTIVE)
97dee4f3
PZ
12443 child_event->state = PERF_EVENT_STATE_INACTIVE;
12444 else
12445 child_event->state = PERF_EVENT_STATE_OFF;
12446
12447 if (parent_event->attr.freq) {
12448 u64 sample_period = parent_event->hw.sample_period;
12449 struct hw_perf_event *hwc = &child_event->hw;
12450
12451 hwc->sample_period = sample_period;
12452 hwc->last_period = sample_period;
12453
12454 local64_set(&hwc->period_left, sample_period);
12455 }
12456
12457 child_event->ctx = child_ctx;
12458 child_event->overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
12459 child_event->overflow_handler_context
12460 = parent_event->overflow_handler_context;
97dee4f3 12461
614b6780
TG
12462 /*
12463 * Precalculate sample_data sizes
12464 */
12465 perf_event__header_size(child_event);
6844c09d 12466 perf_event__id_header_size(child_event);
614b6780 12467
97dee4f3
PZ
12468 /*
12469 * Link it up in the child's context:
12470 */
cee010ec 12471 raw_spin_lock_irqsave(&child_ctx->lock, flags);
97dee4f3 12472 add_event_to_ctx(child_event, child_ctx);
cee010ec 12473 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
97dee4f3 12474
97dee4f3
PZ
12475 /*
12476 * Link this into the parent event's child list
12477 */
97dee4f3
PZ
12478 list_add_tail(&child_event->child_list, &parent_event->child_list);
12479 mutex_unlock(&parent_event->child_mutex);
12480
12481 return child_event;
12482}
12483
d8a8cfc7
PZ
12484/*
12485 * Inherits an event group.
12486 *
12487 * This will quietly suppress orphaned events; !inherit_event() is not an error.
12488 * This matches with perf_event_release_kernel() removing all child events.
12489 *
12490 * Returns:
12491 * - 0 on success
12492 * - <0 on error
12493 */
97dee4f3
PZ
12494static int inherit_group(struct perf_event *parent_event,
12495 struct task_struct *parent,
12496 struct perf_event_context *parent_ctx,
12497 struct task_struct *child,
12498 struct perf_event_context *child_ctx)
12499{
12500 struct perf_event *leader;
12501 struct perf_event *sub;
12502 struct perf_event *child_ctr;
12503
12504 leader = inherit_event(parent_event, parent, parent_ctx,
12505 child, NULL, child_ctx);
12506 if (IS_ERR(leader))
12507 return PTR_ERR(leader);
d8a8cfc7
PZ
12508 /*
12509 * @leader can be NULL here because of is_orphaned_event(). In this
12510 * case inherit_event() will create individual events, similar to what
12511 * perf_group_detach() would do anyway.
12512 */
edb39592 12513 for_each_sibling_event(sub, parent_event) {
97dee4f3
PZ
12514 child_ctr = inherit_event(sub, parent, parent_ctx,
12515 child, leader, child_ctx);
12516 if (IS_ERR(child_ctr))
12517 return PTR_ERR(child_ctr);
f733c6b5 12518
00496fe5 12519 if (sub->aux_event == parent_event && child_ctr &&
f733c6b5
AS
12520 !perf_get_aux_event(child_ctr, leader))
12521 return -EINVAL;
97dee4f3
PZ
12522 }
12523 return 0;
889ff015
FW
12524}
12525
d8a8cfc7
PZ
12526/*
12527 * Creates the child task context and tries to inherit the event-group.
12528 *
12529 * Clears @inherited_all on !attr.inherited or error. Note that we'll leave
12530 * inherited_all set when we 'fail' to inherit an orphaned event; this is
12531 * consistent with perf_event_release_kernel() removing all child events.
12532 *
12533 * Returns:
12534 * - 0 on success
12535 * - <0 on error
12536 */
889ff015
FW
12537static int
12538inherit_task_group(struct perf_event *event, struct task_struct *parent,
12539 struct perf_event_context *parent_ctx,
8dc85d54 12540 struct task_struct *child, int ctxn,
889ff015
FW
12541 int *inherited_all)
12542{
12543 int ret;
8dc85d54 12544 struct perf_event_context *child_ctx;
889ff015
FW
12545
12546 if (!event->attr.inherit) {
12547 *inherited_all = 0;
12548 return 0;
bbbee908
PZ
12549 }
12550
fe4b04fa 12551 child_ctx = child->perf_event_ctxp[ctxn];
889ff015
FW
12552 if (!child_ctx) {
12553 /*
12554 * This is executed from the parent task context, so
12555 * inherit events that have been marked for cloning.
12556 * First allocate and initialize a context for the
12557 * child.
12558 */
734df5ab 12559 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
889ff015
FW
12560 if (!child_ctx)
12561 return -ENOMEM;
bbbee908 12562
8dc85d54 12563 child->perf_event_ctxp[ctxn] = child_ctx;
889ff015
FW
12564 }
12565
12566 ret = inherit_group(event, parent, parent_ctx,
12567 child, child_ctx);
12568
12569 if (ret)
12570 *inherited_all = 0;
12571
12572 return ret;
bbbee908
PZ
12573}
12574
9b51f66d 12575/*
cdd6c482 12576 * Initialize the perf_event context in task_struct
9b51f66d 12577 */
985c8dcb 12578static int perf_event_init_context(struct task_struct *child, int ctxn)
9b51f66d 12579{
889ff015 12580 struct perf_event_context *child_ctx, *parent_ctx;
cdd6c482
IM
12581 struct perf_event_context *cloned_ctx;
12582 struct perf_event *event;
9b51f66d 12583 struct task_struct *parent = current;
564c2b21 12584 int inherited_all = 1;
dddd3379 12585 unsigned long flags;
6ab423e0 12586 int ret = 0;
9b51f66d 12587
8dc85d54 12588 if (likely(!parent->perf_event_ctxp[ctxn]))
6ab423e0
PZ
12589 return 0;
12590
ad3a37de 12591 /*
25346b93
PM
12592 * If the parent's context is a clone, pin it so it won't get
12593 * swapped under us.
ad3a37de 12594 */
8dc85d54 12595 parent_ctx = perf_pin_task_context(parent, ctxn);
ffb4ef21
PZ
12596 if (!parent_ctx)
12597 return 0;
25346b93 12598
ad3a37de
PM
12599 /*
12600 * No need to check if parent_ctx != NULL here; since we saw
12601 * it non-NULL earlier, the only reason for it to become NULL
12602 * is if we exit, and since we're currently in the middle of
12603 * a fork we can't be exiting at the same time.
12604 */
ad3a37de 12605
9b51f66d
IM
12606 /*
12607 * Lock the parent list. No need to lock the child - not PID
12608 * hashed yet and not running, so nobody can access it.
12609 */
d859e29f 12610 mutex_lock(&parent_ctx->mutex);
9b51f66d
IM
12611
12612 /*
12613	 * We don't have to disable NMIs - we are only looking at
12614 * the list, not manipulating it:
12615 */
6e6804d2 12616 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) {
8dc85d54
PZ
12617 ret = inherit_task_group(event, parent, parent_ctx,
12618 child, ctxn, &inherited_all);
889ff015 12619 if (ret)
e7cc4865 12620 goto out_unlock;
889ff015 12621 }
b93f7978 12622
dddd3379
TG
12623 /*
12624	 * We can't hold ctx->lock when iterating the ->flexible_groups list due
12625 * to allocations, but we need to prevent rotation because
12626 * rotate_ctx() will change the list from interrupt context.
12627 */
12628 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
12629 parent_ctx->rotate_disable = 1;
12630 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
12631
6e6804d2 12632 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) {
8dc85d54
PZ
12633 ret = inherit_task_group(event, parent, parent_ctx,
12634 child, ctxn, &inherited_all);
889ff015 12635 if (ret)
e7cc4865 12636 goto out_unlock;
564c2b21
PM
12637 }
12638
dddd3379
TG
12639 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
12640 parent_ctx->rotate_disable = 0;
dddd3379 12641
8dc85d54 12642 child_ctx = child->perf_event_ctxp[ctxn];
889ff015 12643
05cbaa28 12644 if (child_ctx && inherited_all) {
564c2b21
PM
12645 /*
12646 * Mark the child context as a clone of the parent
12647 * context, or of whatever the parent is a clone of.
c5ed5145
PZ
12648 *
12649 * Note that if the parent is a clone, the holding of
12650 * parent_ctx->lock avoids it from being uncloned.
564c2b21 12651 */
c5ed5145 12652 cloned_ctx = parent_ctx->parent_ctx;
ad3a37de
PM
12653 if (cloned_ctx) {
12654 child_ctx->parent_ctx = cloned_ctx;
25346b93 12655 child_ctx->parent_gen = parent_ctx->parent_gen;
564c2b21
PM
12656 } else {
12657 child_ctx->parent_ctx = parent_ctx;
12658 child_ctx->parent_gen = parent_ctx->generation;
12659 }
12660 get_ctx(child_ctx->parent_ctx);
9b51f66d
IM
12661 }
12662
c5ed5145 12663 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
e7cc4865 12664out_unlock:
d859e29f 12665 mutex_unlock(&parent_ctx->mutex);
6ab423e0 12666
25346b93 12667 perf_unpin_context(parent_ctx);
fe4b04fa 12668 put_ctx(parent_ctx);
ad3a37de 12669
6ab423e0 12670 return ret;
9b51f66d
IM
12671}
12672
8dc85d54
PZ
12673/*
12674 * Initialize the perf_event context in task_struct
12675 */
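/*
 * Called from the fork path: clear the child's per-context pointers and
 * inherit each context type from the parent; on error everything inherited
 * so far is torn down again via perf_event_free_task().
 */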
12676int perf_event_init_task(struct task_struct *child)
12677{
12678 int ctxn, ret;
12679
8550d7cb
ON
12680 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
12681 mutex_init(&child->perf_event_mutex);
12682 INIT_LIST_HEAD(&child->perf_event_list);
12683
8dc85d54
PZ
12684 for_each_task_context_nr(ctxn) {
12685 ret = perf_event_init_context(child, ctxn);
6c72e350
PZ
12686 if (ret) {
12687 perf_event_free_task(child);
8dc85d54 12688 return ret;
6c72e350 12689 }
8dc85d54
PZ
12690 }
12691
12692 return 0;
12693}
12694
220b140b
PM
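/*
 * Boot-time setup of per-CPU state: the perf_online_mask, the software
 * event hashtables, the active-context, side-band and sched-callback
 * lists and, under CONFIG_CGROUP_PERF, the per-CPU cgroup context list.
 */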
12695static void __init perf_event_init_all_cpus(void)
12696{
b28ab83c 12697 struct swevent_htable *swhash;
220b140b 12698 int cpu;
220b140b 12699
a63fbed7
TG
12700 zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);
12701
220b140b 12702 for_each_possible_cpu(cpu) {
b28ab83c
PZ
12703 swhash = &per_cpu(swevent_htable, cpu);
12704 mutex_init(&swhash->hlist_mutex);
2fde4f94 12705 INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
f2fb6bef
KL
12706
12707 INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
12708 raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
e48c1788 12709
058fe1c0
DCC
12710#ifdef CONFIG_CGROUP_PERF
12711 INIT_LIST_HEAD(&per_cpu(cgrp_cpuctx_list, cpu));
12712#endif
e48c1788 12713 INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
220b140b
PM
12714 }
12715}
12716
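/*
 * (Re)allocate the software-event hlist for an onlining CPU if somebody
 * still holds a reference to it.
 */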
d18bf422 12717static void perf_swevent_init_cpu(unsigned int cpu)
0793a61d 12718{
108b02cf 12719 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
0793a61d 12720
b28ab83c 12721 mutex_lock(&swhash->hlist_mutex);
059fcd8c 12722 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
76e1d904
FW
12723 struct swevent_hlist *hlist;
12724
b28ab83c
PZ
12725 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
12726 WARN_ON(!hlist);
12727 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 12728 }
b28ab83c 12729 mutex_unlock(&swhash->hlist_mutex);
0793a61d
TG
12730}
12731
2965faa5 12732#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
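/*
 * Runs on the CPU that is going away: stop the context's timekeeping and
 * detach every event from it.
 */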
108b02cf 12733static void __perf_event_exit_context(void *__info)
0793a61d 12734{
108b02cf 12735 struct perf_event_context *ctx = __info;
fae3fde6
PZ
12736 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
12737 struct perf_event *event;
0793a61d 12738
fae3fde6 12739 raw_spin_lock(&ctx->lock);
0ee098c9 12740 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
fae3fde6 12741 list_for_each_entry(event, &ctx->event_list, event_entry)
45a0e07a 12742 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
fae3fde6 12743 raw_spin_unlock(&ctx->lock);
0793a61d 12744}
108b02cf
PZ
12745
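/*
 * Offline path: for each PMU, run __perf_event_exit_context() on @cpu's
 * context and mark it !online so no new events can be installed on it.
 */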
12746static void perf_event_exit_cpu_context(int cpu)
12747{
a63fbed7 12748 struct perf_cpu_context *cpuctx;
108b02cf
PZ
12749 struct perf_event_context *ctx;
12750 struct pmu *pmu;
108b02cf 12751
a63fbed7
TG
12752 mutex_lock(&pmus_lock);
12753 list_for_each_entry(pmu, &pmus, entry) {
12754 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
12755 ctx = &cpuctx->ctx;
108b02cf
PZ
12756
12757 mutex_lock(&ctx->mutex);
12758 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
a63fbed7 12759 cpuctx->online = 0;
108b02cf
PZ
12760 mutex_unlock(&ctx->mutex);
12761 }
a63fbed7
TG
12762 cpumask_clear_cpu(cpu, perf_online_mask);
12763 mutex_unlock(&pmus_lock);
108b02cf 12764}
00e16c3d
TG
12765#else
12766
12767static void perf_event_exit_cpu_context(int cpu) { }
12768
12769#endif
108b02cf 12770
a63fbed7
TG
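/*
 * Hotplug online callback: set up the software-event state for @cpu, add
 * it to perf_online_mask and mark every PMU's per-CPU context online.
 */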
12771int perf_event_init_cpu(unsigned int cpu)
12772{
12773 struct perf_cpu_context *cpuctx;
12774 struct perf_event_context *ctx;
12775 struct pmu *pmu;
12776
12777 perf_swevent_init_cpu(cpu);
12778
12779 mutex_lock(&pmus_lock);
12780 cpumask_set_cpu(cpu, perf_online_mask);
12781 list_for_each_entry(pmu, &pmus, entry) {
12782 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
12783 ctx = &cpuctx->ctx;
12784
12785 mutex_lock(&ctx->mutex);
12786 cpuctx->online = 1;
12787 mutex_unlock(&ctx->mutex);
12788 }
12789 mutex_unlock(&pmus_lock);
12790
12791 return 0;
12792}
12793
00e16c3d 12794int perf_event_exit_cpu(unsigned int cpu)
0793a61d 12795{
e3703f8c 12796 perf_event_exit_cpu_context(cpu);
00e16c3d 12797 return 0;
0793a61d 12798}
0793a61d 12799
c277443c
PZ
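/*
 * Reboot notifier: tear down perf on every online CPU, exactly as if the
 * CPUs were being offlined.
 */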
12800static int
12801perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
12802{
12803 int cpu;
12804
12805 for_each_online_cpu(cpu)
12806 perf_event_exit_cpu(cpu);
12807
12808 return NOTIFY_OK;
12809}
12810
12811/*
12812 * Run the perf reboot notifier at the very last possible moment so that
12813 * the generic watchdog code runs as long as possible.
12814 */
12815static struct notifier_block perf_reboot_notifier = {
12816 .notifier_call = perf_reboot,
12817 .priority = INT_MIN,
12818};
12819
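/*
 * Early boot: register the built-in software, cpu-clock, task-clock and
 * tracepoint PMUs, bring up the boot CPU's perf state, hook the reboot
 * notifier and initialize hw_breakpoint support.
 */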
cdd6c482 12820void __init perf_event_init(void)
0793a61d 12821{
3c502e7a
JW
12822 int ret;
12823
2e80a82a
PZ
12824 idr_init(&pmu_idr);
12825
220b140b 12826 perf_event_init_all_cpus();
b0a873eb 12827 init_srcu_struct(&pmus_srcu);
2e80a82a
PZ
12828 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
12829 perf_pmu_register(&perf_cpu_clock, NULL, -1);
12830 perf_pmu_register(&perf_task_clock, NULL, -1);
b0a873eb 12831 perf_tp_register();
00e16c3d 12832 perf_event_init_cpu(smp_processor_id());
c277443c 12833 register_reboot_notifier(&perf_reboot_notifier);
3c502e7a
JW
12834
12835 ret = init_hw_breakpoint();
12836 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
b2029520 12837
b01c3a00
JO
12838 /*
12839	 * Build-time assertion that we keep the data_head at the intended
12840	 * location. IOW, validate that we got the __reserved[] size right.
12841 */
12842 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
12843 != 1024);
0793a61d 12844}
abe43400 12845
fd979c01
CS
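/*
 * sysfs show method for string-based PMU event attributes; prints the
 * event string if one was supplied, and nothing otherwise.
 */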
12846ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
12847 char *page)
12848{
12849 struct perf_pmu_events_attr *pmu_attr =
12850 container_of(attr, struct perf_pmu_events_attr, attr);
12851
12852 if (pmu_attr->event_str)
12853 return sprintf(page, "%s\n", pmu_attr->event_str);
12854
12855 return 0;
12856}
675965b0 12857EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
fd979c01 12858
abe43400
PZ
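/*
 * Late initcall: register the PMU bus and create sysfs devices for all
 * PMUs that were registered before the bus existed.
 */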
12859static int __init perf_event_sysfs_init(void)
12860{
12861 struct pmu *pmu;
12862 int ret;
12863
12864 mutex_lock(&pmus_lock);
12865
12866 ret = bus_register(&pmu_bus);
12867 if (ret)
12868 goto unlock;
12869
12870 list_for_each_entry(pmu, &pmus, entry) {
12871 if (!pmu->name || pmu->type < 0)
12872 continue;
12873
12874 ret = pmu_dev_alloc(pmu);
12875 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
12876 }
12877 pmu_bus_running = 1;
12878 ret = 0;
12879
12880unlock:
12881 mutex_unlock(&pmus_lock);
12882
12883 return ret;
12884}
12885device_initcall(perf_event_sysfs_init);
e5d1367f
SE
12886
12887#ifdef CONFIG_CGROUP_PERF
eb95419b
TH
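/*
 * Allocate the per-cgroup perf state, including the per-CPU
 * perf_cgroup_info used for cgroup time accounting.
 */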
12888static struct cgroup_subsys_state *
12889perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
e5d1367f
SE
12890{
12891 struct perf_cgroup *jc;
e5d1367f 12892
1b15d055 12893 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
e5d1367f
SE
12894 if (!jc)
12895 return ERR_PTR(-ENOMEM);
12896
e5d1367f
SE
12897 jc->info = alloc_percpu(struct perf_cgroup_info);
12898 if (!jc->info) {
12899 kfree(jc);
12900 return ERR_PTR(-ENOMEM);
12901 }
12902
e5d1367f
SE
12903 return &jc->css;
12904}
12905
eb95419b 12906static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
e5d1367f 12907{
eb95419b
TH
12908 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
12909
e5d1367f
SE
12910 free_percpu(jc->info);
12911 kfree(jc);
12912}
12913
96aaab68
NK
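/*
 * A new perf_event cgroup came online: let the perf core know about it
 * via perf_event_cgroup().
 */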
12914static int perf_cgroup_css_online(struct cgroup_subsys_state *css)
12915{
12916 perf_event_cgroup(css->cgroup);
12917 return 0;
12918}
12919
e5d1367f
SE
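/*
 * Runs on the CPU the task is currently on (via task_function_call()):
 * switch the task out of its old cgroup events and into the new ones.
 */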
12920static int __perf_cgroup_move(void *info)
12921{
12922 struct task_struct *task = info;
ddaaf4e2 12923 rcu_read_lock();
e5d1367f 12924 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
ddaaf4e2 12925 rcu_read_unlock();
e5d1367f
SE
12926 return 0;
12927}
12928
1f7dd3e5 12929static void perf_cgroup_attach(struct cgroup_taskset *tset)
e5d1367f 12930{
bb9d97b6 12931 struct task_struct *task;
1f7dd3e5 12932 struct cgroup_subsys_state *css;
bb9d97b6 12933
1f7dd3e5 12934 cgroup_taskset_for_each(task, css, tset)
bb9d97b6 12935 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
12936}
12937
073219e9 12938struct cgroup_subsys perf_event_cgrp_subsys = {
92fb9748
TH
12939 .css_alloc = perf_cgroup_css_alloc,
12940 .css_free = perf_cgroup_css_free,
96aaab68 12941 .css_online = perf_cgroup_css_online,
bb9d97b6 12942 .attach = perf_cgroup_attach,
968ebff1
TH
12943 /*
12944	 * Implicitly enable on the default (cgroup2) hierarchy so that perf
12945	 * events can always be filtered by cgroup2 path as long as the
12946	 * perf_event controller is not mounted on a legacy hierarchy.
12947 */
12948 .implicit_on_dfl = true,
8cfd8147 12949 .threaded = true,
e5d1367f
SE
12950};
12951#endif /* CONFIG_CGROUP_PERF */