// SPDX-License-Identifier: GPL-2.0
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>
#include <trace/syscall.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);
static bool eventdir_initialized;

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
	return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
	return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
	return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)			\
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {	\
		struct trace_event_file *___n;			\
		list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()		\
	}

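/*
 * A minimal (hypothetical) sketch of how the double-loop macros above
 * are meant to be used -- note the goto instead of break:
 *
 *	do_for_each_event_file(tr, file) {
 *		if (file == target)
 *			goto out;
 *	} while_for_each_event_file();
 *  out:
 */
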
static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link) {
		if (!strcmp(field->name, name))
			return field;
	}

	return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
	struct ftrace_event_field *field;
	struct list_head *head;

	head = trace_get_fields(call);
	field = __find_event_field(head, name);
	if (field)
		return field;

	field = __find_event_field(&ftrace_generic_fields, name);
	if (field)
		return field;

	return __find_event_field(&ftrace_common_fields, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct ftrace_event_field *field;

	field = kmem_cache_alloc(field_cachep, GFP_TRACE);
	if (!field)
		return -ENOMEM;

	field->name = name;
	field->type = type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, head);

	return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct list_head *head;

	if (WARN_ON(!call->class))
		return 0;

	head = trace_get_fields(call);
	return __trace_define_field(head, type, name, offset, size,
				    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __generic_field(type, item, filter_type)			\
	ret = __trace_define_field(&ftrace_generic_fields, #type,	\
				   #item, 0, 0, is_signed_type(type),	\
				   filter_type);			\
	if (ret)							\
		return ret;

#define __common_field(type, item)					\
	ret = __trace_define_field(&ftrace_common_fields, #type,	\
				   "common_" #item,			\
				   offsetof(typeof(ent), item),		\
				   sizeof(ent.item),			\
				   is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_generic_fields(void)
{
	int ret;

	__generic_field(int, CPU, FILTER_CPU);
	__generic_field(int, cpu, FILTER_CPU);
	__generic_field(char *, COMM, FILTER_COMM);
	__generic_field(char *, comm, FILTER_COMM);

	return ret;
}

static int trace_define_common_fields(void)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head;

	head = trace_get_fields(call);
	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kmem_cache_free(field_cachep, field);
	}
}

/*
 * run-time version of trace_event_get_offsets_<call>() that returns the last
 * accessible offset of trace fields excluding __dynamic_array bytes
 */
int trace_event_get_offsets(struct trace_event_call *call)
{
	struct ftrace_event_field *tail;
	struct list_head *head;

	head = trace_get_fields(call);
	/*
	 * head->next points to the last field with the largest offset,
	 * since it was added last by trace_define_field()
	 */
	tail = list_first_entry(head, struct ftrace_event_field, link);
	return tail->offset + tail->size;
}

int trace_event_raw_init(struct trace_event_call *call)
{
	int id;

	id = register_trace_event(&call->event);
	if (!id)
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
	struct trace_array *tr = trace_file->tr;
	struct trace_array_cpu *data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	no_pid_list = rcu_dereference_raw(tr->filtered_no_pids);

	if (!pid_list && !no_pid_list)
		return false;

	data = this_cpu_ptr(tr->array_buffer.data);

	return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
				 struct trace_event_file *trace_file,
				 unsigned long len)
{
	struct trace_event_call *event_call = trace_file->event_call;

	if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(trace_file))
		return NULL;

	local_save_flags(fbuffer->flags);
	fbuffer->pc = preempt_count();
	/*
	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
	if (IS_ENABLED(CONFIG_PREEMPTION))
		fbuffer->pc--;
	fbuffer->trace_file = trace_file;

	fbuffer->event =
		trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
						event_call->event.type, len,
						fbuffer->flags, fbuffer->pc);
	if (!fbuffer->event)
		return NULL;

	fbuffer->regs = NULL;
	fbuffer->entry = ring_buffer_event_data(fbuffer->event);
	return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);

int trace_event_reg(struct trace_event_call *call,
		    enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
	switch (type) {
	case TRACE_REG_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->probe,
						 file);
	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return tracepoint_probe_register(call->tp,
						 call->class->perf_probe,
						 call);
	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    call);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {

		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_cmdline_record();
			set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		} else {
			tracing_stop_cmdline_record();
			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
		}
	} while_for_each_event_file();
}

void trace_event_enable_tgid_record(bool enable)
{
	struct trace_event_file *file;
	struct trace_array *tr;

	lockdep_assert_held(&event_mutex);

	do_for_each_event_file(tr, file) {
		if (!(file->flags & EVENT_FILE_FL_ENABLED))
			continue;

		if (enable) {
			tracing_start_tgid_record();
			set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
		} else {
			tracing_stop_tgid_record();
			clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT,
				  &file->flags);
		}
	} while_for_each_event_file();
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
					 int enable, int soft_disable)
{
	struct trace_event_call *call = file->event_call;
	struct trace_array *tr = file->tr;
	unsigned long file_flags = file->flags;
	int ret = 0;
	int disable;

	switch (enable) {
	case 0:
		/*
		 * When soft_disable is set and enable is cleared, the sm_ref
		 * reference counter is decremented. If it reaches 0, we want
		 * to clear the SOFT_DISABLED flag but leave the event in the
		 * state that it was. That is, if the event was enabled and
		 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
		 * is set we do not want the event to be enabled before we
		 * clear the bit.
		 *
		 * When soft_disable is not set but the SOFT_MODE flag is,
		 * we do nothing. Do not disable the tracepoint, otherwise
		 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
		 */
		if (soft_disable) {
			if (atomic_dec_return(&file->sm_ref) > 0)
				break;
			disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
			clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		} else
			disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

		if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
			clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
			if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
				tracing_stop_cmdline_record();
				clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (file->flags & EVENT_FILE_FL_RECORDED_TGID) {
				tracing_stop_tgid_record();
				clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			call->class->reg(call, TRACE_REG_UNREGISTER, file);
		}
		/* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
		if (file->flags & EVENT_FILE_FL_SOFT_MODE)
			set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		break;
	case 1:
		/*
		 * When soft_disable is set and enable is set, we want to
		 * register the tracepoint for the event, but leave the event
		 * as is. That means, if the event was already enabled, we do
		 * nothing (but set SOFT_MODE). If the event is disabled, we
		 * set SOFT_DISABLED before enabling the event tracepoint, so
		 * it still seems to be disabled.
		 */
		if (!soft_disable)
			clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
		else {
			if (atomic_inc_return(&file->sm_ref) > 1)
				break;
			set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
		}

		if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
			bool cmd = false, tgid = false;

			/* Keep the event disabled, when going to SOFT_MODE. */
			if (soft_disable)
				set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

			if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
				cmd = true;
				tracing_start_cmdline_record();
				set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
			}

			if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
				tgid = true;
				tracing_start_tgid_record();
				set_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags);
			}

			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
			if (ret) {
				if (cmd)
					tracing_stop_cmdline_record();
				if (tgid)
					tracing_stop_tgid_record();
				pr_info("event trace: Could not enable event "
					"%s\n", trace_event_name(call));
				break;
			}
			set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

			/* WAS_ENABLED gets set but never cleared. */
			set_bit(EVENT_FILE_FL_WAS_ENABLED_BIT, &file->flags);
		}
		break;
	}

	/* Enable or disable use of trace_buffered_event */
	if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
	    (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
		if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
			trace_buffered_event_enable();
		else
			trace_buffered_event_disable();
	}

	return ret;
}

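/*
 * Note that a soft enable (enable and soft_disable both set) is what
 * in-kernel users such as event triggers rely on: the tracepoint stays
 * registered while the SOFT_DISABLED bit keeps the event from being
 * recorded until it is "soft enabled".
 */
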
int trace_event_enable_disable(struct trace_event_file *file,
			       int enable, int soft_disable)
{
	return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
				       int enable)
{
	return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
	struct trace_event_file *file;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		ftrace_event_enable_disable(file, 0);
	}
	mutex_unlock(&event_mutex);
}

static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_raw(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);

	pid_list = rcu_dereference_raw(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static void
event_filter_pid_sched_process_fork(void *data,
				    struct task_struct *self,
				    struct task_struct *task)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = data;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	trace_filter_add_remove_task(pid_list, self, task);

	pid_list = rcu_dereference_sched(tr->filtered_no_pids);
	trace_filter_add_remove_task(pid_list, self, task);
}

void trace_event_follow_fork(struct trace_array *tr, bool enable)
{
	if (enable) {
		register_trace_prio_sched_process_fork(event_filter_pid_sched_process_fork,
						       tr, INT_MIN);
		register_trace_prio_sched_process_free(event_filter_pid_sched_process_exit,
						       tr, INT_MAX);
	} else {
		unregister_trace_sched_process_fork(event_filter_pid_sched_process_fork,
						    tr);
		unregister_trace_sched_process_free(event_filter_pid_sched_process_exit,
						    tr);
	}
}

static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;
	bool ret;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/*
	 * Sched switch is funny, as we only want to ignore it
	 * in the notrace case if both prev and next should be ignored.
	 */
	ret = trace_ignore_this_task(NULL, no_pid_list, prev) &&
		trace_ignore_this_task(NULL, no_pid_list, next);

	this_cpu_write(tr->array_buffer.data->ignore_pid, ret ||
		       (trace_ignore_this_task(pid_list, NULL, prev) &&
			trace_ignore_this_task(pid_list, NULL, next)));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
		    struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are already tracing */
	if (!this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
	struct trace_array *tr = data;
	struct trace_pid_list *no_pid_list;
	struct trace_pid_list *pid_list;

	/* Nothing to do if we are not tracing */
	if (this_cpu_read(tr->array_buffer.data->ignore_pid))
		return;

	pid_list = rcu_dereference_sched(tr->filtered_pids);
	no_pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	/* Set tracing if current is enabled */
	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void unregister_pid_events(struct trace_array *tr)
{
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
	unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
	unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);
}

static void __ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;
	struct trace_event_file *file;
	int cpu;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
					     lockdep_is_held(&event_mutex));

	/* Make sure there's something to do */
	if (!pid_type_enabled(type, pid_list, no_pid_list))
		return;

	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
		unregister_pid_events(tr);

		list_for_each_entry(file, &tr->events, list) {
			clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
		}

		for_each_possible_cpu(cpu)
			per_cpu_ptr(tr->array_buffer.data, cpu)->ignore_pid = false;
	}

	if (type & TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, NULL);

	if (type & TRACE_NO_PIDS)
		rcu_assign_pointer(tr->filtered_no_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	tracepoint_synchronize_unregister();

	if ((type & TRACE_PIDS) && pid_list)
		trace_free_pid_list(pid_list);

	if ((type & TRACE_NO_PIDS) && no_pid_list)
		trace_free_pid_list(no_pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr, int type)
{
	mutex_lock(&event_mutex);
	__ftrace_clear_event_pids(tr, type);
	mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
	struct event_filter *filter = system->filter;

	WARN_ON_ONCE(system_refcount(system) == 0);
	if (system_refcount_dec(system))
		return;

	list_del(&system->list);

	if (filter) {
		kfree(filter->filter_string);
		kfree(filter);
	}
	kfree_const(system->name);
	kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
	WARN_ON_ONCE(system_refcount(system) == 0);
	system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	dir->ref_count++;
	__get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
	WARN_ON_ONCE(dir->ref_count == 0);
	/* If the subsystem is about to be freed, the dir must be too */
	WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

	__put_system(dir->subsystem);
	if (!--dir->ref_count)
		kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
	mutex_lock(&event_mutex);
	__put_system_dir(dir);
	mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
	if (!dir)
		return;

	if (!--dir->nr_events) {
		tracefs_remove(dir->entry);
		list_del(&dir->list);
		__put_system_dir(dir);
	}
}

static void remove_event_file_dir(struct trace_event_file *file)
{
	struct dentry *dir = file->dir;
	struct dentry *child;

	if (dir) {
		spin_lock(&dir->d_lock);	/* probably unneeded */
		list_for_each_entry(child, &dir->d_subdirs, d_child) {
			if (d_really_is_positive(child))	/* probably unneeded */
				d_inode(child)->i_private = NULL;
		}
		spin_unlock(&dir->d_lock);

		tracefs_remove(dir);
	}

	list_del(&file->list);
	remove_subsystem(file->system);
	free_event_filter(file->filter);
	kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
			      const char *sub, const char *event, int set)
{
	struct trace_event_file *file;
	struct trace_event_call *call;
	const char *name;
	int ret = -EINVAL;
	int eret = 0;

	list_for_each_entry(file, &tr->events, list) {

		call = file->event_call;
		name = trace_event_name(call);

		if (!name || !call->class || !call->class->reg)
			continue;

		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
			continue;

		if (match &&
		    strcmp(match, name) != 0 &&
		    strcmp(match, call->class->system) != 0)
			continue;

		if (sub && strcmp(sub, call->class->system) != 0)
			continue;

		if (event && strcmp(event, name) != 0)
			continue;

		ret = ftrace_event_enable_disable(file, set);

		/*
		 * Save the first error and return that. Some events
		 * may still have been enabled, but let the user
		 * know that something went wrong.
		 */
		if (ret && !eret)
			eret = ret;

		ret = eret;
	}

	return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
				  const char *sub, const char *event, int set)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
	mutex_unlock(&event_mutex);

	return ret;
}

int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;
	int ret;

	if (!tr)
		return -ENOENT;
	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

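	/*
	 * For example, "sched:sched_switch" names a single event,
	 * "sched:" selects every event in the sched subsystem, and a
	 * bare "sched_switch" matches any event or subsystem so named.
	 */
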
	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	ret = __ftrace_set_clr_event(tr, match, sub, event, set);

	/* Put back the colon to allow this to be called again */
	if (buf)
		*(buf - 1) = ':';

	return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	struct trace_array *tr = top_trace_array();

	if (!tr)
		return -ENODEV;

	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

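/*
 * A minimal (hypothetical) caller -- enable one event from elsewhere
 * in the kernel:
 *
 *	if (trace_set_clr_event("sched", "sched_switch", 1))
 *		pr_warn("could not enable sched_switch event\n");
 */
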
/**
 * trace_array_set_clr_event - enable or disable an event for a trace array.
 * @tr: concerned trace array.
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @enable: true to enable, false to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
			      const char *event, bool enable)
{
	int set;

	if (!tr)
		return -ENOENT;

	set = (enable == true) ? 1 : 0;
	return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_array_set_clr_event);

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	struct seq_file *m = file->private_data;
	struct trace_array *tr = m->private;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}

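/*
 * This is the handler behind writes to the "set_event" file, e.g.:
 *
 *	echo sched:sched_switch >> /sys/kernel/tracing/set_event
 *	echo '!sched:*' >> /sys/kernel/tracing/set_event
 *
 * where a leading '!' clears the matching events instead of setting them.
 */
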
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		call = file->event_call;
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->class && call->class->reg &&
		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
			return file;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = t_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_file *file = v;
	struct trace_array *tr = m->private;

	(*pos)++;

	list_for_each_entry_continue(file, &tr->events, list) {
		if (file->flags & EVENT_FILE_FL_ENABLED)
			return file;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_event_file *file;
	struct trace_array *tr = m->private;
	loff_t l;

	mutex_lock(&event_mutex);

	file = list_entry(&tr->events, struct trace_event_file, list);
	for (l = 0; l <= *pos; ) {
		file = s_next(m, file, &l);
		if (!file)
			break;
	}
	return file;
}

static int t_show(struct seq_file *m, void *v)
{
	struct trace_event_file *file = v;
	struct trace_event_call *call = file->event_call;

	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->class->system);
	seq_printf(m, "%s\n", trace_event_name(call));

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

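/*
 * Between them, t_start()/t_next() and s_start()/s_next() drive the
 * "available_events" and "set_event" files respectively; t_show()
 * prints each entry as "<system>:<name>" (the system prefix is
 * dropped for TRACE_SYSTEM).
 */
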
static void *
__next(struct seq_file *m, void *v, loff_t *pos, int type)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list;

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	return trace_pid_next(pid_list, v, pos);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_PIDS);
}

static void *
np_next(struct seq_file *m, void *v, loff_t *pos)
{
	return __next(m, v, pos, TRACE_NO_PIDS);
}

static void *__start(struct seq_file *m, loff_t *pos, int type)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	/*
	 * Grab the mutex, to keep calls to p_next() having the same
	 * tr->filtered_pids as p_start() has.
	 * If we just passed the tr->filtered_pids around, then RCU would
	 * have been enough, but doing that makes things more complex.
	 */
	mutex_lock(&event_mutex);
	rcu_read_lock_sched();

	if (type == TRACE_PIDS)
		pid_list = rcu_dereference_sched(tr->filtered_pids);
	else
		pid_list = rcu_dereference_sched(tr->filtered_no_pids);

	if (!pid_list)
		return NULL;

	return trace_pid_start(pid_list, pos);
}

static void *p_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_PIDS);
}

static void *np_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	return __start(m, pos, TRACE_NO_PIDS);
}

static void p_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&event_mutex);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long flags;
	char buf[4] = "0";

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (likely(file))
		flags = file->flags;
	mutex_unlock(&event_mutex);

	if (!file)
		return -ENODEV;

	if (flags & EVENT_FILE_FL_ENABLED &&
	    !(flags & EVENT_FILE_FL_SOFT_DISABLED))
		strcpy(buf, "1");

	if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
	    flags & EVENT_FILE_FL_SOFT_MODE)
		strcat(buf, "*");

	strcat(buf, "\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		ret = -ENODEV;
		mutex_lock(&event_mutex);
		file = event_file_data(filp);
		if (likely(file))
			ret = ftrace_event_enable_disable(file, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}

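/*
 * These handlers sit behind each event's "enable" file, e.g.:
 *
 *	echo 1 > /sys/kernel/tracing/events/sched/sched_switch/enable
 *
 * Reading it back shows "0" or "1", with a trailing '*' when the
 * event is in soft mode.
 */
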
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_event_call *call;
	struct trace_event_file *file;
	struct trace_array *tr = dir->tr;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(file, &tr->events, list) {
		call = file->event_call;
		if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
		    !trace_event_name(call) || !call->class || !call->class->reg)
			continue;

		if (system && strcmp(call->class->system, system->name) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	const char *name = NULL;
	unsigned long val;
	ssize_t ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/*
	 * Opening of "enable" adds a ref count to system,
	 * so the name is safe to use.
	 */
	if (system)
		name = system->name;

	ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

enum {
	FORMAT_HEADER		= 1,
	FORMAT_FIELD_SEPERATOR	= 2,
	FORMAT_PRINTFMT		= 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct list_head *common_head = &ftrace_common_fields;
	struct list_head *head = trace_get_fields(call);
	struct list_head *node = v;

	(*pos)++;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		node = common_head;
		break;

	case FORMAT_FIELD_SEPERATOR:
		node = head;
		break;

	case FORMAT_PRINTFMT:
		/* all done */
		return NULL;
	}

	node = node->prev;
	if (node == common_head)
		return (void *)FORMAT_FIELD_SEPERATOR;
	else if (node == head)
		return (void *)FORMAT_PRINTFMT;
	else
		return node;
}

static int f_show(struct seq_file *m, void *v)
{
	struct trace_event_call *call = event_file_data(m->private);
	struct ftrace_event_field *field;
	const char *array_descriptor;

	switch ((unsigned long)v) {
	case FORMAT_HEADER:
		seq_printf(m, "name: %s\n", trace_event_name(call));
		seq_printf(m, "ID: %d\n", call->event.type);
		seq_puts(m, "format:\n");
		return 0;

	case FORMAT_FIELD_SEPERATOR:
		seq_putc(m, '\n');
		return 0;

	case FORMAT_PRINTFMT:
		seq_printf(m, "\nprint fmt: %s\n",
			   call->print_fmt);
		return 0;
	}

	field = list_entry(v, struct ftrace_event_field, link);
	/*
	 * Smartly shows the array type (except dynamic array).
	 * Normal:
	 *	field:TYPE VAR
	 * If TYPE := TYPE[LEN], it is shown:
	 *	field:TYPE VAR[LEN]
	 */
	array_descriptor = strchr(field->type, '[');

	if (str_has_prefix(field->type, "__data_loc"))
		array_descriptor = NULL;

	if (!array_descriptor)
		seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   field->type, field->name, field->offset,
			   field->size, !!field->is_signed);
	else
		seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
			   (int)(array_descriptor - field->type),
			   field->type, field->name,
			   array_descriptor, field->offset,
			   field->size, !!field->is_signed);

	return 0;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
	void *p = (void *)FORMAT_HEADER;
	loff_t l = 0;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	if (!event_file_data(m->private))
		return ERR_PTR(-ENODEV);

	while (l < *pos && p)
		p = f_next(m, p, &l);

	return p;
}

static void f_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
	.start		= f_start,
	.next		= f_next,
	.stop		= f_stop,
	.show		= f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;

	/* Do we want to hide event format files on tracefs lockdown? */

	ret = seq_open(file, &trace_format_seq_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = file;

	return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int id = (long)event_file_data(filp);
	char buf[32];
	int len;

	if (unlikely(!id))
		return -ENODEV;

	len = sprintf(buf, "%d\n", id);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct trace_event_file *file;
	struct trace_seq *s;
	int r = -ENODEV;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		print_event_filter(file, s);
	mutex_unlock(&event_mutex);

	if (file)
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_event_file *file;
	char *buf;
	int err = -ENODEV;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&event_mutex);
	file = event_file_data(filp);
	if (file)
		err = apply_event_filter(file, buf);
	mutex_unlock(&event_mutex);

	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

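/*
 * These back each event's "filter" file; a typical use is:
 *
 *	echo 'prev_pid == 0' > events/sched/sched_switch/filter
 *
 * The expression itself is parsed by apply_event_filter() in
 * trace_events_filter.c.
 */
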
static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
	struct event_subsystem *system = NULL;
	struct trace_subsystem_dir *dir = NULL;	/* Initialize for gcc */
	struct trace_array *tr;
	int ret;

	if (tracing_is_disabled())
		return -ENODEV;

	/* Make sure the system still exists */
	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(dir, &tr->systems, list) {
			if (dir == inode->i_private) {
				/* Don't open systems with no events */
				if (dir->nr_events) {
					__get_system_dir(dir);
					system = dir->subsystem;
				}
				goto exit_loop;
			}
		}
	}
 exit_loop:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	if (!system)
		return -ENODEV;

	/* Some versions of gcc think dir can be uninitialized here */
	WARN_ON(!dir);

	/* Still need to increment the ref count of the system */
	if (trace_array_get(tr) < 0) {
		put_system(dir);
		return -ENODEV;
	}

	ret = tracing_open_generic(inode, filp);
	if (ret < 0) {
		trace_array_put(tr);
		put_system(dir);
	}

	return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
	struct trace_subsystem_dir *dir;
	struct trace_array *tr = inode->i_private;
	int ret;

	/* Make a temporary dir that has no system but points to tr */
	dir = kzalloc(sizeof(*dir), GFP_KERNEL);
	if (!dir)
		return -ENOMEM;

	ret = tracing_open_generic_tr(inode, filp);
	if (ret < 0) {
		kfree(dir);
		return ret;
	}
	dir->tr = tr;
	filp->private_data = dir;

	return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
	struct trace_subsystem_dir *dir = file->private_data;

	trace_array_put(dir->tr);

	/*
	 * If dir->subsystem is NULL, then this is a temporary
	 * descriptor that was made for a trace_array to enable
	 * all subsystems.
	 */
	if (dir->subsystem)
		put_system(dir);
	else
		kfree(dir);

	return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	struct event_subsystem *system = dir->subsystem;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct trace_subsystem_dir *dir = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	err = apply_subsystem_event_filter(dir, buf);
	kfree(buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, trace_seq_used(s));

	kfree(s);

	return r;
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;
	struct trace_pid_list *no_pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * event_mutex is held.
	 */
	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     mutex_is_locked(&event_mutex));
	no_pid_list = rcu_dereference_protected(tr->filtered_no_pids,
					     mutex_is_locked(&event_mutex));

	this_cpu_write(tr->array_buffer.data->ignore_pid,
		       trace_ignore_this_task(pid_list, no_pid_list, current));
}

static void register_pid_events(struct trace_array *tr)
{
	/*
	 * Register a probe that is called before all other probes
	 * to set ignore_pid if next or prev do not match.
	 * Register a probe that is called after all other probes
	 * to only keep ignore_pid set if next pid matches.
	 */
	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
					 tr, 0);

	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
					 tr, 0);

	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
					     tr, INT_MAX);
	register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
					     tr, 0);

	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
					 tr, INT_MAX);
	register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
					 tr, 0);
}

static ssize_t
event_pid_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos, int type)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *other_pids = NULL;
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;
	ssize_t ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	mutex_lock(&event_mutex);

	if (type == TRACE_PIDS) {
		filtered_pids = rcu_dereference_protected(tr->filtered_pids,
							  lockdep_is_held(&event_mutex));
		other_pids = rcu_dereference_protected(tr->filtered_no_pids,
							  lockdep_is_held(&event_mutex));
	} else {
		filtered_pids = rcu_dereference_protected(tr->filtered_no_pids,
							  lockdep_is_held(&event_mutex));
		other_pids = rcu_dereference_protected(tr->filtered_pids,
							  lockdep_is_held(&event_mutex));
	}

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	if (type == TRACE_PIDS)
		rcu_assign_pointer(tr->filtered_pids, pid_list);
	else
		rcu_assign_pointer(tr->filtered_no_pids, pid_list);

	list_for_each_entry(file, &tr->events, list) {
		set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
	}

	if (filtered_pids) {
		tracepoint_synchronize_unregister();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list && !other_pids) {
		register_pid_events(tr);
	}

	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
	 */
	on_each_cpu(ignore_task_cpu, tr, 1);

 out:
	mutex_unlock(&event_mutex);

	if (ret > 0)
		*ppos += ret;

	return ret;
}

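/*
 * event_pid_write() serves both pid-filter files of an instance, e.g.:
 *
 *	echo 123 > /sys/kernel/tracing/set_event_pid
 *	echo 456 > /sys/kernel/tracing/set_event_notrace_pid
 *
 * The first limits events to the listed pids, the second hides them.
 */
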
static ssize_t
ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
}

static ssize_t
ftrace_event_npid_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	return event_pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
}

static int ftrace_event_avail_open(struct inode *inode, struct file *file);
static int ftrace_event_set_open(struct inode *inode, struct file *file);
static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
static int ftrace_event_set_npid_open(struct inode *inode, struct file *file);
static int ftrace_event_release(struct inode *inode, struct file *file);

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_pid_seq_ops = {
	.start = p_start,
	.next = p_next,
	.show = trace_pid_show,
	.stop = p_stop,
};

static const struct seq_operations show_set_no_pid_seq_ops = {
	.start = np_start,
	.next = np_next,
	.show = trace_pid_show,
	.stop = p_stop,
};

2314c4ae 1831static const struct file_operations ftrace_avail_fops = {
15075cac 1832 .open = ftrace_event_avail_open,
2314c4ae
SR
1833 .read = seq_read,
1834 .llseek = seq_lseek,
1835 .release = seq_release,
1836};
1837
b77e38aa 1838static const struct file_operations ftrace_set_event_fops = {
15075cac 1839 .open = ftrace_event_set_open,
b77e38aa
SR
1840 .read = seq_read,
1841 .write = ftrace_event_write,
1842 .llseek = seq_lseek,
f77d09a3 1843 .release = ftrace_event_release,
b77e38aa
SR
1844};
1845
49090107
SRRH
1846static const struct file_operations ftrace_set_event_pid_fops = {
1847 .open = ftrace_event_set_pid_open,
1848 .read = seq_read,
1849 .write = ftrace_event_pid_write,
1850 .llseek = seq_lseek,
1851 .release = ftrace_event_release,
1852};
1853
27683626
SRV
1854static const struct file_operations ftrace_set_event_notrace_pid_fops = {
1855 .open = ftrace_event_set_npid_open,
1856 .read = seq_read,
1857 .write = ftrace_event_npid_write,
1858 .llseek = seq_lseek,
1859 .release = ftrace_event_release,
1860};
1861
1473e441 1862static const struct file_operations ftrace_enable_fops = {
bf682c31 1863 .open = tracing_open_generic,
1473e441
SR
1864 .read = event_enable_read,
1865 .write = event_enable_write,
6038f373 1866 .llseek = default_llseek,
1473e441
SR
1867};
1868
981d081e 1869static const struct file_operations ftrace_event_format_fops = {
2a37a3df
SR
1870 .open = trace_format_open,
1871 .read = seq_read,
1872 .llseek = seq_lseek,
1873 .release = seq_release,
981d081e
SR
1874};
1875
23725aee 1876static const struct file_operations ftrace_event_id_fops = {
23725aee 1877 .read = event_id_read,
6038f373 1878 .llseek = default_llseek,
23725aee
PZ
1879};
1880
7ce7e424
TZ
1881static const struct file_operations ftrace_event_filter_fops = {
1882 .open = tracing_open_generic,
1883 .read = event_filter_read,
1884 .write = event_filter_write,
6038f373 1885 .llseek = default_llseek,
7ce7e424
TZ
1886};
1887
cfb180f3 1888static const struct file_operations ftrace_subsystem_filter_fops = {
e9dbfae5 1889 .open = subsystem_open,
cfb180f3
TZ
1890 .read = subsystem_filter_read,
1891 .write = subsystem_filter_write,
6038f373 1892 .llseek = default_llseek,
e9dbfae5 1893 .release = subsystem_release,
cfb180f3
TZ
1894};
1895
8ae79a13 1896static const struct file_operations ftrace_system_enable_fops = {
40ee4dff 1897 .open = subsystem_open,
8ae79a13
SR
1898 .read = system_enable_read,
1899 .write = system_enable_write,
6038f373 1900 .llseek = default_llseek,
40ee4dff 1901 .release = subsystem_release,
8ae79a13
SR
1902};
1903
ae63b31e
SR
1904static const struct file_operations ftrace_tr_enable_fops = {
1905 .open = system_tr_open,
1906 .read = system_enable_read,
1907 .write = system_enable_write,
1908 .llseek = default_llseek,
1909 .release = subsystem_release,
1910};
1911
d1b182a8
SR
1912static const struct file_operations ftrace_show_header_fops = {
1913 .open = tracing_open_generic,
1914 .read = show_header,
6038f373 1915 .llseek = default_llseek,
d1b182a8
SR
1916};
1917
ae63b31e
SR
1918static int
1919ftrace_event_open(struct inode *inode, struct file *file,
1920 const struct seq_operations *seq_ops)
1473e441 1921{
ae63b31e
SR
1922 struct seq_file *m;
1923 int ret;
1473e441 1924
17911ff3
SRV
1925 ret = security_locked_down(LOCKDOWN_TRACEFS);
1926 if (ret)
1927 return ret;
1928
ae63b31e
SR
1929 ret = seq_open(file, seq_ops);
1930 if (ret < 0)
1931 return ret;
1932 m = file->private_data;
1933 /* copy tr over to seq ops */
1934 m->private = inode->i_private;
1473e441 1935
ae63b31e 1936 return ret;
1473e441
SR
1937}
1938
f77d09a3
AL
1939static int ftrace_event_release(struct inode *inode, struct file *file)
1940{
1941 struct trace_array *tr = inode->i_private;
1942
1943 trace_array_put(tr);
1944
1945 return seq_release(inode, file);
1946}
1947
15075cac
SR
1948static int
1949ftrace_event_avail_open(struct inode *inode, struct file *file)
1950{
1951 const struct seq_operations *seq_ops = &show_event_seq_ops;
1952
17911ff3 1953 /* Checks for tracefs lockdown */
ae63b31e 1954 return ftrace_event_open(inode, file, seq_ops);
15075cac
SR
1955}
1956
1957static int
1958ftrace_event_set_open(struct inode *inode, struct file *file)
1959{
1960 const struct seq_operations *seq_ops = &show_set_event_seq_ops;
ae63b31e 1961 struct trace_array *tr = inode->i_private;
f77d09a3
AL
1962 int ret;
1963
8530dec6
SRV
1964 ret = tracing_check_open_get_tr(tr);
1965 if (ret)
1966 return ret;
15075cac
SR
1967
1968 if ((file->f_mode & FMODE_WRITE) &&
1969 (file->f_flags & O_TRUNC))
ae63b31e 1970 ftrace_clear_events(tr);
15075cac 1971
f77d09a3
AL
1972 ret = ftrace_event_open(inode, file, seq_ops);
1973 if (ret < 0)
1974 trace_array_put(tr);
1975 return ret;
ae63b31e
SR
1976}
1977
49090107
SRRH
1978static int
1979ftrace_event_set_pid_open(struct inode *inode, struct file *file)
1980{
1981 const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
1982 struct trace_array *tr = inode->i_private;
1983 int ret;
1984
8530dec6
SRV
1985 ret = tracing_check_open_get_tr(tr);
1986 if (ret)
1987 return ret;
49090107
SRRH
1988
1989 if ((file->f_mode & FMODE_WRITE) &&
1990 (file->f_flags & O_TRUNC))
27683626
SRV
1991 ftrace_clear_event_pids(tr, TRACE_PIDS);
1992
1993 ret = ftrace_event_open(inode, file, seq_ops);
1994 if (ret < 0)
1995 trace_array_put(tr);
1996 return ret;
1997}
1998
1999static int
2000ftrace_event_set_npid_open(struct inode *inode, struct file *file)
2001{
2002 const struct seq_operations *seq_ops = &show_set_no_pid_seq_ops;
2003 struct trace_array *tr = inode->i_private;
2004 int ret;
2005
2006 ret = tracing_check_open_get_tr(tr);
2007 if (ret)
2008 return ret;
2009
2010 if ((file->f_mode & FMODE_WRITE) &&
2011 (file->f_flags & O_TRUNC))
2012 ftrace_clear_event_pids(tr, TRACE_NO_PIDS);
49090107
SRRH
2013
2014 ret = ftrace_event_open(inode, file, seq_ops);
2015 if (ret < 0)
2016 trace_array_put(tr);
2017 return ret;
2018}
2019
ae63b31e
SR
2020static struct event_subsystem *
2021create_new_subsystem(const char *name)
2022{
2023 struct event_subsystem *system;
2024
2025 /* need to create new entry */
2026 system = kmalloc(sizeof(*system), GFP_KERNEL);
2027 if (!system)
2028 return NULL;
2029
2030 system->ref_count = 1;
6e94a780
SR
2031
2032 /* Only duplicate the name if it is dynamic (kprobes and modules) */
79ac6ef5
RV
2033 system->name = kstrdup_const(name, GFP_KERNEL);
2034 if (!system->name)
2035 goto out_free;
ae63b31e
SR
2036
2037 system->filter = NULL;
2038
2039 system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
2040 if (!system->filter)
2041 goto out_free;
2042
2043 list_add(&system->list, &event_subsystems);
2044
2045 return system;
2046
2047 out_free:
79ac6ef5 2048 kfree_const(system->name);
ae63b31e
SR
2049 kfree(system);
2050 return NULL;
15075cac
SR
2051}
2052
6ecc2d1c 2053static struct dentry *
ae63b31e 2054event_subsystem_dir(struct trace_array *tr, const char *name,
7f1d2f82 2055 struct trace_event_file *file, struct dentry *parent)
6ecc2d1c 2056{
7967b3e0 2057 struct trace_subsystem_dir *dir;
6ecc2d1c 2058 struct event_subsystem *system;
e1112b4d 2059 struct dentry *entry;
6ecc2d1c
SR
2060
2061 /* First see if we did not already create this dir */
ae63b31e
SR
2062 list_for_each_entry(dir, &tr->systems, list) {
2063 system = dir->subsystem;
dc82ec98 2064 if (strcmp(system->name, name) == 0) {
ae63b31e
SR
2065 dir->nr_events++;
2066 file->system = dir;
2067 return dir->entry;
dc82ec98 2068 }
6ecc2d1c
SR
2069 }
2070
ae63b31e
SR
2071 /* Now see if the system itself exists. */
2072 list_for_each_entry(system, &event_subsystems, list) {
2073 if (strcmp(system->name, name) == 0)
2074 break;
6ecc2d1c 2075 }
ae63b31e
SR
2076 /* Reset system variable when not found */
2077 if (&system->list == &event_subsystems)
2078 system = NULL;
6ecc2d1c 2079
ae63b31e
SR
2080 dir = kmalloc(sizeof(*dir), GFP_KERNEL);
2081 if (!dir)
2082 goto out_fail;
6ecc2d1c 2083
ae63b31e
SR
2084 if (!system) {
2085 system = create_new_subsystem(name);
2086 if (!system)
2087 goto out_free;
2088 } else
2089 __get_system(system);
2090
8434dc93 2091 dir->entry = tracefs_create_dir(name, parent);
ae63b31e 2092 if (!dir->entry) {
3448bac3 2093 pr_warn("Failed to create system directory %s\n", name);
ae63b31e
SR
2094 __put_system(system);
2095 goto out_free;
6d723736
SR
2096 }
2097
ae63b31e
SR
2098 dir->tr = tr;
2099 dir->ref_count = 1;
2100 dir->nr_events = 1;
2101 dir->subsystem = system;
2102 file->system = dir;
8b372562 2103
8434dc93 2104 entry = tracefs_create_file("filter", 0644, dir->entry, dir,
e1112b4d 2105 &ftrace_subsystem_filter_fops);
8b372562
TZ
2106 if (!entry) {
2107 kfree(system->filter);
2108 system->filter = NULL;
8434dc93 2109 pr_warn("Could not create tracefs '%s/filter' entry\n", name);
8b372562 2110 }
e1112b4d 2111
ae63b31e 2112 trace_create_file("enable", 0644, dir->entry, dir,
f3f3f009 2113 &ftrace_system_enable_fops);
8ae79a13 2114
ae63b31e
SR
2115 list_add(&dir->list, &tr->systems);
2116
2117 return dir->entry;
2118
2119 out_free:
2120 kfree(dir);
2121 out_fail:
2122 /* Only print this message if the failure was a memory allocation */
2123 if (!dir || !system)
3448bac3 2124 pr_warn("No memory to create event subsystem %s\n", name);
ae63b31e 2125 return NULL;
6ecc2d1c
SR
2126}
2127
ac343da7
MH
2128static int
2129event_define_fields(struct trace_event_call *call)
2130{
2131 struct list_head *head;
2132 int ret = 0;
2133
2134 /*
2135 * Other events may have the same class. Only update
2136 * the fields if they are not already defined.
2137 */
2138 head = trace_get_fields(call);
2139 if (list_empty(head)) {
2140 struct trace_event_fields *field = call->class->fields_array;
2141 unsigned int offset = sizeof(struct trace_entry);
2142
2143 for (; field->type; field++) {
2144 if (field->type == TRACE_FUNCTION_TYPE) {
2145 field->define_fields(call);
2146 break;
2147 }
2148
2149 offset = ALIGN(offset, field->align);
2150 ret = trace_define_field(call, field->type, field->name,
2151 offset, field->size,
2152 field->is_signed, field->filter_type);
2153 if (WARN_ON_ONCE(ret)) {
2154 pr_err("error code is %d\n", ret);
2155 break;
2156 }
2157
2158 offset += field->size;
2159 }
2160 }
2161
2162 return ret;
2163}
2164
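/*
 * Illustrative sketch (not part of this file): the shape of the
 * fields_array walked by event_define_fields() above. It is normally
 * generated by the TRACE_EVENT() macros; the field shown here is
 * hypothetical.
 */
static struct trace_event_fields example_fields_array[] = {
	{
		.type		= "unsigned long",
		.name		= "my_addr",	/* hypothetical field */
		.size		= sizeof(unsigned long),
		.align		= __alignof__(unsigned long),
		.is_signed	= 0,
		.filter_type	= FILTER_OTHER,
	},
	{}	/* sentinel: the walk stops when field->type is NULL */
};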
1473e441 2165static int
7f1d2f82 2166event_create_dir(struct dentry *parent, struct trace_event_file *file)
1473e441 2167{
2425bcb9 2168 struct trace_event_call *call = file->event_call;
ae63b31e 2169 struct trace_array *tr = file->tr;
ae63b31e 2170 struct dentry *d_events;
de7b2973 2171 const char *name;
fd994989 2172 int ret;
1473e441 2173
6ecc2d1c
SR
2174 /*
2175 * If the trace point header did not define TRACE_SYSTEM
2176 * then the system would be called "TRACE_SYSTEM".
2177 */
ae63b31e
SR
2178 if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
2179 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
2180 if (!d_events)
2181 return -ENOMEM;
2182 } else
2183 d_events = parent;
2184
687fcc4a 2185 name = trace_event_name(call);
8434dc93 2186 file->dir = tracefs_create_dir(name, d_events);
ae63b31e 2187 if (!file->dir) {
8434dc93 2188 pr_warn("Could not create tracefs '%s' directory\n", name);
1473e441
SR
2189 return -1;
2190 }
2191
9b63776f 2192 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
ae63b31e 2193 trace_create_file("enable", 0644, file->dir, file,
620a30e9 2194 &ftrace_enable_fops);
1473e441 2195
2239291a 2196#ifdef CONFIG_PERF_EVENTS
a1d0ce82 2197 if (call->event.type && call->class->reg)
1a11126b 2198 trace_create_file("id", 0444, file->dir,
620a30e9
ON
2199 (void *)(long)call->event.type,
2200 &ftrace_event_id_fops);
2239291a 2201#endif
23725aee 2202
ac343da7
MH
2203 ret = event_define_fields(call);
2204 if (ret < 0) {
2205 pr_warn("Could not initialize trace point events/%s\n", name);
2206 return ret;
cf027f64
TZ
2207 }
2208
854145e0
CH
2209 /*
2210 * Only event directories that can be enabled should have
5d948c86 2211 * triggers or filters.
854145e0 2212 */
5d948c86
SRV
2213 if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
2214 trace_create_file("filter", 0644, file->dir, file,
2215 &ftrace_event_filter_fops);
2216
854145e0
CH
2217 trace_create_file("trigger", 0644, file->dir, file,
2218 &event_trigger_fops);
5d948c86 2219 }
85f2b082 2220
7ef224d1
TZ
2221#ifdef CONFIG_HIST_TRIGGERS
2222 trace_create_file("hist", 0444, file->dir, file,
2223 &event_hist_fops);
2d19bd79
TZ
2224#endif
2225#ifdef CONFIG_HIST_TRIGGERS_DEBUG
2226 trace_create_file("hist_debug", 0444, file->dir, file,
2227 &event_hist_debug_fops);
7ef224d1 2228#endif
ae63b31e 2229 trace_create_file("format", 0444, file->dir, call,
620a30e9 2230 &ftrace_event_format_fops);
6d723736 2231
6c3edaf9
CW
2232#ifdef CONFIG_TRACE_EVENT_INJECT
2233 if (call->event.type && call->class->reg)
2234 trace_create_file("inject", 0200, file->dir, file,
2235 &event_inject_fops);
2236#endif
2237
6d723736
SR
2238 return 0;
2239}
2240
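/*
 * Illustrative layout (not part of this file): for an event such as
 * sched:sched_switch, event_create_dir() above produces
 *
 *	events/sched/sched_switch/enable
 *	events/sched/sched_switch/id
 *	events/sched/sched_switch/filter
 *	events/sched/sched_switch/trigger
 *	events/sched/sched_switch/format
 *
 * plus hist, hist_debug and inject files when the corresponding
 * config options are enabled.
 */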
2425bcb9 2241static void remove_event_from_tracers(struct trace_event_call *call)
ae63b31e 2242{
7f1d2f82 2243 struct trace_event_file *file;
ae63b31e
SR
2244 struct trace_array *tr;
2245
2246 do_for_each_event_file_safe(tr, file) {
ae63b31e
SR
2247 if (file->event_call != call)
2248 continue;
2249
f6a84bdc 2250 remove_event_file_dir(file);
ae63b31e
SR
2251 /*
2252 * The do_for_each_event_file_safe() is
2253 * a double loop. After finding the call for this
2254 * trace_array, we use break to jump to the next
2255 * trace_array.
2256 */
2257 break;
2258 } while_for_each_event_file();
2259}
2260
2425bcb9 2261static void event_remove(struct trace_event_call *call)
8781915a 2262{
ae63b31e 2263 struct trace_array *tr;
7f1d2f82 2264 struct trace_event_file *file;
ae63b31e
SR
2265
2266 do_for_each_event_file(tr, file) {
2267 if (file->event_call != call)
2268 continue;
065e63f9
SRV
2269
2270 if (file->flags & EVENT_FILE_FL_WAS_ENABLED)
2271 tr->clear_trace = true;
2272
ae63b31e
SR
2273 ftrace_event_enable_disable(file, 0);
2274 /*
2275 * The do_for_each_event_file() is
2276 * a double loop. After finding the call for this
2277 * trace_array, we use break to jump to the next
2278 * trace_array.
2279 */
2280 break;
2281 } while_for_each_event_file();
2282
8781915a 2283 if (call->event.funcs)
9023c930 2284 __unregister_trace_event(&call->event);
ae63b31e 2285 remove_event_from_tracers(call);
8781915a
EG
2286 list_del(&call->list);
2287}
2288
2425bcb9 2289static int event_init(struct trace_event_call *call)
8781915a
EG
2290{
2291 int ret = 0;
de7b2973 2292 const char *name;
8781915a 2293
687fcc4a 2294 name = trace_event_name(call);
de7b2973 2295 if (WARN_ON(!name))
8781915a
EG
2296 return -EINVAL;
2297
2298 if (call->class->raw_init) {
2299 ret = call->class->raw_init(call);
2300 if (ret < 0 && ret != -ENOSYS)
3448bac3 2301 pr_warn("Could not initialize trace events/%s\n", name);
8781915a
EG
2302 }
2303
2304 return ret;
2305}
2306
67ead0a6 2307static int
2425bcb9 2308__register_event(struct trace_event_call *call, struct module *mod)
bd1a5c84 2309{
bd1a5c84 2310 int ret;
6d723736 2311
8781915a
EG
2312 ret = event_init(call);
2313 if (ret < 0)
2314 return ret;
701970b3 2315
ae63b31e 2316 list_add(&call->list, &ftrace_events);
67ead0a6 2317 call->mod = mod;
88f70d75 2318
ae63b31e 2319 return 0;
bd1a5c84
MH
2320}
2321
67ec0d85 2322static char *eval_replace(char *ptr, struct trace_eval_map *map, int len)
0c564a53
SRRH
2323{
2324 int rlen;
2325 int elen;
2326
67ec0d85 2327 /* Find the length of the eval value as a string */
00f4b652 2328 elen = snprintf(ptr, 0, "%ld", map->eval_value);
0c564a53
SRRH
2329 /* Make sure there's enough room to replace the string with the value */
2330 if (len < elen)
2331 return NULL;
2332
00f4b652 2333 snprintf(ptr, elen + 1, "%ld", map->eval_value);
0c564a53
SRRH
2334
2335 /* Get the length of the rest of the string after the eval */
2336 rlen = strlen(ptr + len);
2337 memmove(ptr + elen, ptr + len, rlen);
2338 /* Make sure we end the new string */
2339 ptr[elen + rlen] = 0;
2340
2341 return ptr + elen;
2342}
2343
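/*
 * Illustrative sketch (not part of this file): the same in-place
 * splice that eval_replace() performs, shown on a plain buffer. It
 * assumes the replacement is never longer than the token it replaces,
 * which the length check above guarantees for the eval case.
 */
static char *splice_token(char *ptr, const char *val, int len)
{
	int elen = strlen(val);			/* replacement length */
	int rlen = strlen(ptr + len);		/* tail after the token */

	memcpy(ptr, val, elen);			/* overwrite the token */
	memmove(ptr + elen, ptr + len, rlen);	/* pull the tail left */
	ptr[elen + rlen] = '\0';		/* re-terminate */

	return ptr + elen;	/* first character past the replacement */
}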
2425bcb9 2344static void update_event_printk(struct trace_event_call *call,
00f4b652 2345 struct trace_eval_map *map)
0c564a53
SRRH
2346{
2347 char *ptr;
2348 int quote = 0;
00f4b652 2349 int len = strlen(map->eval_string);
0c564a53
SRRH
2350
2351 for (ptr = call->print_fmt; *ptr; ptr++) {
2352 if (*ptr == '\\') {
2353 ptr++;
2354 /* paranoid */
2355 if (!*ptr)
2356 break;
2357 continue;
2358 }
2359 if (*ptr == '"') {
2360 quote ^= 1;
2361 continue;
2362 }
2363 if (quote)
2364 continue;
2365 if (isdigit(*ptr)) {
2366 /* skip numbers */
2367 do {
2368 ptr++;
2369 /* Check for alpha chars like ULL */
2370 } while (isalnum(*ptr));
3193899d
SRRH
2371 if (!*ptr)
2372 break;
0c564a53
SRRH
2373 /*
2374 * A number must have some kind of delimiter after
2375 * it, and we can ignore that too.
2376 */
2377 continue;
2378 }
2379 if (isalpha(*ptr) || *ptr == '_') {
00f4b652 2380 if (strncmp(map->eval_string, ptr, len) == 0 &&
0c564a53 2381 !isalnum(ptr[len]) && ptr[len] != '_') {
67ec0d85
JL
2382 ptr = eval_replace(ptr, map, len);
2383 /* enum/sizeof string smaller than value */
0c564a53
SRRH
2384 if (WARN_ON_ONCE(!ptr))
2385 return;
2386 /*
67ec0d85 2387 * No need to decrement here, as eval_replace()
0c564a53 2388 * returns the pointer to the character past
67ec0d85 2389 * the eval, and two evals can not be placed
0c564a53
SRRH
2390 * back to back without something in between.
2391 * We can skip that something in between.
2392 */
2393 continue;
2394 }
2395 skip_more:
2396 do {
2397 ptr++;
2398 } while (isalnum(*ptr) || *ptr == '_');
3193899d
SRRH
2399 if (!*ptr)
2400 break;
0c564a53
SRRH
2401 /*
2402 * If what comes after this variable is a '.' or
2403 * '->' then we can continue to ignore that string.
2404 */
2405 if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2406 ptr += *ptr == '.' ? 1 : 2;
3193899d
SRRH
2407 if (!*ptr)
2408 break;
0c564a53
SRRH
2409 goto skip_more;
2410 }
2411 /*
2412 * Once again, we can skip the delimiter that came
2413 * after the string.
2414 */
2415 continue;
2416 }
2417 }
2418}
2419
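/*
 * Worked example (not part of this file): with eval_string
 * "TASK_RUNNING" and eval_value 0, a print_fmt of
 *
 *	"state=%d", REC->state == TASK_RUNNING
 *
 * is rewritten in place by update_event_printk() to
 *
 *	"state=%d", REC->state == 0
 *
 * The double-quoted format string itself is skipped, so a
 * TASK_RUNNING appearing between the quotes would be left untouched.
 */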
f57a4143 2420void trace_event_eval_update(struct trace_eval_map **map, int len)
0c564a53 2421{
2425bcb9 2422 struct trace_event_call *call, *p;
0c564a53 2423 const char *last_system = NULL;
1ebe1eaf 2424 bool first = false;
0c564a53
SRRH
2425 int last_i;
2426 int i;
2427
2428 down_write(&trace_event_sem);
2429 list_for_each_entry_safe(call, p, &ftrace_events, list) {
2430 /* events are usually grouped together with systems */
2431 if (!last_system || call->class->system != last_system) {
1ebe1eaf 2432 first = true;
0c564a53
SRRH
2433 last_i = 0;
2434 last_system = call->class->system;
2435 }
2436
1ebe1eaf
SRV
2437 /*
2438 * Since calls are grouped by systems, the likelihood that the
2439 * next call in the iteration belongs to the same system as the
2b5894cc 2440 * previous call is high. As an optimization, we skip searching
1ebe1eaf
SRV
2441 * for a map[] that matches the call's system if the last call
2442 * was from the same system. That's what last_i is for. If the
2443 * call has the same system as the previous call, then last_i
2444 * will be the index of the first map[] that has a matching
2445 * system.
2446 */
0c564a53
SRRH
2447 for (i = last_i; i < len; i++) {
2448 if (call->class->system == map[i]->system) {
2449 /* Save the first system if need be */
1ebe1eaf 2450 if (first) {
0c564a53 2451 last_i = i;
1ebe1eaf
SRV
2452 first = false;
2453 }
0c564a53
SRRH
2454 update_event_printk(call, map[i]);
2455 }
2456 }
2457 }
2458 up_write(&trace_event_sem);
2459}
2460
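/*
 * Worked example (not part of this file): if map[] holds entries for
 * the systems { "sched", "sched", "irq" } and two consecutive calls
 * both belong to "sched", the second call starts its scan at last_i,
 * the index of the first "sched" entry, instead of rescanning from 0.
 */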
7f1d2f82 2461static struct trace_event_file *
2425bcb9 2462trace_create_new_event(struct trace_event_call *call,
da511bf3
SRRH
2463 struct trace_array *tr)
2464{
7f1d2f82 2465 struct trace_event_file *file;
da511bf3
SRRH
2466
2467 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2468 if (!file)
2469 return NULL;
2470
2471 file->event_call = call;
2472 file->tr = tr;
2473 atomic_set(&file->sm_ref, 0);
85f2b082
TZ
2474 atomic_set(&file->tm_ref, 0);
2475 INIT_LIST_HEAD(&file->triggers);
da511bf3
SRRH
2476 list_add(&file->list, &tr->events);
2477
2478 return file;
2479}
2480
ae63b31e
SR
2481/* Add an event to a trace directory */
2482static int
2425bcb9 2483__trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
ae63b31e 2484{
7f1d2f82 2485 struct trace_event_file *file;
ae63b31e 2486
da511bf3 2487 file = trace_create_new_event(call, tr);
ae63b31e
SR
2488 if (!file)
2489 return -ENOMEM;
2490
a838deab
MH
2491 if (eventdir_initialized)
2492 return event_create_dir(tr->event_dir, file);
2493 else
2494 return event_define_fields(call);
ae63b31e
SR
2495}
2496
77248221
SR
2497/*
2498 * Just create a descriptor for early init. A descriptor is required
2499 * for enabling events at boot. We want to enable events before
2500 * the filesystem is initialized.
2501 */
ce66f613 2502static int
2425bcb9 2503__trace_early_add_new_event(struct trace_event_call *call,
77248221
SR
2504 struct trace_array *tr)
2505{
7f1d2f82 2506 struct trace_event_file *file;
77248221 2507
da511bf3 2508 file = trace_create_new_event(call, tr);
77248221
SR
2509 if (!file)
2510 return -ENOMEM;
2511
ac343da7 2512 return event_define_fields(call);
77248221
SR
2513}
2514
ae63b31e 2515struct ftrace_module_file_ops;
2425bcb9 2516static void __add_event_to_tracers(struct trace_event_call *call);
ae63b31e 2517
7e1413ed
SRV
2518/* Add an additional event_call dynamically */
2519int trace_add_event_call(struct trace_event_call *call)
bd1a5c84
MH
2520{
2521 int ret;
fc800a10
MH
2522 lockdep_assert_held(&event_mutex);
2523
12ecef0c 2524 mutex_lock(&trace_types_lock);
701970b3 2525
ae63b31e
SR
2526 ret = __register_event(call, NULL);
2527 if (ret >= 0)
779c5e37 2528 __add_event_to_tracers(call);
a2ca5e03 2529
a8227415 2530 mutex_unlock(&trace_types_lock);
fc800a10
MH
2531 return ret;
2532}
2533
4fead8e4 2534/*
a8227415
AL
2535 * Must be called under locking of trace_types_lock, event_mutex and
2536 * trace_event_sem.
4fead8e4 2537 */
2425bcb9 2538static void __trace_remove_event_call(struct trace_event_call *call)
bd1a5c84 2539{
8781915a 2540 event_remove(call);
bd1a5c84 2541 trace_destroy_fields(call);
57375747
ON
2542 free_event_filter(call->filter);
2543 call->filter = NULL;
bd1a5c84
MH
2544}
2545
2425bcb9 2546static int probe_remove_event_call(struct trace_event_call *call)
2816c551
ON
2547{
2548 struct trace_array *tr;
7f1d2f82 2549 struct trace_event_file *file;
2816c551
ON
2550
2551#ifdef CONFIG_PERF_EVENTS
2552 if (call->perf_refcount)
2553 return -EBUSY;
2554#endif
2555 do_for_each_event_file(tr, file) {
2556 if (file->event_call != call)
2557 continue;
2558 /*
2559 * We can't rely on ftrace_event_enable_disable(enable => 0)
5d6ad960 2560 * we are going to do, EVENT_FILE_FL_SOFT_MODE can suppress
2816c551
ON
2561 * TRACE_REG_UNREGISTER.
2562 */
5d6ad960 2563 if (file->flags & EVENT_FILE_FL_ENABLED)
2816c551 2564 return -EBUSY;
2ba64035
SRRH
2565 /*
2566 * The do_for_each_event_file() is
2567 * a double loop. After finding the call for this
2568 * trace_array, we use break to jump to the next
2569 * trace_array.
2570 */
2816c551
ON
2571 break;
2572 } while_for_each_event_file();
2573
2574 __trace_remove_event_call(call);
2575
2576 return 0;
2577}
2578
7e1413ed
SRV
2579/* Remove an event_call */
2580int trace_remove_event_call(struct trace_event_call *call)
bd1a5c84 2581{
2816c551
ON
2582 int ret;
2583
fc800a10
MH
2584 lockdep_assert_held(&event_mutex);
2585
12ecef0c 2586 mutex_lock(&trace_types_lock);
52f6ad6d 2587 down_write(&trace_event_sem);
2816c551 2588 ret = probe_remove_event_call(call);
52f6ad6d 2589 up_write(&trace_event_sem);
a8227415 2590 mutex_unlock(&trace_types_lock);
fc800a10
MH
2591
2592 return ret;
2593}
2594
bd1a5c84
MH
2595#define for_each_event(event, start, end) \
2596 for (event = start; \
2597 (unsigned long)event < (unsigned long)end; \
2598 event++)
2599
2600#ifdef CONFIG_MODULES
2601
6d723736
SR
2602static void trace_module_add_events(struct module *mod)
2603{
2425bcb9 2604 struct trace_event_call **call, **start, **end;
6d723736 2605
45ab2813
SRRH
2606 if (!mod->num_trace_events)
2607 return;
2608
2609 /* Don't add infrastructure for mods without tracepoints */
2610 if (trace_module_has_bad_taint(mod)) {
2611 pr_err("%s: module has bad taint, not creating trace events\n",
2612 mod->name);
2613 return;
2614 }
2615
6d723736
SR
2616 start = mod->trace_events;
2617 end = mod->trace_events + mod->num_trace_events;
2618
6d723736 2619 for_each_event(call, start, end) {
ae63b31e 2620 __register_event(*call, mod);
779c5e37 2621 __add_event_to_tracers(*call);
6d723736
SR
2622 }
2623}
2624
2625static void trace_module_remove_events(struct module *mod)
2626{
2425bcb9 2627 struct trace_event_call *call, *p;
6d723736 2628
52f6ad6d 2629 down_write(&trace_event_sem);
6d723736 2630 list_for_each_entry_safe(call, p, &ftrace_events, list) {
065e63f9 2631 if (call->mod == mod)
bd1a5c84 2632 __trace_remove_event_call(call);
6d723736 2633 }
52f6ad6d 2634 up_write(&trace_event_sem);
9456f0fa
SR
2635
2636 /*
2637 * It is safest to reset the ring buffer if the module being unloaded
873c642f
SRRH
2638 * registered any events that were used. The only worry is if
2639 * a new module gets loaded, and takes on the same id as the events
2640 * of this module. When printing out the buffer, traced events left
2641 * over from this module may be passed to the new module events and
2642 * unexpected results may occur.
9456f0fa 2643 */
065e63f9 2644 tracing_reset_all_online_cpus();
6d723736
SR
2645}
2646
61f919a1
SR
2647static int trace_module_notify(struct notifier_block *self,
2648 unsigned long val, void *data)
6d723736
SR
2649{
2650 struct module *mod = data;
2651
2652 mutex_lock(&event_mutex);
12ecef0c 2653 mutex_lock(&trace_types_lock);
6d723736
SR
2654 switch (val) {
2655 case MODULE_STATE_COMING:
2656 trace_module_add_events(mod);
2657 break;
2658 case MODULE_STATE_GOING:
2659 trace_module_remove_events(mod);
2660 break;
2661 }
a8227415 2662 mutex_unlock(&trace_types_lock);
12ecef0c 2663 mutex_unlock(&event_mutex);
fd994989 2664
0340a6b7 2665 return NOTIFY_OK;
1473e441 2666}
315326c1 2667
836d481e
ON
2668static struct notifier_block trace_module_nb = {
2669 .notifier_call = trace_module_notify,
3673b8e4 2670 .priority = 1, /* higher than trace.c module notify */
836d481e 2671};
61f919a1 2672#endif /* CONFIG_MODULES */
1473e441 2673
ae63b31e
SR
2674/* Create a new event directory structure for a trace directory. */
2675static void
2676__trace_add_event_dirs(struct trace_array *tr)
2677{
2425bcb9 2678 struct trace_event_call *call;
ae63b31e
SR
2679 int ret;
2680
2681 list_for_each_entry(call, &ftrace_events, list) {
620a30e9 2682 ret = __trace_add_new_event(call, tr);
ae63b31e 2683 if (ret < 0)
3448bac3 2684 pr_warn("Could not create directory for event %s\n",
687fcc4a 2685 trace_event_name(call));
ae63b31e
SR
2686 }
2687}
2688
3c96529c 2689/* Returns any file that matches the system and event */
7f1d2f82 2690struct trace_event_file *
3c96529c 2691__find_event_file(struct trace_array *tr, const char *system, const char *event)
3cd715de 2692{
7f1d2f82 2693 struct trace_event_file *file;
2425bcb9 2694 struct trace_event_call *call;
de7b2973 2695 const char *name;
3cd715de
SRRH
2696
2697 list_for_each_entry(file, &tr->events, list) {
2698
2699 call = file->event_call;
687fcc4a 2700 name = trace_event_name(call);
3cd715de 2701
3c96529c 2702 if (!name || !call->class)
3cd715de
SRRH
2703 continue;
2704
de7b2973 2705 if (strcmp(event, name) == 0 &&
3cd715de
SRRH
2706 strcmp(system, call->class->system) == 0)
2707 return file;
2708 }
2709 return NULL;
2710}
2711
3c96529c
SRV
2712/* Returns valid trace event files that match system and event */
2713struct trace_event_file *
2714find_event_file(struct trace_array *tr, const char *system, const char *event)
2715{
2716 struct trace_event_file *file;
2717
2718 file = __find_event_file(tr, system, event);
2719 if (!file || !file->event_call->class->reg ||
2720 file->event_call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2721 return NULL;
2722
2723 return file;
2724}
2725
e3e2a2cc
TZ
2726/**
2727 * trace_get_event_file - Find and return a trace event file
2728 * @instance: The name of the trace instance containing the event
2729 * @system: The name of the system containing the event
2730 * @event: The name of the event
2731 *
2732 * Return a trace event file given the trace instance name, trace
2733 * system, and trace event name. If the instance name is NULL, it
2734 * refers to the top-level trace array.
2735 *
2736 * This function will look it up and return it if found, after calling
2737 * trace_array_get() to prevent the instance from going away, and
2738 * increment the event's module refcount to prevent it from being
2739 * removed.
2740 *
2741 * To release the file, call trace_put_event_file(), which will call
2742 * trace_array_put() and decrement the event's module refcount.
2743 *
2744 * Return: The trace event file on success, ERR_PTR otherwise.
2745 */
2746struct trace_event_file *trace_get_event_file(const char *instance,
2747 const char *system,
2748 const char *event)
2749{
2750 struct trace_array *tr = top_trace_array();
2751 struct trace_event_file *file = NULL;
2752 int ret = -EINVAL;
2753
2754 if (instance) {
2755 tr = trace_array_find_get(instance);
2756 if (!tr)
2757 return ERR_PTR(-ENOENT);
2758 } else {
2759 ret = trace_array_get(tr);
2760 if (ret)
2761 return ERR_PTR(ret);
2762 }
2763
2764 mutex_lock(&event_mutex);
2765
2766 file = find_event_file(tr, system, event);
2767 if (!file) {
2768 trace_array_put(tr);
2769 ret = -EINVAL;
2770 goto out;
2771 }
2772
2773 /* Don't let event modules unload while in use */
2774 ret = try_module_get(file->event_call->mod);
2775 if (!ret) {
2776 trace_array_put(tr);
2777 ret = -EBUSY;
2778 goto out;
2779 }
2780
2781 ret = 0;
2782 out:
2783 mutex_unlock(&event_mutex);
2784
2785 if (ret)
2786 file = ERR_PTR(ret);
2787
2788 return file;
2789}
2790EXPORT_SYMBOL_GPL(trace_get_event_file);
2791
2792/**
2793 * trace_put_event_file - Release a file from trace_get_event_file()
2794 * @file: The trace event file
2795 *
2796 * If a file was retrieved using trace_get_event_file(), this should
2797 * be called when it's no longer needed. It will cancel the previous
2798 * trace_array_get() called by that function, and decrement the
2799 * event's module refcount.
2800 */
2801void trace_put_event_file(struct trace_event_file *file)
2802{
2803 mutex_lock(&event_mutex);
2804 module_put(file->event_call->mod);
2805 mutex_unlock(&event_mutex);
2806
2807 trace_array_put(file->tr);
2808}
2809EXPORT_SYMBOL_GPL(trace_put_event_file);
2810
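/*
 * Illustrative sketch (not part of this file): the typical pairing of
 * the two helpers above from a kernel module. The system and event
 * names are examples; NULL selects the top-level instance.
 */
static int example_hold_event_file(void)
{
	struct trace_event_file *file;

	file = trace_get_event_file(NULL, "sched", "sched_switch");
	if (IS_ERR(file))
		return PTR_ERR(file);

	/* ... use the file, e.g. to attach a custom trigger ... */

	trace_put_event_file(file);
	return 0;
}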
2875a08b
SRRH
2811#ifdef CONFIG_DYNAMIC_FTRACE
2812
2813/* Avoid typos */
2814#define ENABLE_EVENT_STR "enable_event"
2815#define DISABLE_EVENT_STR "disable_event"
2816
2817struct event_probe_data {
7f1d2f82 2818 struct trace_event_file *file;
2875a08b
SRRH
2819 unsigned long count;
2820 int ref;
2821 bool enable;
2822};
2823
41794f19
SRV
2824static void update_event_probe(struct event_probe_data *data)
2825{
2826 if (data->enable)
2827 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2828 else
2829 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2830}
2831
3cd715de 2832static void
bca6c8d0 2833event_enable_probe(unsigned long ip, unsigned long parent_ip,
b5f081b5 2834 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 2835 void *data)
3cd715de 2836{
6e444319
SRV
2837 struct ftrace_func_mapper *mapper = data;
2838 struct event_probe_data *edata;
41794f19 2839 void **pdata;
3cd715de 2840
41794f19
SRV
2841 pdata = ftrace_func_mapper_find_ip(mapper, ip);
2842 if (!pdata || !*pdata)
3cd715de
SRRH
2843 return;
2844
6e444319
SRV
2845 edata = *pdata;
2846 update_event_probe(edata);
3cd715de
SRRH
2847}
2848
2849static void
bca6c8d0 2850event_enable_count_probe(unsigned long ip, unsigned long parent_ip,
b5f081b5 2851 struct trace_array *tr, struct ftrace_probe_ops *ops,
6e444319 2852 void *data)
3cd715de 2853{
6e444319
SRV
2854 struct ftrace_func_mapper *mapper = data;
2855 struct event_probe_data *edata;
41794f19 2856 void **pdata;
3cd715de 2857
41794f19
SRV
2858 pdata = ftrace_func_mapper_find_ip(mapper, ip);
2859 if (!pdata || !*pdata)
3cd715de
SRRH
2860 return;
2861
6e444319 2862 edata = *pdata;
41794f19 2863
6e444319 2864 if (!edata->count)
3cd715de
SRRH
2865 return;
2866
2867 /* Skip if the event is in a state we want to switch to */
6e444319 2868 if (edata->enable == !(edata->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
3cd715de
SRRH
2869 return;
2870
6e444319
SRV
2871 if (edata->count != -1)
2872 (edata->count)--;
3cd715de 2873
6e444319 2874 update_event_probe(edata);
3cd715de
SRRH
2875}
2876
2877static int
2878event_enable_print(struct seq_file *m, unsigned long ip,
6e444319 2879 struct ftrace_probe_ops *ops, void *data)
3cd715de 2880{
6e444319
SRV
2881 struct ftrace_func_mapper *mapper = data;
2882 struct event_probe_data *edata;
41794f19
SRV
2883 void **pdata;
2884
2885 pdata = ftrace_func_mapper_find_ip(mapper, ip);
2886
2887 if (WARN_ON_ONCE(!pdata || !*pdata))
2888 return 0;
2889
6e444319 2890 edata = *pdata;
3cd715de
SRRH
2891
2892 seq_printf(m, "%ps:", (void *)ip);
2893
2894 seq_printf(m, "%s:%s:%s",
6e444319
SRV
2895 edata->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2896 edata->file->event_call->class->system,
2897 trace_event_name(edata->file->event_call));
3cd715de 2898
6e444319 2899 if (edata->count == -1)
fa6f0cc7 2900 seq_puts(m, ":unlimited\n");
3cd715de 2901 else
6e444319 2902 seq_printf(m, ":count=%ld\n", edata->count);
3cd715de
SRRH
2903
2904 return 0;
2905}
2906
2907static int
b5f081b5 2908event_enable_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 2909 unsigned long ip, void *init_data, void **data)
3cd715de 2910{
6e444319
SRV
2911 struct ftrace_func_mapper *mapper = *data;
2912 struct event_probe_data *edata = init_data;
41794f19
SRV
2913 int ret;
2914
6e444319
SRV
2915 if (!mapper) {
2916 mapper = allocate_ftrace_func_mapper();
2917 if (!mapper)
2918 return -ENODEV;
2919 *data = mapper;
2920 }
2921
2922 ret = ftrace_func_mapper_add_ip(mapper, ip, edata);
41794f19
SRV
2923 if (ret < 0)
2924 return ret;
3cd715de 2925
6e444319
SRV
2926 edata->ref++;
2927
2928 return 0;
2929}
2930
2931static int free_probe_data(void *data)
2932{
2933 struct event_probe_data *edata = data;
41794f19 2934
6e444319
SRV
2935 edata->ref--;
2936 if (!edata->ref) {
2937 /* Remove the SOFT_MODE flag */
2938 __ftrace_event_enable_disable(edata->file, 0, 1);
2939 module_put(edata->file->event_call->mod);
2940 kfree(edata);
2941 }
3cd715de
SRRH
2942 return 0;
2943}
2944
2945static void
b5f081b5 2946event_enable_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
6e444319 2947 unsigned long ip, void *data)
3cd715de 2948{
6e444319
SRV
2949 struct ftrace_func_mapper *mapper = data;
2950 struct event_probe_data *edata;
2951
2952 if (!ip) {
2953 if (!mapper)
2954 return;
2955 free_ftrace_func_mapper(mapper, free_probe_data);
2956 return;
2957 }
41794f19 2958
6e444319 2959 edata = ftrace_func_mapper_remove_ip(mapper, ip);
41794f19 2960
6e444319 2961 if (WARN_ON_ONCE(!edata))
41794f19 2962 return;
3cd715de 2963
6e444319 2964 if (WARN_ON_ONCE(edata->ref <= 0))
3cd715de
SRRH
2965 return;
2966
6e444319 2967 free_probe_data(edata);
3cd715de
SRRH
2968}
2969
2970static struct ftrace_probe_ops event_enable_probe_ops = {
2971 .func = event_enable_probe,
2972 .print = event_enable_print,
2973 .init = event_enable_init,
2974 .free = event_enable_free,
2975};
2976
2977static struct ftrace_probe_ops event_enable_count_probe_ops = {
2978 .func = event_enable_count_probe,
2979 .print = event_enable_print,
2980 .init = event_enable_init,
2981 .free = event_enable_free,
2982};
2983
2984static struct ftrace_probe_ops event_disable_probe_ops = {
2985 .func = event_enable_probe,
2986 .print = event_enable_print,
2987 .init = event_enable_init,
2988 .free = event_enable_free,
2989};
2990
2991static struct ftrace_probe_ops event_disable_count_probe_ops = {
2992 .func = event_enable_count_probe,
2993 .print = event_enable_print,
2994 .init = event_enable_init,
2995 .free = event_enable_free,
2996};
2997
2998static int
04ec7bb6 2999event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
3cd715de
SRRH
3000 char *glob, char *cmd, char *param, int enabled)
3001{
7f1d2f82 3002 struct trace_event_file *file;
3cd715de
SRRH
3003 struct ftrace_probe_ops *ops;
3004 struct event_probe_data *data;
3005 const char *system;
3006 const char *event;
3007 char *number;
3008 bool enable;
3009 int ret;
3010
dc81e5e3
YY
3011 if (!tr)
3012 return -ENODEV;
3013
3cd715de 3014 /* hash funcs only work with set_ftrace_filter */
8092e808 3015 if (!enabled || !param)
3cd715de
SRRH
3016 return -EINVAL;
3017
3018 system = strsep(&param, ":");
3019 if (!param)
3020 return -EINVAL;
3021
3022 event = strsep(&param, ":");
3023
3024 mutex_lock(&event_mutex);
3025
3026 ret = -EINVAL;
3027 file = find_event_file(tr, system, event);
3028 if (!file)
3029 goto out;
3030
3031 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
3032
3033 if (enable)
3034 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
3035 else
3036 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
3037
3038 if (glob[0] == '!') {
7b60f3d8 3039 ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
3cd715de
SRRH
3040 goto out;
3041 }
3042
3043 ret = -ENOMEM;
41794f19 3044
3cd715de
SRRH
3045 data = kzalloc(sizeof(*data), GFP_KERNEL);
3046 if (!data)
3047 goto out;
3048
3049 data->enable = enable;
3050 data->count = -1;
3051 data->file = file;
3052
3053 if (!param)
3054 goto out_reg;
3055
3056 number = strsep(&param, ":");
3057
3058 ret = -EINVAL;
3059 if (!strlen(number))
3060 goto out_free;
3061
3062 /*
3063 * We use the callback data field (which is a pointer)
3064 * as our counter.
3065 */
3066 ret = kstrtoul(number, 0, &data->count);
3067 if (ret)
3068 goto out_free;
3069
3070 out_reg:
3071 /* Don't let event modules unload while probe registered */
3072 ret = try_module_get(file->event_call->mod);
6ed01066
MH
3073 if (!ret) {
3074 ret = -EBUSY;
3cd715de 3075 goto out_free;
6ed01066 3076 }
3cd715de
SRRH
3077
3078 ret = __ftrace_event_enable_disable(file, 1, 1);
3079 if (ret < 0)
3080 goto out_put;
41794f19 3081
04ec7bb6 3082 ret = register_ftrace_function_probe(glob, tr, ops, data);
ff305ded
SRRH
3083 /*
3084 * On success, the above returns the number of functions enabled,
3085 * but if it didn't find any functions it returns zero.
3086 * Consider no functions a failure too.
3087 */
a5b85bd1
MH
3088 if (!ret) {
3089 ret = -ENOENT;
3cd715de 3090 goto out_disable;
ff305ded
SRRH
3091 } else if (ret < 0)
3092 goto out_disable;
3093 /* Just return zero, not the number of enabled functions */
3094 ret = 0;
3cd715de
SRRH
3095 out:
3096 mutex_unlock(&event_mutex);
3097 return ret;
3098
3099 out_disable:
3100 __ftrace_event_enable_disable(file, 0, 1);
3101 out_put:
3102 module_put(file->event_call->mod);
3103 out_free:
3104 kfree(data);
3105 goto out;
3106}
3107
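/*
 * Illustrative usage (not part of this file): the command parsed by
 * event_enable_func() is written to set_ftrace_filter, e.g.
 *
 *	echo 'schedule:enable_event:sched:sched_switch:5' > \
 *		/sys/kernel/tracing/set_ftrace_filter
 *
 * arms a probe on schedule() that enables sched:sched_switch for the
 * next five hits, and a leading '!' removes the probe again.
 */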
3108static struct ftrace_func_command event_enable_cmd = {
3109 .name = ENABLE_EVENT_STR,
3110 .func = event_enable_func,
3111};
3112
3113static struct ftrace_func_command event_disable_cmd = {
3114 .name = DISABLE_EVENT_STR,
3115 .func = event_enable_func,
3116};
3117
3118static __init int register_event_cmds(void)
3119{
3120 int ret;
3121
3122 ret = register_ftrace_command(&event_enable_cmd);
3123 if (WARN_ON(ret < 0))
3124 return ret;
3125 ret = register_ftrace_command(&event_disable_cmd);
3126 if (WARN_ON(ret < 0))
3127 unregister_ftrace_command(&event_enable_cmd);
3128 return ret;
3129}
3130#else
3131static inline int register_event_cmds(void) { return 0; }
3132#endif /* CONFIG_DYNAMIC_FTRACE */
3133
77248221 3134/*
720dee53
MH
3135 * The top level array and trace arrays created by boot-time tracing
3136 * have already had their trace_event_file descriptors created in order
3137 * to allow for early events to be recorded.
3138 * This function is called after tracefs has been initialized,
3139 * and we now have to create the files associated with the events.
77248221 3140 */
720dee53 3141static void __trace_early_add_event_dirs(struct trace_array *tr)
77248221 3142{
7f1d2f82 3143 struct trace_event_file *file;
77248221
SR
3144 int ret;
3145
3146
3147 list_for_each_entry(file, &tr->events, list) {
620a30e9 3148 ret = event_create_dir(tr->event_dir, file);
77248221 3149 if (ret < 0)
3448bac3 3150 pr_warn("Could not create directory for event %s\n",
687fcc4a 3151 trace_event_name(file->event_call));
77248221
SR
3152 }
3153}
3154
3155/*
720dee53
MH
3156 * For early boot up, the top trace array and the trace arrays created
3157 * by boot-time tracing need to have a list of events that can be
3158 * enabled. This must be done before the filesystem is set up in order
3159 * to allow events to be traced early.
77248221 3160 */
720dee53 3161void __trace_early_add_events(struct trace_array *tr)
77248221 3162{
2425bcb9 3163 struct trace_event_call *call;
77248221
SR
3164 int ret;
3165
3166 list_for_each_entry(call, &ftrace_events, list) {
3167 /* Early boot up should not have any modules loaded */
3168 if (WARN_ON_ONCE(call->mod))
3169 continue;
3170
3171 ret = __trace_early_add_new_event(call, tr);
3172 if (ret < 0)
3448bac3 3173 pr_warn("Could not create early event %s\n",
687fcc4a 3174 trace_event_name(call));
77248221
SR
3175 }
3176}
3177
0c8916c3
SR
3178/* Remove the event directory structure for a trace directory. */
3179static void
3180__trace_remove_event_dirs(struct trace_array *tr)
3181{
7f1d2f82 3182 struct trace_event_file *file, *next;
0c8916c3 3183
f6a84bdc
ON
3184 list_for_each_entry_safe(file, next, &tr->events, list)
3185 remove_event_file_dir(file);
0c8916c3
SR
3186}
3187
2425bcb9 3188static void __add_event_to_tracers(struct trace_event_call *call)
ae63b31e
SR
3189{
3190 struct trace_array *tr;
3191
620a30e9
ON
3192 list_for_each_entry(tr, &ftrace_trace_arrays, list)
3193 __trace_add_new_event(call, tr);
ae63b31e
SR
3194}
3195
2425bcb9
SRRH
3196extern struct trace_event_call *__start_ftrace_events[];
3197extern struct trace_event_call *__stop_ftrace_events[];
a59fd602 3198
020e5f85
LZ
3199static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
3200
3201static __init int setup_trace_event(char *str)
3202{
3203 strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
55034cd6 3204 ring_buffer_expanded = true;
60efe21e 3205 disable_tracing_selftest("running event tracing");
020e5f85
LZ
3206
3207 return 1;
3208}
3209__setup("trace_event=", setup_trace_event);
3210
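/*
 * Illustrative usage (not part of this file): events named with this
 * boot parameter are enabled during early boot, e.g.
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 */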
77248221
SR
3211/* Expects to have event_mutex held when called */
3212static int
3213create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
ae63b31e
SR
3214{
3215 struct dentry *d_events;
3216 struct dentry *entry;
3217
8434dc93 3218 entry = tracefs_create_file("set_event", 0644, parent,
ae63b31e
SR
3219 tr, &ftrace_set_event_fops);
3220 if (!entry) {
8434dc93 3221 pr_warn("Could not create tracefs 'set_event' entry\n");
ae63b31e
SR
3222 return -ENOMEM;
3223 }
3224
8434dc93 3225 d_events = tracefs_create_dir("events", parent);
277ba044 3226 if (!d_events) {
8434dc93 3227 pr_warn("Could not create tracefs 'events' directory\n");
277ba044
SR
3228 return -ENOMEM;
3229 }
ae63b31e 3230
7d436400
SRRH
3231 entry = trace_create_file("enable", 0644, d_events,
3232 tr, &ftrace_tr_enable_fops);
3233 if (!entry) {
3234 pr_warn("Could not create tracefs 'enable' entry\n");
3235 return -ENOMEM;
3236 }
3237
3238 /* These are not as crucial, just warn if they are not created */
3239
49090107
SRRH
3240 entry = tracefs_create_file("set_event_pid", 0644, parent,
3241 tr, &ftrace_set_event_pid_fops);
7d436400
SRRH
3242 if (!entry)
3243 pr_warn("Could not create tracefs 'set_event_pid' entry\n");
49090107 3244
27683626
SRV
3245 entry = tracefs_create_file("set_event_notrace_pid", 0644, parent,
3246 tr, &ftrace_set_event_notrace_pid_fops);
3247 if (!entry)
3248 pr_warn("Could not create tracefs 'set_event_notrace_pid' entry\n");
3249
ae63b31e 3250 /* ring buffer internal formats */
7d436400
SRRH
3251 entry = trace_create_file("header_page", 0444, d_events,
3252 ring_buffer_print_page_header,
3253 &ftrace_show_header_fops);
3254 if (!entry)
3255 pr_warn("Could not create tracefs 'header_page' entry\n");
3256
3257 entry = trace_create_file("header_event", 0444, d_events,
3258 ring_buffer_print_entry_header,
3259 &ftrace_show_header_fops);
3260 if (!entry)
3261 pr_warn("Could not create tracefs 'header_event' entry\n");
ae63b31e
SR
3262
3263 tr->event_dir = d_events;
77248221
SR
3264
3265 return 0;
3266}
3267
3268/**
3269 * event_trace_add_tracer - add an instance of a trace_array to events
3270 * @parent: The parent dentry to place the files/directories for events in
3271 * @tr: The trace array associated with these events
3272 *
3273 * When a new instance is created, it needs to set up its events
3274 * directory, as well as other files associated with events. It also
2b5894cc 3275 * creates the event hierarchy in the @parent/events directory.
77248221
SR
3276 *
3277 * Returns 0 on success.
12ecef0c
SRV
3278 *
3279 * Must be called with event_mutex held.
77248221
SR
3280 */
3281int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
3282{
3283 int ret;
3284
12ecef0c 3285 lockdep_assert_held(&event_mutex);
77248221
SR
3286
3287 ret = create_event_toplevel_files(parent, tr);
3288 if (ret)
12ecef0c 3289 goto out;
77248221 3290
52f6ad6d 3291 down_write(&trace_event_sem);
720dee53
MH
3292 /* If tr already has an event list, it was initialized in early boot. */
3293 if (unlikely(!list_empty(&tr->events)))
3294 __trace_early_add_event_dirs(tr);
3295 else
3296 __trace_add_event_dirs(tr);
52f6ad6d 3297 up_write(&trace_event_sem);
277ba044 3298
12ecef0c 3299 out:
77248221
SR
3300 return ret;
3301}
3302
3303/*
3304 * The top trace array already had its file descriptors created.
3305 * Now the files themselves need to be created.
3306 */
3307static __init int
3308early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
3309{
3310 int ret;
3311
3312 mutex_lock(&event_mutex);
3313
3314 ret = create_event_toplevel_files(parent, tr);
3315 if (ret)
3316 goto out_unlock;
3317
52f6ad6d 3318 down_write(&trace_event_sem);
77248221 3319 __trace_early_add_event_dirs(tr);
52f6ad6d 3320 up_write(&trace_event_sem);
77248221
SR
3321
3322 out_unlock:
3323 mutex_unlock(&event_mutex);
3324
3325 return ret;
ae63b31e
SR
3326}
3327
12ecef0c 3328/* Must be called with event_mutex held */
0c8916c3
SR
3329int event_trace_del_tracer(struct trace_array *tr)
3330{
12ecef0c 3331 lockdep_assert_held(&event_mutex);
0c8916c3 3332
85f2b082
TZ
3333 /* Disable any event triggers and associated soft-disabled events */
3334 clear_event_triggers(tr);
3335
49090107 3336 /* Clear the pid list */
27683626 3337 __ftrace_clear_event_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
49090107 3338
2a6c24af
SRRH
3339 /* Disable any running events */
3340 __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
3341
e0a568dc
SRV
3342 /* Make sure no more events are being executed */
3343 tracepoint_synchronize_unregister();
3ccb0123 3344
52f6ad6d 3345 down_write(&trace_event_sem);
0c8916c3 3346 __trace_remove_event_dirs(tr);
a3d1e7eb 3347 tracefs_remove(tr->event_dir);
52f6ad6d 3348 up_write(&trace_event_sem);
0c8916c3
SR
3349
3350 tr->event_dir = NULL;
3351
0c8916c3
SR
3352 return 0;
3353}
3354
d1a29143
SR
3355static __init int event_trace_memsetup(void)
3356{
3357 field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
7f1d2f82 3358 file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
d1a29143
SR
3359 return 0;
3360}
3361
ce1039bd
SRRH
3362static __init void
3363early_enable_events(struct trace_array *tr, bool disable_first)
3364{
3365 char *buf = bootup_event_buf;
3366 char *token;
3367 int ret;
3368
3369 while (true) {
3370 token = strsep(&buf, ",");
3371
3372 if (!token)
3373 break;
ce1039bd 3374
43ed3843
SRRH
3375 if (*token) {
3376 /* Restarting syscalls requires that we stop them first */
3377 if (disable_first)
3378 ftrace_set_clr_event(tr, token, 0);
ce1039bd 3379
43ed3843
SRRH
3380 ret = ftrace_set_clr_event(tr, token, 1);
3381 if (ret)
3382 pr_warn("Failed to enable trace event: %s\n", token);
3383 }
ce1039bd
SRRH
3384
3385 /* Put back the comma to allow this to be called again */
3386 if (buf)
3387 *(buf - 1) = ',';
3388 }
3389}
3390
8781915a
EG
3391static __init int event_trace_enable(void)
3392{
ae63b31e 3393 struct trace_array *tr = top_trace_array();
2425bcb9 3394 struct trace_event_call **iter, *call;
8781915a
EG
3395 int ret;
3396
dc81e5e3
YY
3397 if (!tr)
3398 return -ENODEV;
3399
8781915a
EG
3400 for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
3401
3402 call = *iter;
3403 ret = event_init(call);
3404 if (!ret)
3405 list_add(&call->list, &ftrace_events);
3406 }
3407
77248221
SR
3408 /*
3409 * We need the top trace array to have a working set of trace
3410 * points at early init, before the debug files and directories
3411 * are created. Create the file entries now, and attach them
3412 * to the actual file dentries later.
3413 */
3414 __trace_early_add_events(tr);
3415
ce1039bd 3416 early_enable_events(tr, false);
81698831
SR
3417
3418 trace_printk_start_comm();
3419
3cd715de
SRRH
3420 register_event_cmds();
3421
85f2b082
TZ
3422 register_trigger_cmds();
3423
8781915a
EG
3424 return 0;
3425}
3426
ce1039bd
SRRH
3427/*
3428 * event_trace_enable() is called from trace_event_init() first to
3429 * initialize events and perhaps start any events that are on the
3430 * command line. Unfortunately, there are some events that will not
3431 * start this early, like the system call tracepoints that need
524666cb
GKB
3432 * to set the %SYSCALL_WORK_SYSCALL_TRACEPOINT flag of pid 1. But
3433 * event_trace_enable() is called before pid 1 starts, and this flag
3434 * is never set, making the syscall tracepoint never get reached, but
3435 * the event is enabled regardless (and not doing anything).
ce1039bd
SRRH
3436 */
3437static __init int event_trace_enable_again(void)
3438{
3439 struct trace_array *tr;
3440
3441 tr = top_trace_array();
3442 if (!tr)
3443 return -ENODEV;
3444
3445 early_enable_events(tr, true);
3446
3447 return 0;
3448}
3449
3450early_initcall(event_trace_enable_again);
3451
ac343da7
MH
3452/* Init fields which are not related to tracefs */
3453static __init int event_trace_init_fields(void)
3454{
3455 if (trace_define_generic_fields())
3456 pr_warn("tracing: Failed to allocate generic fields\n");
3457
3458 if (trace_define_common_fields())
3459 pr_warn("tracing: Failed to allocate common fields\n");
3460
3461 return 0;
3462}
3463
58b92547 3464__init int event_trace_init(void)
b77e38aa 3465{
ae63b31e 3466 struct trace_array *tr;
b77e38aa 3467 struct dentry *entry;
6d723736 3468 int ret;
b77e38aa 3469
ae63b31e 3470 tr = top_trace_array();
dc81e5e3
YY
3471 if (!tr)
3472 return -ENODEV;
ae63b31e 3473
dc300d77 3474 entry = tracefs_create_file("available_events", 0444, NULL,
ae63b31e 3475 tr, &ftrace_avail_fops);
2314c4ae 3476 if (!entry)
8434dc93 3477 pr_warn("Could not create tracefs 'available_events' entry\n");
2314c4ae 3478
dc300d77 3479 ret = early_event_add_tracer(NULL, tr);
ae63b31e
SR
3480 if (ret)
3481 return ret;
020e5f85 3482
836d481e 3483#ifdef CONFIG_MODULES
6d723736 3484 ret = register_module_notifier(&trace_module_nb);
55379376 3485 if (ret)
3448bac3 3486 pr_warn("Failed to register trace events module notifier\n");
836d481e 3487#endif
a838deab
MH
3488
3489 eventdir_initialized = true;
3490
b77e38aa
SR
3491 return 0;
3492}
5f893b26
SRRH
3493
3494void __init trace_event_init(void)
3495{
3496 event_trace_memsetup();
3497 init_ftrace_syscalls();
3498 event_trace_enable();
ac343da7 3499 event_trace_init_fields();
5f893b26
SRRH
3500}
3501
b3015fe4 3502#ifdef CONFIG_EVENT_TRACE_STARTUP_TEST
e6187007
SR
3503
3504static DEFINE_SPINLOCK(test_spinlock);
3505static DEFINE_SPINLOCK(test_spinlock_irq);
3506static DEFINE_MUTEX(test_mutex);
3507
3508static __init void test_work(struct work_struct *dummy)
3509{
3510 spin_lock(&test_spinlock);
3511 spin_lock_irq(&test_spinlock_irq);
3512 udelay(1);
3513 spin_unlock_irq(&test_spinlock_irq);
3514 spin_unlock(&test_spinlock);
3515
3516 mutex_lock(&test_mutex);
3517 msleep(1);
3518 mutex_unlock(&test_mutex);
3519}
3520
3521static __init int event_test_thread(void *unused)
3522{
3523 void *test_malloc;
3524
3525 test_malloc = kmalloc(1234, GFP_KERNEL);
3526 if (!test_malloc)
3527 pr_info("failed to kmalloc\n");
3528
3529 schedule_on_each_cpu(test_work);
3530
3531 kfree(test_malloc);
3532
3533 set_current_state(TASK_INTERRUPTIBLE);
fe0e01c7 3534 while (!kthread_should_stop()) {
e6187007 3535 schedule();
fe0e01c7
PZ
3536 set_current_state(TASK_INTERRUPTIBLE);
3537 }
3538 __set_current_state(TASK_RUNNING);
e6187007
SR
3539
3540 return 0;
3541}
3542
3543/*
3544 * Do various things that may trigger events.
3545 */
3546static __init void event_test_stuff(void)
3547{
3548 struct task_struct *test_thread;
3549
3550 test_thread = kthread_run(event_test_thread, NULL, "test-events");
3551 msleep(1);
3552 kthread_stop(test_thread);
3553}
3554
3555/*
3556 * For every trace event defined, we will test each trace point separately,
3557 * and then by groups, and finally all trace points.
3558 */
9ea21c1e 3559static __init void event_trace_self_tests(void)
e6187007 3560{
7967b3e0 3561 struct trace_subsystem_dir *dir;
7f1d2f82 3562 struct trace_event_file *file;
2425bcb9 3563 struct trace_event_call *call;
e6187007 3564 struct event_subsystem *system;
ae63b31e 3565 struct trace_array *tr;
e6187007
SR
3566 int ret;
3567
ae63b31e 3568 tr = top_trace_array();
dc81e5e3
YY
3569 if (!tr)
3570 return;
ae63b31e 3571
e6187007
SR
3572 pr_info("Running tests on trace events:\n");
3573
ae63b31e
SR
3574 list_for_each_entry(file, &tr->events, list) {
3575
3576 call = file->event_call;
e6187007 3577
2239291a
SR
3578 /* Only test those that have a probe */
3579 if (!call->class || !call->class->probe)
e6187007
SR
3580 continue;
3581
1f5a6b45
SR
3582/*
3583 * Testing syscall events here is pretty useless, but
3584 * we still do it if configured, though it is time consuming.
3585 * What we really need is a user thread to perform the
3586 * syscalls as we test.
3587 */
3588#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
8f082018
SR
3589 if (call->class->system &&
3590 strcmp(call->class->system, "syscalls") == 0)
1f5a6b45
SR
3591 continue;
3592#endif
3593
687fcc4a 3594 pr_info("Testing event %s: ", trace_event_name(call));
e6187007
SR
3595
3596 /*
3597 * If an event is already enabled, someone is using
3598 * it and the self test should not be on.
3599 */
5d6ad960 3600 if (file->flags & EVENT_FILE_FL_ENABLED) {
3448bac3 3601 pr_warn("Enabled event during self test!\n");
e6187007
SR
3602 WARN_ON_ONCE(1);
3603 continue;
3604 }
3605
ae63b31e 3606 ftrace_event_enable_disable(file, 1);
e6187007 3607 event_test_stuff();
ae63b31e 3608 ftrace_event_enable_disable(file, 0);
e6187007
SR
3609
3610 pr_cont("OK\n");
3611 }
3612
3613 /* Now test at the sub system level */
3614
3615 pr_info("Running tests on trace event systems:\n");
3616
ae63b31e
SR
3617 list_for_each_entry(dir, &tr->systems, list) {
3618
3619 system = dir->subsystem;
e6187007
SR
3620
3621 /* the ftrace system is special, skip it */
3622 if (strcmp(system->name, "ftrace") == 0)
3623 continue;
3624
3625 pr_info("Testing event system %s: ", system->name);
3626
ae63b31e 3627 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
e6187007 3628 if (WARN_ON_ONCE(ret)) {
3448bac3
FF
3629 pr_warn("error enabling system %s\n",
3630 system->name);
e6187007
SR
3631 continue;
3632 }
3633
3634 event_test_stuff();
3635
ae63b31e 3636 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
76bab1b7 3637 if (WARN_ON_ONCE(ret)) {
3448bac3
FF
3638 pr_warn("error disabling system %s\n",
3639 system->name);
76bab1b7
YL
3640 continue;
3641 }
e6187007
SR
3642
3643 pr_cont("OK\n");
3644 }
3645
3646 /* Test with all events enabled */
3647
3648 pr_info("Running tests on all trace events:\n");
3649 pr_info("Testing all events: ");
3650
ae63b31e 3651 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
e6187007 3652 if (WARN_ON_ONCE(ret)) {
3448bac3 3653 pr_warn("error enabling all events\n");
9ea21c1e 3654 return;
e6187007
SR
3655 }
3656
3657 event_test_stuff();
3658
3659 /* reset sysname */
ae63b31e 3660 ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
e6187007 3661 if (WARN_ON_ONCE(ret)) {
3448bac3 3662 pr_warn("error disabling all events\n");
9ea21c1e 3663 return;
e6187007
SR
3664 }
3665
3666 pr_cont("OK\n");
9ea21c1e
SR
3667}
3668
3669#ifdef CONFIG_FUNCTION_TRACER
3670
245b2e70 3671static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
9ea21c1e 3672
9b9db275 3673static struct trace_event_file event_trace_file __initdata;
b7f0c959
SRRH
3674
3675static void __init
2f5f6ad9 3676function_test_events_call(unsigned long ip, unsigned long parent_ip,
d19ad077 3677 struct ftrace_ops *op, struct ftrace_regs *regs)
9ea21c1e 3678{
13292494 3679 struct trace_buffer *buffer;
9ea21c1e
SR
3680 struct ring_buffer_event *event;
3681 struct ftrace_entry *entry;
3682 unsigned long flags;
3683 long disabled;
9ea21c1e
SR
3684 int cpu;
3685 int pc;
3686
3687 pc = preempt_count();
5168ae50 3688 preempt_disable_notrace();
9ea21c1e 3689 cpu = raw_smp_processor_id();
245b2e70 3690 disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
9ea21c1e
SR
3691
3692 if (disabled != 1)
3693 goto out;
3694
3695 local_save_flags(flags);
3696
9b9db275
SRRH
3697 event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
3698 TRACE_FN, sizeof(*entry),
3699 flags, pc);
9ea21c1e
SR
3700 if (!event)
3701 goto out;
3702 entry = ring_buffer_event_data(event);
3703 entry->ip = ip;
3704 entry->parent_ip = parent_ip;
3705
9b9db275
SRRH
3706 event_trigger_unlock_commit(&event_trace_file, buffer, event,
3707 entry, flags, pc);
9ea21c1e 3708 out:
245b2e70 3709 atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
5168ae50 3710 preempt_enable_notrace();
9ea21c1e
SR
3711}
3712
3713static struct ftrace_ops trace_ops __initdata =
3714{
3715 .func = function_test_events_call,
3716};
3717
3718static __init void event_trace_self_test_with_function(void)
3719{
17bb615a 3720 int ret;
9b9db275
SRRH
3721
3722 event_trace_file.tr = top_trace_array();
3723 if (WARN_ON(!event_trace_file.tr))
2d34f489 3724 return;
9b9db275 3725
17bb615a
SR
3726 ret = register_ftrace_function(&trace_ops);
3727 if (WARN_ON(ret < 0)) {
3728 pr_info("Failed to enable function tracer for event tests\n");
3729 return;
3730 }
9ea21c1e
SR
3731 pr_info("Running tests again, along with the function tracer\n");
3732 event_trace_self_tests();
3733 unregister_ftrace_function(&trace_ops);
3734}
3735#else
3736static __init void event_trace_self_test_with_function(void)
3737{
3738}
3739#endif
3740
3741static __init int event_trace_self_tests_init(void)
3742{
020e5f85
LZ
3743 if (!tracing_selftest_disabled) {
3744 event_trace_self_tests();
3745 event_trace_self_test_with_function();
3746 }
e6187007
SR
3747
3748 return 0;
3749}
3750
28d20e2d 3751late_initcall(event_trace_self_tests_init);
e6187007
SR
3752
3753#endif