kernel/trace/trace_events_trigger.c
1 /*
2 * trace_events_trigger - trace event triggers
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/rculist.h>
26
27 #include "trace.h"
28
29 static LIST_HEAD(trigger_commands);
30 static DEFINE_MUTEX(trigger_cmd_mutex);
31
32 void trigger_data_free(struct event_trigger_data *data)
33 {
34 if (data->cmd_ops->set_filter)
35 data->cmd_ops->set_filter(NULL, data, NULL);
36
37 synchronize_sched(); /* make sure current triggers exit before free */
38 kfree(data);
39 }
40
41 /**
42 * event_triggers_call - Call triggers associated with a trace event
43 * @file: The trace_event_file associated with the event
44 * @rec: The trace entry for the event, NULL for unconditional invocation
45 *
46 * For each trigger associated with an event, invoke the trigger
47 * function registered with the associated trigger command. If rec is
48 * non-NULL, it means that the trigger requires further processing and
49 * shouldn't be unconditionally invoked. If rec is non-NULL and the
50 * trigger has a filter associated with it, rec will be checked against
51 * the filter and, if the record matches, the trigger will be invoked.
52 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
53 * in any case until the current event is written, the trigger
54 * function isn't invoked but the bit associated with the deferred
55 * trigger is set in the return value.
56 *
60 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
61 *
62 * Return: an enum event_trigger_type value containing a set bit for
63 * any trigger that should be deferred, ETT_NONE if nothing to defer.
64 */
65 enum event_trigger_type
66 event_triggers_call(struct trace_event_file *file, void *rec)
67 {
68 struct event_trigger_data *data;
69 enum event_trigger_type tt = ETT_NONE;
70 struct event_filter *filter;
71
72 if (list_empty(&file->triggers))
73 return tt;
74
75 list_for_each_entry_rcu(data, &file->triggers, list) {
76 if (data->paused)
77 continue;
78 if (!rec) {
79 data->ops->func(data, rec);
80 continue;
81 }
82 filter = rcu_dereference_sched(data->filter);
83 if (filter && !filter_match_preds(filter, rec))
84 continue;
85 if (event_command_post_trigger(data->cmd_ops)) {
86 tt |= data->cmd_ops->trigger_type;
87 continue;
88 }
89 data->ops->func(data, rec);
90 }
91 return tt;
92 }
93 EXPORT_SYMBOL_GPL(event_triggers_call);
94
95 /**
96 * event_triggers_post_call - Call 'post_triggers' for a trace event
97 * @file: The trace_event_file associated with the event
98 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
99 * @rec: The trace entry for the event
100 *
101 * For each trigger associated with an event, invoke the trigger
102 * function registered with the associated trigger command, if the
103 * corresponding bit is set in the tt enum passed into this function.
104 * See event_triggers_call() for details on how those bits are set.
105 *
106 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
107 */
108 void
109 event_triggers_post_call(struct trace_event_file *file,
110 enum event_trigger_type tt,
111 void *rec)
112 {
113 struct event_trigger_data *data;
114
115 list_for_each_entry_rcu(data, &file->triggers, list) {
116 if (data->paused)
117 continue;
118 if (data->cmd_ops->trigger_type & tt)
119 data->ops->func(data, rec);
120 }
121 }
122 EXPORT_SYMBOL_GPL(event_triggers_post_call);
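
/*
 * Editorial sketch (not part of the original file): how callers are
 * expected to pair event_triggers_call() and event_triggers_post_call().
 * Conditional triggers run before the event is written; any returned
 * 'post_trigger' bits are replayed after the commit.  Variable names are
 * illustrative only; the in-tree callers do this via the commit helpers
 * in kernel/trace/trace.h.
 *
 *	enum event_trigger_type tt = ETT_NONE;
 *
 *	if (file->flags & EVENT_FILE_FL_TRIGGER_COND)
 *		tt = event_triggers_call(file, entry);
 *
 *	(write and commit the event record 'entry' here)
 *
 *	if (tt != ETT_NONE)
 *		event_triggers_post_call(file, tt, entry);
 */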
123
124 #define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
125
126 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
127 {
128 struct trace_event_file *event_file = event_file_data(m->private);
129
130 if (t == SHOW_AVAILABLE_TRIGGERS)
131 return NULL;
132
133 return seq_list_next(t, &event_file->triggers, pos);
134 }
135
136 static void *trigger_start(struct seq_file *m, loff_t *pos)
137 {
138 struct trace_event_file *event_file;
139
140 /* ->stop() is called even if ->start() fails */
141 mutex_lock(&event_mutex);
142 event_file = event_file_data(m->private);
143 if (unlikely(!event_file))
144 return ERR_PTR(-ENODEV);
145
146 if (list_empty(&event_file->triggers))
147 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
148
149 return seq_list_start(&event_file->triggers, *pos);
150 }
151
152 static void trigger_stop(struct seq_file *m, void *t)
153 {
154 mutex_unlock(&event_mutex);
155 }
156
157 static int trigger_show(struct seq_file *m, void *v)
158 {
159 struct event_trigger_data *data;
160 struct event_command *p;
161
162 if (v == SHOW_AVAILABLE_TRIGGERS) {
163 seq_puts(m, "# Available triggers:\n");
164 seq_putc(m, '#');
165 mutex_lock(&trigger_cmd_mutex);
166 list_for_each_entry_reverse(p, &trigger_commands, list)
167 seq_printf(m, " %s", p->name);
168 seq_putc(m, '\n');
169 mutex_unlock(&trigger_cmd_mutex);
170 return 0;
171 }
172
173 data = list_entry(v, struct event_trigger_data, list);
174 data->ops->print(m, data->ops, data);
175
176 return 0;
177 }
178
179 static const struct seq_operations event_triggers_seq_ops = {
180 .start = trigger_start,
181 .next = trigger_next,
182 .stop = trigger_stop,
183 .show = trigger_show,
184 };
185
186 static int event_trigger_regex_open(struct inode *inode, struct file *file)
187 {
188 int ret = 0;
189
190 mutex_lock(&event_mutex);
191
192 if (unlikely(!event_file_data(file))) {
193 mutex_unlock(&event_mutex);
194 return -ENODEV;
195 }
196
197 if ((file->f_mode & FMODE_WRITE) &&
198 (file->f_flags & O_TRUNC)) {
199 struct trace_event_file *event_file;
200 struct event_command *p;
201
202 event_file = event_file_data(file);
203
204 list_for_each_entry(p, &trigger_commands, list) {
205 if (p->unreg_all)
206 p->unreg_all(event_file);
207 }
208 }
209
210 if (file->f_mode & FMODE_READ) {
211 ret = seq_open(file, &event_triggers_seq_ops);
212 if (!ret) {
213 struct seq_file *m = file->private_data;
214 m->private = file;
215 }
216 }
217
218 mutex_unlock(&event_mutex);
219
220 return ret;
221 }
222
223 static int trigger_process_regex(struct trace_event_file *file, char *buff)
224 {
225 char *command, *next = buff;
226 struct event_command *p;
227 int ret = -EINVAL;
228
229 command = strsep(&next, ": \t");
230 command = (command[0] != '!') ? command : command + 1;
231
232 mutex_lock(&trigger_cmd_mutex);
233 list_for_each_entry(p, &trigger_commands, list) {
234 if (strcmp(p->name, command) == 0) {
235 ret = p->func(p, file, buff, command, next);
236 goto out_unlock;
237 }
238 }
239 out_unlock:
240 mutex_unlock(&trigger_cmd_mutex);
241
242 return ret;
243 }
244
245 static ssize_t event_trigger_regex_write(struct file *file,
246 const char __user *ubuf,
247 size_t cnt, loff_t *ppos)
248 {
249 struct trace_event_file *event_file;
250 ssize_t ret;
251 char *buf;
252
253 if (!cnt)
254 return 0;
255
256 if (cnt >= PAGE_SIZE)
257 return -EINVAL;
258
259 buf = memdup_user_nul(ubuf, cnt);
260 if (IS_ERR(buf))
261 return PTR_ERR(buf);
262
263 strim(buf);
264
265 mutex_lock(&event_mutex);
266 event_file = event_file_data(file);
267 if (unlikely(!event_file)) {
268 mutex_unlock(&event_mutex);
269 kfree(buf);
270 return -ENODEV;
271 }
272 ret = trigger_process_regex(event_file, buf);
273 mutex_unlock(&event_mutex);
274
275 kfree(buf);
276 if (ret < 0)
277 goto out;
278
279 *ppos += cnt;
280 ret = cnt;
281 out:
282 return ret;
283 }
284
285 static int event_trigger_regex_release(struct inode *inode, struct file *file)
286 {
287 mutex_lock(&event_mutex);
288
289 if (file->f_mode & FMODE_READ)
290 seq_release(inode, file);
291
292 mutex_unlock(&event_mutex);
293
294 return 0;
295 }
296
297 static ssize_t
298 event_trigger_write(struct file *filp, const char __user *ubuf,
299 size_t cnt, loff_t *ppos)
300 {
301 return event_trigger_regex_write(filp, ubuf, cnt, ppos);
302 }
303
304 static int
305 event_trigger_open(struct inode *inode, struct file *filp)
306 {
307 return event_trigger_regex_open(inode, filp);
308 }
309
310 static int
311 event_trigger_release(struct inode *inode, struct file *file)
312 {
313 return event_trigger_regex_release(inode, file);
314 }
315
316 const struct file_operations event_trigger_fops = {
317 .open = event_trigger_open,
318 .read = seq_read,
319 .write = event_trigger_write,
320 .llseek = tracing_lseek,
321 .release = event_trigger_release,
322 };
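
/*
 * Editorial examples (not part of the original file): the user-visible
 * interface wired up by event_trigger_fops above, in the spirit of the
 * trigger examples in Documentation/trace/events.  Paths are relative to
 * the tracefs mount point (e.g. /sys/kernel/debug/tracing):
 *
 *	# attach a trigger: '<command>[:count] [if <filter>]'
 *	echo 'traceoff:1 if nr_rq > 1' > events/block/block_unplug/trigger
 *
 *	# list the triggers attached to an event
 *	cat events/block/block_unplug/trigger
 *
 *	# remove a trigger by prefixing its command with '!'
 *	echo '!traceoff' > events/block/block_unplug/trigger
 */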
323
324 /*
325 * Currently we only register event commands from __init, so mark this
326 * __init too.
327 */
328 __init int register_event_command(struct event_command *cmd)
329 {
330 struct event_command *p;
331 int ret = 0;
332
333 mutex_lock(&trigger_cmd_mutex);
334 list_for_each_entry(p, &trigger_commands, list) {
335 if (strcmp(cmd->name, p->name) == 0) {
336 ret = -EBUSY;
337 goto out_unlock;
338 }
339 }
340 list_add(&cmd->list, &trigger_commands);
341 out_unlock:
342 mutex_unlock(&trigger_cmd_mutex);
343
344 return ret;
345 }
346
347 /*
348 * Currently we only unregister event commands from __init, so mark
349 * this __init too.
350 */
351 __init int unregister_event_command(struct event_command *cmd)
352 {
353 struct event_command *p, *n;
354 int ret = -ENODEV;
355
356 mutex_lock(&trigger_cmd_mutex);
357 list_for_each_entry_safe(p, n, &trigger_commands, list) {
358 if (strcmp(cmd->name, p->name) == 0) {
359 ret = 0;
360 list_del_init(&p->list);
361 goto out_unlock;
362 }
363 }
364 out_unlock:
365 mutex_unlock(&trigger_cmd_mutex);
366
367 return ret;
368 }
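
/*
 * Editorial sketch (not part of the original file): a minimal, hypothetical
 * event_command built from the generic helpers in this file; compare with
 * trigger_traceon_cmd further down for a real instance.
 *
 *	static struct event_command my_trigger_cmd = {
 *		.name			= "mytrigger",		(hypothetical)
 *		.trigger_type		= ETT_TRACE_ONOFF,	(a bit from enum event_trigger_type)
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= my_get_trigger_ops,	(hypothetical)
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 * which would then be handed to register_event_command() from an __init
 * function, as the traceon/traceoff/snapshot commands below do.
 */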
369
370 /**
371 * event_trigger_print - Generic event_trigger_ops @print implementation
372 * @name: The name of the event trigger
373 * @m: The seq_file being printed to
374 * @data: Trigger-specific data
375 * @filter_str: filter_str to print, if present
376 *
377 * Common implementation for event triggers to print themselves.
378 *
379 * Usually wrapped by a function that simply sets the @name of the
380 * trigger command and then invokes this.
381 *
382 * Return: 0 on success, errno otherwise
383 */
384 static int
385 event_trigger_print(const char *name, struct seq_file *m,
386 void *data, char *filter_str)
387 {
388 long count = (long)data;
389
390 seq_puts(m, name);
391
392 if (count == -1)
393 seq_puts(m, ":unlimited");
394 else
395 seq_printf(m, ":count=%ld", count);
396
397 if (filter_str)
398 seq_printf(m, " if %s\n", filter_str);
399 else
400 seq_putc(m, '\n');
401
402 return 0;
403 }
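
/*
 * Editorial note (not part of the original file): for example, with
 * @name "traceoff", a count of 5 and a filter of "nr_rq > 1", the helper
 * above produces the line shown when reading the 'trigger' file:
 *
 *	traceoff:count=5 if nr_rq > 1
 *
 * and "traceoff:unlimited" when no count was given (count == -1).
 */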
404
405 /**
406 * event_trigger_init - Generic event_trigger_ops @init implementation
407 * @ops: The trigger ops associated with the trigger
408 * @data: Trigger-specific data
409 *
410 * Common implementation of event trigger initialization.
411 *
412 * Usually used directly as the @init method in event trigger
413 * implementations.
414 *
415 * Return: 0 on success, errno otherwise
416 */
417 int event_trigger_init(struct event_trigger_ops *ops,
418 struct event_trigger_data *data)
419 {
420 data->ref++;
421 return 0;
422 }
423
424 /**
425 * event_trigger_free - Generic event_trigger_ops @free implementation
426 * @ops: The trigger ops associated with the trigger
427 * @data: Trigger-specific data
428 *
429 * Common implementation of event trigger de-initialization.
430 *
431 * Usually used directly as the @free method in event trigger
432 * implementations.
433 */
434 static void
435 event_trigger_free(struct event_trigger_ops *ops,
436 struct event_trigger_data *data)
437 {
438 if (WARN_ON_ONCE(data->ref <= 0))
439 return;
440
441 data->ref--;
442 if (!data->ref)
443 trigger_data_free(data);
444 }
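
/*
 * Editorial note (not part of the original file): besides serving as the
 * default ->init/->free ops, this pair is used as a bare reference count
 * around registration in event_trigger_callback() below:
 *
 *	event_trigger_init(trigger_ops, trigger_data);
 *	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
 *	...
 *	event_trigger_free(trigger_ops, trigger_data);
 *
 * Holding the extra reference across ->reg() keeps a failed registration
 * from freeing trigger_data and then having it freed a second time by
 * the caller.
 */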
445
446 int trace_event_trigger_enable_disable(struct trace_event_file *file,
447 int trigger_enable)
448 {
449 int ret = 0;
450
451 if (trigger_enable) {
452 if (atomic_inc_return(&file->tm_ref) > 1)
453 return ret;
454 set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
455 ret = trace_event_enable_disable(file, 1, 1);
456 } else {
457 if (atomic_dec_return(&file->tm_ref) > 0)
458 return ret;
459 clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
460 ret = trace_event_enable_disable(file, 0, 1);
461 }
462
463 return ret;
464 }
465
466 /**
467 * clear_event_triggers - Clear all triggers associated with a trace array
468 * @tr: The trace array to clear
469 *
470 * For each trigger, the triggering event has its tm_ref decremented
471 * via trace_event_trigger_enable_disable(), and any associated event
472 * (in the case of enable/disable_event triggers) will have its sm_ref
473 * decremented via free()->trace_event_enable_disable(). That
474 * combination effectively reverses the soft-mode/trigger state added
475 * by trigger registration.
476 *
477 * Must be called with event_mutex held.
478 */
479 void
480 clear_event_triggers(struct trace_array *tr)
481 {
482 struct trace_event_file *file;
483
484 list_for_each_entry(file, &tr->events, list) {
485 struct event_trigger_data *data, *n;
486 list_for_each_entry_safe(data, n, &file->triggers, list) {
487 trace_event_trigger_enable_disable(file, 0);
488 list_del_rcu(&data->list);
489 if (data->ops->free)
490 data->ops->free(data->ops, data);
491 }
492 }
493 }
494
495 /**
496 * update_cond_flag - Set or reset the TRIGGER_COND bit
497 * @file: The trace_event_file associated with the event
498 *
499 * If an event has triggers and any of those triggers has a filter or
500 * a post_trigger, trigger invocation needs to be deferred until after
501 * the current event has logged its data, and the event should have
502 * its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should be
503 * cleared.
504 */
505 void update_cond_flag(struct trace_event_file *file)
506 {
507 struct event_trigger_data *data;
508 bool set_cond = false;
509
510 list_for_each_entry_rcu(data, &file->triggers, list) {
511 if (data->filter || event_command_post_trigger(data->cmd_ops) ||
512 event_command_needs_rec(data->cmd_ops)) {
513 set_cond = true;
514 break;
515 }
516 }
517
518 if (set_cond)
519 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
520 else
521 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
522 }
523
524 /**
525 * register_trigger - Generic event_command @reg implementation
526 * @glob: The raw string used to register the trigger
527 * @ops: The trigger ops associated with the trigger
528 * @data: Trigger-specific data to associate with the trigger
529 * @file: The trace_event_file associated with the event
530 *
531 * Common implementation for event trigger registration.
532 *
533 * Usually used directly as the @reg method in event command
534 * implementations.
535 *
536 * Return: 0 on success, errno otherwise
537 */
538 static int register_trigger(char *glob, struct event_trigger_ops *ops,
539 struct event_trigger_data *data,
540 struct trace_event_file *file)
541 {
542 struct event_trigger_data *test;
543 int ret = 0;
544
545 list_for_each_entry_rcu(test, &file->triggers, list) {
546 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
547 ret = -EEXIST;
548 goto out;
549 }
550 }
551
552 if (data->ops->init) {
553 ret = data->ops->init(data->ops, data);
554 if (ret < 0)
555 goto out;
556 }
557
558 list_add_rcu(&data->list, &file->triggers);
559 ret++;
560
561 update_cond_flag(file);
562 if (trace_event_trigger_enable_disable(file, 1) < 0) {
563 list_del_rcu(&data->list);
564 update_cond_flag(file);
565 ret--;
566 }
567 out:
568 return ret;
569 }
570
571 /**
572 * unregister_trigger - Generic event_command @unreg implementation
573 * @glob: The raw string used to register the trigger
574 * @ops: The trigger ops associated with the trigger
575 * @test: Trigger-specific data used to find the trigger to remove
576 * @file: The trace_event_file associated with the event
577 *
578 * Common implementation for event trigger unregistration.
579 *
580 * Usually used directly as the @unreg method in event command
581 * implementations.
582 */
583 void unregister_trigger(char *glob, struct event_trigger_ops *ops,
584 struct event_trigger_data *test,
585 struct trace_event_file *file)
586 {
587 struct event_trigger_data *data;
588 bool unregistered = false;
589
590 list_for_each_entry_rcu(data, &file->triggers, list) {
591 if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
592 unregistered = true;
593 list_del_rcu(&data->list);
594 trace_event_trigger_enable_disable(file, 0);
595 update_cond_flag(file);
596 break;
597 }
598 }
599
600 if (unregistered && data->ops->free)
601 data->ops->free(data->ops, data);
602 }
603
604 /**
605 * event_trigger_callback - Generic event_command @func implementation
606 * @cmd_ops: The command ops, used for trigger registration
607 * @file: The trace_event_file associated with the event
608 * @glob: The raw string used to register the trigger
609 * @cmd: The cmd portion of the string used to register the trigger
610 * @param: The params portion of the string used to register the trigger
611 *
612 * Common implementation for event command parsing and trigger
613 * instantiation.
614 *
615 * Usually used directly as the @func method in event command
616 * implementations.
617 *
618 * Return: 0 on success, errno otherwise
619 */
620 static int
621 event_trigger_callback(struct event_command *cmd_ops,
622 struct trace_event_file *file,
623 char *glob, char *cmd, char *param)
624 {
625 struct event_trigger_data *trigger_data;
626 struct event_trigger_ops *trigger_ops;
627 char *trigger = NULL;
628 char *number;
629 int ret;
630
631 /* separate the trigger from the filter (t:n [if filter]) */
632 if (param && isdigit(param[0]))
633 trigger = strsep(&param, " \t");
634
635 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
636
637 ret = -ENOMEM;
638 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
639 if (!trigger_data)
640 goto out;
641
642 trigger_data->count = -1;
643 trigger_data->ops = trigger_ops;
644 trigger_data->cmd_ops = cmd_ops;
645 trigger_data->private_data = file;
646 INIT_LIST_HEAD(&trigger_data->list);
647 INIT_LIST_HEAD(&trigger_data->named_list);
648
649 if (glob[0] == '!') {
650 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
651 kfree(trigger_data);
652 ret = 0;
653 goto out;
654 }
655
656 if (trigger) {
657 number = strsep(&trigger, ":");
658
659 ret = -EINVAL;
660 if (!strlen(number))
661 goto out_free;
662
663 /*
664 * We use the callback data field (which is a pointer)
665 * as our counter.
666 */
667 ret = kstrtoul(number, 0, &trigger_data->count);
668 if (ret)
669 goto out_free;
670 }
671
672 if (!param) /* if param is non-empty, it's supposed to be a filter */
673 goto out_reg;
674
675 if (!cmd_ops->set_filter)
676 goto out_reg;
677
678 ret = cmd_ops->set_filter(param, trigger_data, file);
679 if (ret < 0)
680 goto out_free;
681
682 out_reg:
683 /* Up the trigger_data count to make sure reg doesn't free it on failure */
684 event_trigger_init(trigger_ops, trigger_data);
685 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
686 /*
687 * The above returns on success the # of functions enabled,
688 * but if it didn't find any functions it returns zero.
689 * Consider no functions a failure too.
690 */
691 if (!ret) {
692 cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
693 ret = -ENOENT;
694 } else if (ret > 0)
695 ret = 0;
696
697 /* Down the counter of trigger_data or free it if not used anymore */
698 event_trigger_free(trigger_ops, trigger_data);
699 out:
700 return ret;
701
702 out_free:
703 if (cmd_ops->set_filter)
704 cmd_ops->set_filter(NULL, trigger_data, NULL);
705 kfree(trigger_data);
706 goto out;
707 }
708
709 /**
710 * set_trigger_filter - Generic event_command @set_filter implementation
711 * @filter_str: The filter string for the trigger, NULL to remove filter
712 * @trigger_data: Trigger-specific data
713 * @file: The trace_event_file associated with the event
714 *
715 * Common implementation for event command filter parsing and filter
716 * instantiation.
717 *
718 * Usually used directly as the @set_filter method in event command
719 * implementations.
720 *
721 * Also used to remove a filter (if filter_str == NULL).
722 *
723 * Return: 0 on success, errno otherwise
724 */
725 int set_trigger_filter(char *filter_str,
726 struct event_trigger_data *trigger_data,
727 struct trace_event_file *file)
728 {
729 struct event_trigger_data *data = trigger_data;
730 struct event_filter *filter = NULL, *tmp;
731 int ret = -EINVAL;
732 char *s;
733
734 if (!filter_str) /* clear the current filter */
735 goto assign;
736
737 s = strsep(&filter_str, " \t");
738
739 if (!strlen(s) || strcmp(s, "if") != 0)
740 goto out;
741
742 if (!filter_str)
743 goto out;
744
745 /* The filter is for the 'trigger' event, not the triggered event */
746 ret = create_event_filter(file->event_call, filter_str, false, &filter);
747 if (ret)
748 goto out;
749 assign:
750 tmp = rcu_access_pointer(data->filter);
751
752 rcu_assign_pointer(data->filter, filter);
753
754 if (tmp) {
755 /* Make sure the call is done with the filter */
756 synchronize_sched();
757 free_event_filter(tmp);
758 }
759
760 kfree(data->filter_str);
761 data->filter_str = NULL;
762
763 if (filter_str) {
764 data->filter_str = kstrdup(filter_str, GFP_KERNEL);
765 if (!data->filter_str) {
766 free_event_filter(rcu_access_pointer(data->filter));
767 data->filter = NULL;
768 ret = -ENOMEM;
769 }
770 }
771 out:
772 return ret;
773 }
774
775 static LIST_HEAD(named_triggers);
776
777 /**
778 * find_named_trigger - Find the common named trigger associated with @name
779 * @name: The name of the set of named triggers to find the common data for
780 *
781 * Named triggers are sets of triggers that share a common set of
782 * trigger data. The first named trigger registered with a given name
783 * owns the common trigger data that the others subsequently
784 * registered with the same name will reference. This function
785 * returns the common trigger data associated with that first
786 * registered instance.
787 *
788 * Return: the common trigger data for the given named trigger on
789 * success, NULL otherwise.
790 */
791 struct event_trigger_data *find_named_trigger(const char *name)
792 {
793 struct event_trigger_data *data;
794
795 if (!name)
796 return NULL;
797
798 list_for_each_entry(data, &named_triggers, named_list) {
799 if (data->named_data)
800 continue;
801 if (strcmp(data->name, name) == 0)
802 return data;
803 }
804
805 return NULL;
806 }
807
808 /**
809 * is_named_trigger - determine if a given trigger is a named trigger
810 * @test: The trigger data to test
811 *
812 * Return: true if 'test' is a named trigger, false otherwise.
813 */
814 bool is_named_trigger(struct event_trigger_data *test)
815 {
816 struct event_trigger_data *data;
817
818 list_for_each_entry(data, &named_triggers, named_list) {
819 if (test == data)
820 return true;
821 }
822
823 return false;
824 }
825
826 /**
827 * save_named_trigger - save the trigger in the named trigger list
828 * @name: The name of the named trigger set
829 * @data: The trigger data to save
830 *
831 * Return: 0 if successful, negative error otherwise.
832 */
833 int save_named_trigger(const char *name, struct event_trigger_data *data)
834 {
835 data->name = kstrdup(name, GFP_KERNEL);
836 if (!data->name)
837 return -ENOMEM;
838
839 list_add(&data->named_list, &named_triggers);
840
841 return 0;
842 }
843
844 /**
845 * del_named_trigger - delete a trigger from the named trigger list
846 * @data: The trigger data to delete
847 */
848 void del_named_trigger(struct event_trigger_data *data)
849 {
850 kfree(data->name);
851 data->name = NULL;
852
853 list_del(&data->named_list);
854 }
855
856 static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
857 {
858 struct event_trigger_data *test;
859
860 list_for_each_entry(test, &named_triggers, named_list) {
861 if (strcmp(test->name, data->name) == 0) {
862 if (pause) {
863 test->paused_tmp = test->paused;
864 test->paused = true;
865 } else {
866 test->paused = test->paused_tmp;
867 }
868 }
869 }
870 }
871
872 /**
873 * pause_named_trigger - Pause all named triggers with the same name
874 * @data: The trigger data of a named trigger to pause
875 *
876 * Pauses a named trigger along with all other triggers having the
877 * same name. Because named triggers share a common set of data,
878 * pausing only one is meaningless, so pausing one named trigger needs
879 * to pause all triggers with the same name.
880 */
881 void pause_named_trigger(struct event_trigger_data *data)
882 {
883 __pause_named_trigger(data, true);
884 }
885
886 /**
887 * unpause_named_trigger - Un-pause all named triggers with the same name
888 * @data: The trigger data of a named trigger to unpause
889 *
890 * Un-pauses a named trigger along with all other triggers having the
891 * same name. Because named triggers share a common set of data,
892 * unpausing only one is meaningless, so unpausing one named trigger
893 * needs to unpause all triggers with the same name.
894 */
895 void unpause_named_trigger(struct event_trigger_data *data)
896 {
897 __pause_named_trigger(data, false);
898 }
899
900 /**
901 * set_named_trigger_data - Associate common named trigger data
902 * @data: The trigger data to associate with a named trigger
 * @named_data: The common trigger data owned by the first named trigger registered
903 *
904 * Named triggers are sets of triggers that share a common set of
905 * trigger data. The first named trigger registered with a given name
906 * owns the common trigger data that the others subsequently
907 * registered with the same name will reference. This function
908 * associates the common trigger data from the first trigger with the
909 * given trigger.
910 */
911 void set_named_trigger_data(struct event_trigger_data *data,
912 struct event_trigger_data *named_data)
913 {
914 data->named_data = named_data;
915 }
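
/*
 * Editorial sketch (not part of the original file): the rough sequence a
 * command supporting named triggers (e.g. the hist triggers in
 * trace_events_hist.c) runs through with the helpers above; error
 * handling omitted and variable names illustrative:
 *
 *	named_data = find_named_trigger(name);
 *	if (named_data)
 *		set_named_trigger_data(data, named_data);
 *	else
 *		save_named_trigger(name, data);
 *
 * pause_named_trigger()/unpause_named_trigger() then act on every
 * trigger sharing that name, since they all share the common data.
 */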
916
917 static void
918 traceon_trigger(struct event_trigger_data *data, void *rec)
919 {
920 if (tracing_is_on())
921 return;
922
923 tracing_on();
924 }
925
926 static void
927 traceon_count_trigger(struct event_trigger_data *data, void *rec)
928 {
929 if (tracing_is_on())
930 return;
931
932 if (!data->count)
933 return;
934
935 if (data->count != -1)
936 (data->count)--;
937
938 tracing_on();
939 }
940
941 static void
942 traceoff_trigger(struct event_trigger_data *data, void *rec)
943 {
944 if (!tracing_is_on())
945 return;
946
947 tracing_off();
948 }
949
950 static void
951 traceoff_count_trigger(struct event_trigger_data *data, void *rec)
952 {
953 if (!tracing_is_on())
954 return;
955
956 if (!data->count)
957 return;
958
959 if (data->count != -1)
960 (data->count)--;
961
962 tracing_off();
963 }
964
965 static int
966 traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
967 struct event_trigger_data *data)
968 {
969 return event_trigger_print("traceon", m, (void *)data->count,
970 data->filter_str);
971 }
972
973 static int
974 traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
975 struct event_trigger_data *data)
976 {
977 return event_trigger_print("traceoff", m, (void *)data->count,
978 data->filter_str);
979 }
980
981 static struct event_trigger_ops traceon_trigger_ops = {
982 .func = traceon_trigger,
983 .print = traceon_trigger_print,
984 .init = event_trigger_init,
985 .free = event_trigger_free,
986 };
987
988 static struct event_trigger_ops traceon_count_trigger_ops = {
989 .func = traceon_count_trigger,
990 .print = traceon_trigger_print,
991 .init = event_trigger_init,
992 .free = event_trigger_free,
993 };
994
995 static struct event_trigger_ops traceoff_trigger_ops = {
996 .func = traceoff_trigger,
997 .print = traceoff_trigger_print,
998 .init = event_trigger_init,
999 .free = event_trigger_free,
1000 };
1001
1002 static struct event_trigger_ops traceoff_count_trigger_ops = {
1003 .func = traceoff_count_trigger,
1004 .print = traceoff_trigger_print,
1005 .init = event_trigger_init,
1006 .free = event_trigger_free,
1007 };
1008
1009 static struct event_trigger_ops *
1010 onoff_get_trigger_ops(char *cmd, char *param)
1011 {
1012 struct event_trigger_ops *ops;
1013
1014 /* we register both traceon and traceoff to this callback */
1015 if (strcmp(cmd, "traceon") == 0)
1016 ops = param ? &traceon_count_trigger_ops :
1017 &traceon_trigger_ops;
1018 else
1019 ops = param ? &traceoff_count_trigger_ops :
1020 &traceoff_trigger_ops;
1021
1022 return ops;
1023 }
1024
1025 static struct event_command trigger_traceon_cmd = {
1026 .name = "traceon",
1027 .trigger_type = ETT_TRACE_ONOFF,
1028 .func = event_trigger_callback,
1029 .reg = register_trigger,
1030 .unreg = unregister_trigger,
1031 .get_trigger_ops = onoff_get_trigger_ops,
1032 .set_filter = set_trigger_filter,
1033 };
1034
1035 static struct event_command trigger_traceoff_cmd = {
1036 .name = "traceoff",
1037 .trigger_type = ETT_TRACE_ONOFF,
1038 .flags = EVENT_CMD_FL_POST_TRIGGER,
1039 .func = event_trigger_callback,
1040 .reg = register_trigger,
1041 .unreg = unregister_trigger,
1042 .get_trigger_ops = onoff_get_trigger_ops,
1043 .set_filter = set_trigger_filter,
1044 };
1045
1046 #ifdef CONFIG_TRACER_SNAPSHOT
1047 static void
1048 snapshot_trigger(struct event_trigger_data *data, void *rec)
1049 {
1050 struct trace_event_file *file = data->private_data;
1051
1052 if (file)
1053 tracing_snapshot_instance(file->tr);
1054 else
1055 tracing_snapshot();
1056 }
1057
1058 static void
1059 snapshot_count_trigger(struct event_trigger_data *data, void *rec)
1060 {
1061 if (!data->count)
1062 return;
1063
1064 if (data->count != -1)
1065 (data->count)--;
1066
1067 snapshot_trigger(data, rec);
1068 }
1069
1070 static int
1071 register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1072 struct event_trigger_data *data,
1073 struct trace_event_file *file)
1074 {
1075 int ret = register_trigger(glob, ops, data, file);
1076
1077 if (ret > 0 && tracing_alloc_snapshot_instance(file->tr) != 0) {
1078 unregister_trigger(glob, ops, data, file);
1079 ret = 0;
1080 }
1081
1082 return ret;
1083 }
1084
1085 static int
1086 snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1087 struct event_trigger_data *data)
1088 {
1089 return event_trigger_print("snapshot", m, (void *)data->count,
1090 data->filter_str);
1091 }
1092
1093 static struct event_trigger_ops snapshot_trigger_ops = {
1094 .func = snapshot_trigger,
1095 .print = snapshot_trigger_print,
1096 .init = event_trigger_init,
1097 .free = event_trigger_free,
1098 };
1099
1100 static struct event_trigger_ops snapshot_count_trigger_ops = {
1101 .func = snapshot_count_trigger,
1102 .print = snapshot_trigger_print,
1103 .init = event_trigger_init,
1104 .free = event_trigger_free,
1105 };
1106
1107 static struct event_trigger_ops *
1108 snapshot_get_trigger_ops(char *cmd, char *param)
1109 {
1110 return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1111 }
1112
1113 static struct event_command trigger_snapshot_cmd = {
1114 .name = "snapshot",
1115 .trigger_type = ETT_SNAPSHOT,
1116 .func = event_trigger_callback,
1117 .reg = register_snapshot_trigger,
1118 .unreg = unregister_trigger,
1119 .get_trigger_ops = snapshot_get_trigger_ops,
1120 .set_filter = set_trigger_filter,
1121 };
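
/*
 * Editorial example (not part of the original file): typical usage of the
 * snapshot command registered above, in the spirit of the trigger
 * documentation:
 *
 *	echo 'snapshot:1' > events/block/block_unplug/trigger
 *
 * register_snapshot_trigger() additionally makes sure the instance's
 * snapshot buffer is allocated before the trigger can ever fire.
 */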
1122
1123 static __init int register_trigger_snapshot_cmd(void)
1124 {
1125 int ret;
1126
1127 ret = register_event_command(&trigger_snapshot_cmd);
1128 WARN_ON(ret < 0);
1129
1130 return ret;
1131 }
1132 #else
1133 static __init int register_trigger_snapshot_cmd(void) { return 0; }
1134 #endif /* CONFIG_TRACER_SNAPSHOT */
1135
1136 #ifdef CONFIG_STACKTRACE
1137 #ifdef CONFIG_UNWINDER_ORC
1138 /* Skip 2:
1139 * event_triggers_post_call()
1140 * trace_event_raw_event_xxx()
1141 */
1142 # define STACK_SKIP 2
1143 #else
1144 /*
1145 * Skip 4:
1146 * stacktrace_trigger()
1147 * event_triggers_post_call()
1148 * trace_event_buffer_commit()
1149 * trace_event_raw_event_xxx()
1150 */
1151 #define STACK_SKIP 4
1152 #endif
1153
1154 static void
1155 stacktrace_trigger(struct event_trigger_data *data, void *rec)
1156 {
1157 trace_dump_stack(STACK_SKIP);
1158 }
1159
1160 static void
1161 stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
1162 {
1163 if (!data->count)
1164 return;
1165
1166 if (data->count != -1)
1167 (data->count)--;
1168
1169 stacktrace_trigger(data, rec);
1170 }
1171
1172 static int
1173 stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1174 struct event_trigger_data *data)
1175 {
1176 return event_trigger_print("stacktrace", m, (void *)data->count,
1177 data->filter_str);
1178 }
1179
1180 static struct event_trigger_ops stacktrace_trigger_ops = {
1181 .func = stacktrace_trigger,
1182 .print = stacktrace_trigger_print,
1183 .init = event_trigger_init,
1184 .free = event_trigger_free,
1185 };
1186
1187 static struct event_trigger_ops stacktrace_count_trigger_ops = {
1188 .func = stacktrace_count_trigger,
1189 .print = stacktrace_trigger_print,
1190 .init = event_trigger_init,
1191 .free = event_trigger_free,
1192 };
1193
1194 static struct event_trigger_ops *
1195 stacktrace_get_trigger_ops(char *cmd, char *param)
1196 {
1197 return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1198 }
1199
1200 static struct event_command trigger_stacktrace_cmd = {
1201 .name = "stacktrace",
1202 .trigger_type = ETT_STACKTRACE,
1203 .flags = EVENT_CMD_FL_POST_TRIGGER,
1204 .func = event_trigger_callback,
1205 .reg = register_trigger,
1206 .unreg = unregister_trigger,
1207 .get_trigger_ops = stacktrace_get_trigger_ops,
1208 .set_filter = set_trigger_filter,
1209 };
1210
1211 static __init int register_trigger_stacktrace_cmd(void)
1212 {
1213 int ret;
1214
1215 ret = register_event_command(&trigger_stacktrace_cmd);
1216 WARN_ON(ret < 0);
1217
1218 return ret;
1219 }
1220 #else
1221 static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1222 #endif /* CONFIG_STACKTRACE */
1223
1224 static __init void unregister_trigger_traceon_traceoff_cmds(void)
1225 {
1226 unregister_event_command(&trigger_traceon_cmd);
1227 unregister_event_command(&trigger_traceoff_cmd);
1228 }
1229
1230 static void
1231 event_enable_trigger(struct event_trigger_data *data, void *rec)
1232 {
1233 struct enable_trigger_data *enable_data = data->private_data;
1234
1235 if (enable_data->enable)
1236 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1237 else
1238 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1239 }
1240
1241 static void
1242 event_enable_count_trigger(struct event_trigger_data *data, void *rec)
1243 {
1244 struct enable_trigger_data *enable_data = data->private_data;
1245
1246 if (!data->count)
1247 return;
1248
1249 /* Skip if the event is in a state we want to switch to */
1250 if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1251 return;
1252
1253 if (data->count != -1)
1254 (data->count)--;
1255
1256 event_enable_trigger(data, rec);
1257 }
1258
1259 int event_enable_trigger_print(struct seq_file *m,
1260 struct event_trigger_ops *ops,
1261 struct event_trigger_data *data)
1262 {
1263 struct enable_trigger_data *enable_data = data->private_data;
1264
1265 seq_printf(m, "%s:%s:%s",
1266 enable_data->hist ?
1267 (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1268 (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1269 enable_data->file->event_call->class->system,
1270 trace_event_name(enable_data->file->event_call));
1271
1272 if (data->count == -1)
1273 seq_puts(m, ":unlimited");
1274 else
1275 seq_printf(m, ":count=%ld", data->count);
1276
1277 if (data->filter_str)
1278 seq_printf(m, " if %s\n", data->filter_str);
1279 else
1280 seq_putc(m, '\n');
1281
1282 return 0;
1283 }
1284
1285 void event_enable_trigger_free(struct event_trigger_ops *ops,
1286 struct event_trigger_data *data)
1287 {
1288 struct enable_trigger_data *enable_data = data->private_data;
1289
1290 if (WARN_ON_ONCE(data->ref <= 0))
1291 return;
1292
1293 data->ref--;
1294 if (!data->ref) {
1295 /* Remove the SOFT_MODE flag */
1296 trace_event_enable_disable(enable_data->file, 0, 1);
1297 module_put(enable_data->file->event_call->mod);
1298 trigger_data_free(data);
1299 kfree(enable_data);
1300 }
1301 }
1302
1303 static struct event_trigger_ops event_enable_trigger_ops = {
1304 .func = event_enable_trigger,
1305 .print = event_enable_trigger_print,
1306 .init = event_trigger_init,
1307 .free = event_enable_trigger_free,
1308 };
1309
1310 static struct event_trigger_ops event_enable_count_trigger_ops = {
1311 .func = event_enable_count_trigger,
1312 .print = event_enable_trigger_print,
1313 .init = event_trigger_init,
1314 .free = event_enable_trigger_free,
1315 };
1316
1317 static struct event_trigger_ops event_disable_trigger_ops = {
1318 .func = event_enable_trigger,
1319 .print = event_enable_trigger_print,
1320 .init = event_trigger_init,
1321 .free = event_enable_trigger_free,
1322 };
1323
1324 static struct event_trigger_ops event_disable_count_trigger_ops = {
1325 .func = event_enable_count_trigger,
1326 .print = event_enable_trigger_print,
1327 .init = event_trigger_init,
1328 .free = event_enable_trigger_free,
1329 };
1330
1331 int event_enable_trigger_func(struct event_command *cmd_ops,
1332 struct trace_event_file *file,
1333 char *glob, char *cmd, char *param)
1334 {
1335 struct trace_event_file *event_enable_file;
1336 struct enable_trigger_data *enable_data;
1337 struct event_trigger_data *trigger_data;
1338 struct event_trigger_ops *trigger_ops;
1339 struct trace_array *tr = file->tr;
1340 const char *system;
1341 const char *event;
1342 bool hist = false;
1343 char *trigger;
1344 char *number;
1345 bool enable;
1346 int ret;
1347
1348 if (!param)
1349 return -EINVAL;
1350
1351 /* separate the trigger from the filter (s:e:n [if filter]) */
1352 trigger = strsep(&param, " \t");
1353 if (!trigger)
1354 return -EINVAL;
1355
1356 system = strsep(&trigger, ":");
1357 if (!trigger)
1358 return -EINVAL;
1359
1360 event = strsep(&trigger, ":");
1361
1362 ret = -EINVAL;
1363 event_enable_file = find_event_file(tr, system, event);
1364 if (!event_enable_file)
1365 goto out;
1366
1367 #ifdef CONFIG_HIST_TRIGGERS
1368 hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1369 (strcmp(cmd, DISABLE_HIST_STR) == 0));
1370
1371 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1372 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1373 #else
1374 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1375 #endif
1376 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1377
1378 ret = -ENOMEM;
1379 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1380 if (!trigger_data)
1381 goto out;
1382
1383 enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1384 if (!enable_data) {
1385 kfree(trigger_data);
1386 goto out;
1387 }
1388
1389 trigger_data->count = -1;
1390 trigger_data->ops = trigger_ops;
1391 trigger_data->cmd_ops = cmd_ops;
1392 INIT_LIST_HEAD(&trigger_data->list);
1393 RCU_INIT_POINTER(trigger_data->filter, NULL);
1394
1395 enable_data->hist = hist;
1396 enable_data->enable = enable;
1397 enable_data->file = event_enable_file;
1398 trigger_data->private_data = enable_data;
1399
1400 if (glob[0] == '!') {
1401 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1402 kfree(trigger_data);
1403 kfree(enable_data);
1404 ret = 0;
1405 goto out;
1406 }
1407
1408 if (trigger) {
1409 number = strsep(&trigger, ":");
1410
1411 ret = -EINVAL;
1412 if (!strlen(number))
1413 goto out_free;
1414
1415 /*
1416 * We use the callback data field (which is a pointer)
1417 * as our counter.
1418 */
1419 ret = kstrtoul(number, 0, &trigger_data->count);
1420 if (ret)
1421 goto out_free;
1422 }
1423
1424 if (!param) /* if param is non-empty, it's supposed to be a filter */
1425 goto out_reg;
1426
1427 if (!cmd_ops->set_filter)
1428 goto out_reg;
1429
1430 ret = cmd_ops->set_filter(param, trigger_data, file);
1431 if (ret < 0)
1432 goto out_free;
1433
1434 out_reg:
1435 /* Don't let event modules unload while probe registered */
1436 ret = try_module_get(event_enable_file->event_call->mod);
1437 if (!ret) {
1438 ret = -EBUSY;
1439 goto out_free;
1440 }
1441
1442 ret = trace_event_enable_disable(event_enable_file, 1, 1);
1443 if (ret < 0)
1444 goto out_put;
1445 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1446 /*
1447 * The above returns on success the # of functions enabled,
1448 * but if it didn't find any functions it returns zero.
1449 * Consider no functions a failure too.
1450 */
1451 if (!ret) {
1452 ret = -ENOENT;
1453 goto out_disable;
1454 } else if (ret < 0)
1455 goto out_disable;
1456 /* Just return zero, not the number of enabled functions */
1457 ret = 0;
1458 out:
1459 return ret;
1460
1461 out_disable:
1462 trace_event_enable_disable(event_enable_file, 0, 1);
1463 out_put:
1464 module_put(event_enable_file->event_call->mod);
1465 out_free:
1466 if (cmd_ops->set_filter)
1467 cmd_ops->set_filter(NULL, trigger_data, NULL);
1468 kfree(trigger_data);
1469 kfree(enable_data);
1470 goto out;
1471 }
1472
1473 int event_enable_register_trigger(char *glob,
1474 struct event_trigger_ops *ops,
1475 struct event_trigger_data *data,
1476 struct trace_event_file *file)
1477 {
1478 struct enable_trigger_data *enable_data = data->private_data;
1479 struct enable_trigger_data *test_enable_data;
1480 struct event_trigger_data *test;
1481 int ret = 0;
1482
1483 list_for_each_entry_rcu(test, &file->triggers, list) {
1484 test_enable_data = test->private_data;
1485 if (test_enable_data &&
1486 (test->cmd_ops->trigger_type ==
1487 data->cmd_ops->trigger_type) &&
1488 (test_enable_data->file == enable_data->file)) {
1489 ret = -EEXIST;
1490 goto out;
1491 }
1492 }
1493
1494 if (data->ops->init) {
1495 ret = data->ops->init(data->ops, data);
1496 if (ret < 0)
1497 goto out;
1498 }
1499
1500 list_add_rcu(&data->list, &file->triggers);
1501 ret++;
1502
1503 update_cond_flag(file);
1504 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1505 list_del_rcu(&data->list);
1506 update_cond_flag(file);
1507 ret--;
1508 }
1509 out:
1510 return ret;
1511 }
1512
1513 void event_enable_unregister_trigger(char *glob,
1514 struct event_trigger_ops *ops,
1515 struct event_trigger_data *test,
1516 struct trace_event_file *file)
1517 {
1518 struct enable_trigger_data *test_enable_data = test->private_data;
1519 struct enable_trigger_data *enable_data;
1520 struct event_trigger_data *data;
1521 bool unregistered = false;
1522
1523 list_for_each_entry_rcu(data, &file->triggers, list) {
1524 enable_data = data->private_data;
1525 if (enable_data &&
1526 (data->cmd_ops->trigger_type ==
1527 test->cmd_ops->trigger_type) &&
1528 (enable_data->file == test_enable_data->file)) {
1529 unregistered = true;
1530 list_del_rcu(&data->list);
1531 trace_event_trigger_enable_disable(file, 0);
1532 update_cond_flag(file);
1533 break;
1534 }
1535 }
1536
1537 if (unregistered && data->ops->free)
1538 data->ops->free(data->ops, data);
1539 }
1540
1541 static struct event_trigger_ops *
1542 event_enable_get_trigger_ops(char *cmd, char *param)
1543 {
1544 struct event_trigger_ops *ops;
1545 bool enable;
1546
1547 #ifdef CONFIG_HIST_TRIGGERS
1548 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1549 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1550 #else
1551 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1552 #endif
1553 if (enable)
1554 ops = param ? &event_enable_count_trigger_ops :
1555 &event_enable_trigger_ops;
1556 else
1557 ops = param ? &event_disable_count_trigger_ops :
1558 &event_disable_trigger_ops;
1559
1560 return ops;
1561 }
1562
1563 static struct event_command trigger_enable_cmd = {
1564 .name = ENABLE_EVENT_STR,
1565 .trigger_type = ETT_EVENT_ENABLE,
1566 .func = event_enable_trigger_func,
1567 .reg = event_enable_register_trigger,
1568 .unreg = event_enable_unregister_trigger,
1569 .get_trigger_ops = event_enable_get_trigger_ops,
1570 .set_filter = set_trigger_filter,
1571 };
1572
1573 static struct event_command trigger_disable_cmd = {
1574 .name = DISABLE_EVENT_STR,
1575 .trigger_type = ETT_EVENT_ENABLE,
1576 .func = event_enable_trigger_func,
1577 .reg = event_enable_register_trigger,
1578 .unreg = event_enable_unregister_trigger,
1579 .get_trigger_ops = event_enable_get_trigger_ops,
1580 .set_filter = set_trigger_filter,
1581 };
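
/*
 * Editorial example (not part of the original file): the enable_event and
 * disable_event commands registered above take a '<system>:<event>' target
 * before the optional count and filter, e.g.:
 *
 *	echo 'enable_event:kmem:kmalloc:1' > \
 *		events/syscalls/sys_enter_read/trigger
 *	echo '!enable_event:kmem:kmalloc' > \
 *		events/syscalls/sys_enter_read/trigger
 *
 * The first soft-enables kmem:kmalloc the first time sys_enter_read is
 * hit; the second removes that trigger again.
 */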
1582
1583 static __init void unregister_trigger_enable_disable_cmds(void)
1584 {
1585 unregister_event_command(&trigger_enable_cmd);
1586 unregister_event_command(&trigger_disable_cmd);
1587 }
1588
1589 static __init int register_trigger_enable_disable_cmds(void)
1590 {
1591 int ret;
1592
1593 ret = register_event_command(&trigger_enable_cmd);
1594 if (WARN_ON(ret < 0))
1595 return ret;
1596 ret = register_event_command(&trigger_disable_cmd);
1597 if (WARN_ON(ret < 0))
1598 unregister_trigger_enable_disable_cmds();
1599
1600 return ret;
1601 }
1602
1603 static __init int register_trigger_traceon_traceoff_cmds(void)
1604 {
1605 int ret;
1606
1607 ret = register_event_command(&trigger_traceon_cmd);
1608 if (WARN_ON(ret < 0))
1609 return ret;
1610 ret = register_event_command(&trigger_traceoff_cmd);
1611 if (WARN_ON(ret < 0))
1612 unregister_trigger_traceon_traceoff_cmds();
1613
1614 return ret;
1615 }
1616
1617 __init int register_trigger_cmds(void)
1618 {
1619 register_trigger_traceon_traceoff_cmds();
1620 register_trigger_snapshot_cmd();
1621 register_trigger_stacktrace_cmd();
1622 register_trigger_enable_disable_cmds();
1623 register_trigger_hist_enable_disable_cmds();
1624 register_trigger_hist_cmd();
1625
1626 return 0;
1627 }