kernel/trace/trace_events_trigger.c
1 /*
2 * trace_events_trigger - trace event triggers
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
19 */
20
21 #include <linux/module.h>
22 #include <linux/ctype.h>
23 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/rculist.h>
26
27 #include "trace.h"
28
29 static LIST_HEAD(trigger_commands);
30 static DEFINE_MUTEX(trigger_cmd_mutex);
31
32 void trigger_data_free(struct event_trigger_data *data)
33 {
34 if (data->cmd_ops->set_filter)
35 data->cmd_ops->set_filter(NULL, data, NULL);
36
37 synchronize_sched(); /* make sure current triggers exit before free */
38 kfree(data);
39 }
40
41 /**
42 * event_triggers_call - Call triggers associated with a trace event
43 * @file: The trace_event_file associated with the event
44 * @rec: The trace entry for the event, NULL for unconditional invocation
45 *
46 * For each trigger associated with an event, invoke the trigger
47 * function registered with the associated trigger command. If rec is
48 * non-NULL, it means that the trigger requires further processing and
49 * shouldn't be unconditionally invoked. If rec is non-NULL and the
50  * trigger has a filter associated with it, rec will be checked against
51  * the filter, and if the record matches, the trigger will be invoked.
52 * If the trigger is a 'post_trigger', meaning it shouldn't be invoked
53 * in any case until the current event is written, the trigger
54 * function isn't invoked but the bit associated with the deferred
55 * trigger is set in the return value.
56  *
60  * Called from tracepoint handlers (with rcu_read_lock_sched() held).
61  *
62  * Return: an enum event_trigger_type value containing a set bit for
63  * any trigger that should be deferred, ETT_NONE if nothing to defer.
64  */
65 enum event_trigger_type
66 event_triggers_call(struct trace_event_file *file, void *rec)
67 {
68 struct event_trigger_data *data;
69 enum event_trigger_type tt = ETT_NONE;
70 struct event_filter *filter;
71
72 if (list_empty(&file->triggers))
73 return tt;
74
75 list_for_each_entry_rcu(data, &file->triggers, list) {
76 if (data->paused)
77 continue;
78 if (!rec) {
79 data->ops->func(data, rec);
80 continue;
81 }
82 filter = rcu_dereference_sched(data->filter);
83 if (filter && !filter_match_preds(filter, rec))
84 continue;
85 if (event_command_post_trigger(data->cmd_ops)) {
86 tt |= data->cmd_ops->trigger_type;
87 continue;
88 }
89 data->ops->func(data, rec);
90 }
91 return tt;
92 }
93 EXPORT_SYMBOL_GPL(event_triggers_call);
94
95 /**
96 * event_triggers_post_call - Call 'post_triggers' for a trace event
97 * @file: The trace_event_file associated with the event
98 * @tt: enum event_trigger_type containing a set bit for each trigger to invoke
99 * @rec: The trace entry for the event
100 *
101 * For each trigger associated with an event, invoke the trigger
102 * function registered with the associated trigger command, if the
103 * corresponding bit is set in the tt enum passed into this function.
104 * See @event_triggers_call for details on how those bits are set.
105 *
106 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
107 */
108 void
109 event_triggers_post_call(struct trace_event_file *file,
110 enum event_trigger_type tt,
111 void *rec)
112 {
113 struct event_trigger_data *data;
114
115 list_for_each_entry_rcu(data, &file->triggers, list) {
116 if (data->paused)
117 continue;
118 if (data->cmd_ops->trigger_type & tt)
119 data->ops->func(data, rec);
120 }
121 }
122 EXPORT_SYMBOL_GPL(event_triggers_post_call);
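
/*
 * Illustrative sketch of how the two entry points above are expected to
 * be paired by the trace event commit helpers (simplified; 'entry' is a
 * placeholder name): triggers that need the event data to be logged
 * first are deferred via the returned bitmask and run afterwards.
 *
 *	enum event_trigger_type tt;
 *
 *	tt = event_triggers_call(file, entry);
 *	... commit the entry to the ring buffer ...
 *	if (tt)
 *		event_triggers_post_call(file, tt, entry);
 */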
123
124 #define SHOW_AVAILABLE_TRIGGERS (void *)(1UL)
125
126 static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
127 {
128 struct trace_event_file *event_file = event_file_data(m->private);
129
130 if (t == SHOW_AVAILABLE_TRIGGERS)
131 return NULL;
132
133 return seq_list_next(t, &event_file->triggers, pos);
134 }
135
136 static void *trigger_start(struct seq_file *m, loff_t *pos)
137 {
138 struct trace_event_file *event_file;
139
140 /* ->stop() is called even if ->start() fails */
141 mutex_lock(&event_mutex);
142 event_file = event_file_data(m->private);
143 if (unlikely(!event_file))
144 return ERR_PTR(-ENODEV);
145
146 if (list_empty(&event_file->triggers))
147 return *pos == 0 ? SHOW_AVAILABLE_TRIGGERS : NULL;
148
149 return seq_list_start(&event_file->triggers, *pos);
150 }
151
152 static void trigger_stop(struct seq_file *m, void *t)
153 {
154 mutex_unlock(&event_mutex);
155 }
156
157 static int trigger_show(struct seq_file *m, void *v)
158 {
159 struct event_trigger_data *data;
160 struct event_command *p;
161
162 if (v == SHOW_AVAILABLE_TRIGGERS) {
163 seq_puts(m, "# Available triggers:\n");
164 seq_putc(m, '#');
165 mutex_lock(&trigger_cmd_mutex);
166 list_for_each_entry_reverse(p, &trigger_commands, list)
167 seq_printf(m, " %s", p->name);
168 seq_putc(m, '\n');
169 mutex_unlock(&trigger_cmd_mutex);
170 return 0;
171 }
172
173 data = list_entry(v, struct event_trigger_data, list);
174 data->ops->print(m, data->ops, data);
175
176 return 0;
177 }
178
179 static const struct seq_operations event_triggers_seq_ops = {
180 .start = trigger_start,
181 .next = trigger_next,
182 .stop = trigger_stop,
183 .show = trigger_show,
184 };
185
186 static int event_trigger_regex_open(struct inode *inode, struct file *file)
187 {
188 int ret = 0;
189
190 mutex_lock(&event_mutex);
191
192 if (unlikely(!event_file_data(file))) {
193 mutex_unlock(&event_mutex);
194 return -ENODEV;
195 }
196
197 if ((file->f_mode & FMODE_WRITE) &&
198 (file->f_flags & O_TRUNC)) {
199 struct trace_event_file *event_file;
200 struct event_command *p;
201
202 event_file = event_file_data(file);
203
204 list_for_each_entry(p, &trigger_commands, list) {
205 if (p->unreg_all)
206 p->unreg_all(event_file);
207 }
208 }
209
210 if (file->f_mode & FMODE_READ) {
211 ret = seq_open(file, &event_triggers_seq_ops);
212 if (!ret) {
213 struct seq_file *m = file->private_data;
214 m->private = file;
215 }
216 }
217
218 mutex_unlock(&event_mutex);
219
220 return ret;
221 }
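
/*
 * Note on the O_TRUNC branch above: opening the per-event 'trigger' file
 * for writing with truncation (for example, a plain shell redirect that
 * truncates the file) gives every registered command a chance to remove
 * all of its triggers via unreg_all() before anything new is written.
 */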
222
223 static int trigger_process_regex(struct trace_event_file *file, char *buff)
224 {
225 char *command, *next = buff;
226 struct event_command *p;
227 int ret = -EINVAL;
228
229 command = strsep(&next, ": \t");
230 command = (command[0] != '!') ? command : command + 1;
231
232 mutex_lock(&trigger_cmd_mutex);
233 list_for_each_entry(p, &trigger_commands, list) {
234 if (strcmp(p->name, command) == 0) {
235 ret = p->func(p, file, buff, command, next);
236 goto out_unlock;
237 }
238 }
239 out_unlock:
240 mutex_unlock(&trigger_cmd_mutex);
241
242 return ret;
243 }
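
/*
 * Example of what trigger_process_regex() sees for a typical write
 * (illustrative input): for "traceoff:5 if prev_pid == 0", strsep()
 * splits off command = "traceoff", and the matching command's func() is
 * handed the original buffer as glob, "traceoff" as cmd and
 * "5 if prev_pid == 0" as param. A leading '!' is skipped only for the
 * name lookup; it stays in the buffer so the command can detect a
 * removal request.
 */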
244
245 static ssize_t event_trigger_regex_write(struct file *file,
246 const char __user *ubuf,
247 size_t cnt, loff_t *ppos)
248 {
249 struct trace_event_file *event_file;
250 ssize_t ret;
251 char *buf;
252
253 if (!cnt)
254 return 0;
255
256 if (cnt >= PAGE_SIZE)
257 return -EINVAL;
258
259 buf = memdup_user_nul(ubuf, cnt);
260 if (IS_ERR(buf))
261 return PTR_ERR(buf);
262
263 strim(buf);
264
265 mutex_lock(&event_mutex);
266 event_file = event_file_data(file);
267 if (unlikely(!event_file)) {
268 mutex_unlock(&event_mutex);
269 kfree(buf);
270 return -ENODEV;
271 }
272 ret = trigger_process_regex(event_file, buf);
273 mutex_unlock(&event_mutex);
274
275 kfree(buf);
276 if (ret < 0)
277 goto out;
278
279 *ppos += cnt;
280 ret = cnt;
281 out:
282 return ret;
283 }
284
285 static int event_trigger_regex_release(struct inode *inode, struct file *file)
286 {
287 mutex_lock(&event_mutex);
288
289 if (file->f_mode & FMODE_READ)
290 seq_release(inode, file);
291
292 mutex_unlock(&event_mutex);
293
294 return 0;
295 }
296
297 static ssize_t
298 event_trigger_write(struct file *filp, const char __user *ubuf,
299 size_t cnt, loff_t *ppos)
300 {
301 return event_trigger_regex_write(filp, ubuf, cnt, ppos);
302 }
303
304 static int
305 event_trigger_open(struct inode *inode, struct file *filp)
306 {
307 return event_trigger_regex_open(inode, filp);
308 }
309
310 static int
311 event_trigger_release(struct inode *inode, struct file *file)
312 {
313 return event_trigger_regex_release(inode, file);
314 }
315
316 const struct file_operations event_trigger_fops = {
317 .open = event_trigger_open,
318 .read = seq_read,
319 .write = event_trigger_write,
320 .llseek = tracing_lseek,
321 .release = event_trigger_release,
322 };
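
/*
 * The fops above back the per-event 'trigger' file in the tracing
 * directory. Illustrative interaction from user space (event, field and
 * mount point are examples only):
 *
 *	cat /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *	echo 'traceoff:1 if prev_prio < 100' > /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *	echo '!traceoff' > /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *
 * Reading shows the available commands (as '#' comment lines) when no
 * trigger is set, or the currently attached triggers otherwise; writing
 * adds a trigger, and a leading '!' removes it.
 */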
323
324 /*
325 * Currently we only register event commands from __init, so mark this
326 * __init too.
327 */
328 __init int register_event_command(struct event_command *cmd)
329 {
330 struct event_command *p;
331 int ret = 0;
332
333 mutex_lock(&trigger_cmd_mutex);
334 list_for_each_entry(p, &trigger_commands, list) {
335 if (strcmp(cmd->name, p->name) == 0) {
336 ret = -EBUSY;
337 goto out_unlock;
338 }
339 }
340 list_add(&cmd->list, &trigger_commands);
341 out_unlock:
342 mutex_unlock(&trigger_cmd_mutex);
343
344 return ret;
345 }
346
347 /*
348 * Currently we only unregister event commands from __init, so mark
349 * this __init too.
350 */
351 __init int unregister_event_command(struct event_command *cmd)
352 {
353 struct event_command *p, *n;
354 int ret = -ENODEV;
355
356 mutex_lock(&trigger_cmd_mutex);
357 list_for_each_entry_safe(p, n, &trigger_commands, list) {
358 if (strcmp(cmd->name, p->name) == 0) {
359 ret = 0;
360 list_del_init(&p->list);
361 goto out_unlock;
362 }
363 }
364 out_unlock:
365 mutex_unlock(&trigger_cmd_mutex);
366
367 return ret;
368 }
369
370 /**
371 * event_trigger_print - Generic event_trigger_ops @print implementation
372 * @name: The name of the event trigger
373 * @m: The seq_file being printed to
374 * @data: Trigger-specific data
375 * @filter_str: filter_str to print, if present
376 *
377 * Common implementation for event triggers to print themselves.
378 *
379 * Usually wrapped by a function that simply sets the @name of the
380 * trigger command and then invokes this.
381 *
382 * Return: 0 on success, errno otherwise
383 */
384 static int
385 event_trigger_print(const char *name, struct seq_file *m,
386 void *data, char *filter_str)
387 {
388 long count = (long)data;
389
390 seq_puts(m, name);
391
392 if (count == -1)
393 seq_puts(m, ":unlimited");
394 else
395 seq_printf(m, ":count=%ld", count);
396
397 if (filter_str)
398 seq_printf(m, " if %s\n", filter_str);
399 else
400 seq_putc(m, '\n');
401
402 return 0;
403 }
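
/*
 * For reference, the output produced above mirrors what was written to
 * the trigger file, e.g. (illustrative) "traceoff:count=5 if prev_pid == 0"
 * for a counted, filtered trigger, or "traceon:unlimited" when no count
 * was given.
 */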
404
405 /**
406 * event_trigger_init - Generic event_trigger_ops @init implementation
407 * @ops: The trigger ops associated with the trigger
408 * @data: Trigger-specific data
409 *
410 * Common implementation of event trigger initialization.
411 *
412 * Usually used directly as the @init method in event trigger
413 * implementations.
414 *
415 * Return: 0 on success, errno otherwise
416 */
417 int event_trigger_init(struct event_trigger_ops *ops,
418 struct event_trigger_data *data)
419 {
420 data->ref++;
421 return 0;
422 }
423
424 /**
425 * event_trigger_free - Generic event_trigger_ops @free implementation
426 * @ops: The trigger ops associated with the trigger
427 * @data: Trigger-specific data
428 *
429 * Common implementation of event trigger de-initialization.
430 *
431 * Usually used directly as the @free method in event trigger
432 * implementations.
433 */
434 static void
435 event_trigger_free(struct event_trigger_ops *ops,
436 struct event_trigger_data *data)
437 {
438 if (WARN_ON_ONCE(data->ref <= 0))
439 return;
440
441 data->ref--;
442 if (!data->ref)
443 trigger_data_free(data);
444 }
445
446 int trace_event_trigger_enable_disable(struct trace_event_file *file,
447 int trigger_enable)
448 {
449 int ret = 0;
450
451 if (trigger_enable) {
452 if (atomic_inc_return(&file->tm_ref) > 1)
453 return ret;
454 set_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
455 ret = trace_event_enable_disable(file, 1, 1);
456 } else {
457 if (atomic_dec_return(&file->tm_ref) > 0)
458 return ret;
459 clear_bit(EVENT_FILE_FL_TRIGGER_MODE_BIT, &file->flags);
460 ret = trace_event_enable_disable(file, 0, 1);
461 }
462
463 return ret;
464 }
465
466 /**
467 * clear_event_triggers - Clear all triggers associated with a trace array
468 * @tr: The trace array to clear
469 *
470 * For each trigger, the triggering event has its tm_ref decremented
471 * via trace_event_trigger_enable_disable(), and any associated event
472 * (in the case of enable/disable_event triggers) will have its sm_ref
473 * decremented via free()->trace_event_enable_disable(). That
474 * combination effectively reverses the soft-mode/trigger state added
475 * by trigger registration.
476 *
477 * Must be called with event_mutex held.
478 */
479 void
480 clear_event_triggers(struct trace_array *tr)
481 {
482 struct trace_event_file *file;
483
484 list_for_each_entry(file, &tr->events, list) {
485 struct event_trigger_data *data;
486 list_for_each_entry_rcu(data, &file->triggers, list) {
487 trace_event_trigger_enable_disable(file, 0);
488 if (data->ops->free)
489 data->ops->free(data->ops, data);
490 }
491 }
492 }
493
494 /**
495 * update_cond_flag - Set or reset the TRIGGER_COND bit
496 * @file: The trace_event_file associated with the event
497 *
498  * If an event has triggers and any of those triggers has a filter or
499  * a post_trigger, trigger invocation needs to be deferred until after
500  * the current event has logged its data; in that case the event should
501  * have its TRIGGER_COND bit set, otherwise the TRIGGER_COND bit should
502  * be cleared.
503 */
504 void update_cond_flag(struct trace_event_file *file)
505 {
506 struct event_trigger_data *data;
507 bool set_cond = false;
508
509 list_for_each_entry_rcu(data, &file->triggers, list) {
510 if (data->filter || event_command_post_trigger(data->cmd_ops) ||
511 event_command_needs_rec(data->cmd_ops)) {
512 set_cond = true;
513 break;
514 }
515 }
516
517 if (set_cond)
518 set_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
519 else
520 clear_bit(EVENT_FILE_FL_TRIGGER_COND_BIT, &file->flags);
521 }
522
523 /**
524 * register_trigger - Generic event_command @reg implementation
525 * @glob: The raw string used to register the trigger
526 * @ops: The trigger ops associated with the trigger
527 * @data: Trigger-specific data to associate with the trigger
528 * @file: The trace_event_file associated with the event
529 *
530 * Common implementation for event trigger registration.
531 *
532 * Usually used directly as the @reg method in event command
533 * implementations.
534 *
535 * Return: 0 on success, errno otherwise
536 */
537 static int register_trigger(char *glob, struct event_trigger_ops *ops,
538 struct event_trigger_data *data,
539 struct trace_event_file *file)
540 {
541 struct event_trigger_data *test;
542 int ret = 0;
543
544 list_for_each_entry_rcu(test, &file->triggers, list) {
545 if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
546 ret = -EEXIST;
547 goto out;
548 }
549 }
550
551 if (data->ops->init) {
552 ret = data->ops->init(data->ops, data);
553 if (ret < 0)
554 goto out;
555 }
556
557 list_add_rcu(&data->list, &file->triggers);
558 ret++;
559
560 update_cond_flag(file);
561 if (trace_event_trigger_enable_disable(file, 1) < 0) {
562 list_del_rcu(&data->list);
563 update_cond_flag(file);
564 ret--;
565 }
566 out:
567 return ret;
568 }
569
570 /**
571 * unregister_trigger - Generic event_command @unreg implementation
572 * @glob: The raw string used to register the trigger
573 * @ops: The trigger ops associated with the trigger
574 * @test: Trigger-specific data used to find the trigger to remove
575 * @file: The trace_event_file associated with the event
576 *
577 * Common implementation for event trigger unregistration.
578 *
579 * Usually used directly as the @unreg method in event command
580 * implementations.
581 */
582 void unregister_trigger(char *glob, struct event_trigger_ops *ops,
583 struct event_trigger_data *test,
584 struct trace_event_file *file)
585 {
586 struct event_trigger_data *data;
587 bool unregistered = false;
588
589 list_for_each_entry_rcu(data, &file->triggers, list) {
590 if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
591 unregistered = true;
592 list_del_rcu(&data->list);
593 trace_event_trigger_enable_disable(file, 0);
594 update_cond_flag(file);
595 break;
596 }
597 }
598
599 if (unregistered && data->ops->free)
600 data->ops->free(data->ops, data);
601 }
602
603 /**
604 * event_trigger_callback - Generic event_command @func implementation
605 * @cmd_ops: The command ops, used for trigger registration
606 * @file: The trace_event_file associated with the event
607 * @glob: The raw string used to register the trigger
608 * @cmd: The cmd portion of the string used to register the trigger
609 * @param: The params portion of the string used to register the trigger
610 *
611 * Common implementation for event command parsing and trigger
612 * instantiation.
613 *
614 * Usually used directly as the @func method in event command
615 * implementations.
616 *
617 * Return: 0 on success, errno otherwise
618 */
619 static int
620 event_trigger_callback(struct event_command *cmd_ops,
621 struct trace_event_file *file,
622 char *glob, char *cmd, char *param)
623 {
624 struct event_trigger_data *trigger_data;
625 struct event_trigger_ops *trigger_ops;
626 char *trigger = NULL;
627 char *number;
628 int ret;
629
630 /* separate the trigger from the filter (t:n [if filter]) */
631 if (param && isdigit(param[0]))
632 trigger = strsep(&param, " \t");
633
634 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
635
636 ret = -ENOMEM;
637 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
638 if (!trigger_data)
639 goto out;
640
641 trigger_data->count = -1;
642 trigger_data->ops = trigger_ops;
643 trigger_data->cmd_ops = cmd_ops;
644 INIT_LIST_HEAD(&trigger_data->list);
645 INIT_LIST_HEAD(&trigger_data->named_list);
646
647 if (glob[0] == '!') {
648 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
649 kfree(trigger_data);
650 ret = 0;
651 goto out;
652 }
653
654 if (trigger) {
655 number = strsep(&trigger, ":");
656
657 ret = -EINVAL;
658 if (!strlen(number))
659 goto out_free;
660
661 /*
662 * We use the callback data field (which is a pointer)
663 * as our counter.
664 */
665 ret = kstrtoul(number, 0, &trigger_data->count);
666 if (ret)
667 goto out_free;
668 }
669
670 if (!param) /* if param is non-empty, it's supposed to be a filter */
671 goto out_reg;
672
673 if (!cmd_ops->set_filter)
674 goto out_reg;
675
676 ret = cmd_ops->set_filter(param, trigger_data, file);
677 if (ret < 0)
678 goto out_free;
679
680 out_reg:
681 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
682 /*
683	 * The above returns the number of triggers registered on success,
684	 * but returns zero if it didn't register anything. Consider
685	 * registering nothing a failure too.
686 */
687 if (!ret) {
688 ret = -ENOENT;
689 goto out_free;
690 } else if (ret < 0)
691 goto out_free;
692 ret = 0;
693 out:
694 return ret;
695
696 out_free:
697 if (cmd_ops->set_filter)
698 cmd_ops->set_filter(NULL, trigger_data, NULL);
699 kfree(trigger_data);
700 goto out;
701 }
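
/*
 * Forms handled by event_trigger_callback() above (illustrative):
 *
 *	traceon				unlimited trigger
 *	traceon:5			fire at most five times
 *	traceon:5 if pid == 1		same, gated by a filter
 *	!traceon			remove the trigger
 *
 * The optional ":n" count is parsed into trigger_data->count; -1 means
 * unlimited.
 */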
702
703 /**
704 * set_trigger_filter - Generic event_command @set_filter implementation
705 * @filter_str: The filter string for the trigger, NULL to remove filter
706 * @trigger_data: Trigger-specific data
707 * @file: The trace_event_file associated with the event
708 *
709 * Common implementation for event command filter parsing and filter
710 * instantiation.
711 *
712 * Usually used directly as the @set_filter method in event command
713 * implementations.
714 *
715 * Also used to remove a filter (if filter_str = NULL).
716 *
717 * Return: 0 on success, errno otherwise
718 */
719 int set_trigger_filter(char *filter_str,
720 struct event_trigger_data *trigger_data,
721 struct trace_event_file *file)
722 {
723 struct event_trigger_data *data = trigger_data;
724 struct event_filter *filter = NULL, *tmp;
725 int ret = -EINVAL;
726 char *s;
727
728 if (!filter_str) /* clear the current filter */
729 goto assign;
730
731 s = strsep(&filter_str, " \t");
732
733 if (!strlen(s) || strcmp(s, "if") != 0)
734 goto out;
735
736 if (!filter_str)
737 goto out;
738
739 /* The filter is for the 'trigger' event, not the triggered event */
740 ret = create_event_filter(file->event_call, filter_str, false, &filter);
741 if (ret)
742 goto out;
743 assign:
744 tmp = rcu_access_pointer(data->filter);
745
746 rcu_assign_pointer(data->filter, filter);
747
748 if (tmp) {
749 /* Make sure the call is done with the filter */
750 synchronize_sched();
751 free_event_filter(tmp);
752 }
753
754 kfree(data->filter_str);
755 data->filter_str = NULL;
756
757 if (filter_str) {
758 data->filter_str = kstrdup(filter_str, GFP_KERNEL);
759 if (!data->filter_str) {
760 free_event_filter(rcu_access_pointer(data->filter));
761 data->filter = NULL;
762 ret = -ENOMEM;
763 }
764 }
765 out:
766 return ret;
767 }
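
/*
 * set_trigger_filter() receives everything after the trigger spec, so
 * for a write of "traceoff:1 if prev_pid == 0" it is handed
 * "if prev_pid == 0" (illustrative): the leading "if" is stripped and
 * the remainder is compiled against the triggering event's fields with
 * create_event_filter().
 */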
768
769 static LIST_HEAD(named_triggers);
770
771 /**
772 * find_named_trigger - Find the common named trigger associated with @name
773 * @name: The name of the set of named triggers to find the common data for
774 *
775 * Named triggers are sets of triggers that share a common set of
776 * trigger data. The first named trigger registered with a given name
777 * owns the common trigger data that the others subsequently
778 * registered with the same name will reference. This function
779 * returns the common trigger data associated with that first
780 * registered instance.
781 *
782 * Return: the common trigger data for the given named trigger on
783 * success, NULL otherwise.
784 */
785 struct event_trigger_data *find_named_trigger(const char *name)
786 {
787 struct event_trigger_data *data;
788
789 if (!name)
790 return NULL;
791
792 list_for_each_entry(data, &named_triggers, named_list) {
793 if (data->named_data)
794 continue;
795 if (strcmp(data->name, name) == 0)
796 return data;
797 }
798
799 return NULL;
800 }
801
802 /**
803 * is_named_trigger - determine if a given trigger is a named trigger
804 * @test: The trigger data to test
805 *
806 * Return: true if 'test' is a named trigger, false otherwise.
807 */
808 bool is_named_trigger(struct event_trigger_data *test)
809 {
810 struct event_trigger_data *data;
811
812 list_for_each_entry(data, &named_triggers, named_list) {
813 if (test == data)
814 return true;
815 }
816
817 return false;
818 }
819
820 /**
821 * save_named_trigger - save the trigger in the named trigger list
822 * @name: The name of the named trigger set
823 * @data: The trigger data to save
824 *
825 * Return: 0 if successful, negative error otherwise.
826 */
827 int save_named_trigger(const char *name, struct event_trigger_data *data)
828 {
829 data->name = kstrdup(name, GFP_KERNEL);
830 if (!data->name)
831 return -ENOMEM;
832
833 list_add(&data->named_list, &named_triggers);
834
835 return 0;
836 }
837
838 /**
839 * del_named_trigger - delete a trigger from the named trigger list
840 * @data: The trigger data to delete
841 */
842 void del_named_trigger(struct event_trigger_data *data)
843 {
844 kfree(data->name);
845 data->name = NULL;
846
847 list_del(&data->named_list);
848 }
849
850 static void __pause_named_trigger(struct event_trigger_data *data, bool pause)
851 {
852 struct event_trigger_data *test;
853
854 list_for_each_entry(test, &named_triggers, named_list) {
855 if (strcmp(test->name, data->name) == 0) {
856 if (pause) {
857 test->paused_tmp = test->paused;
858 test->paused = true;
859 } else {
860 test->paused = test->paused_tmp;
861 }
862 }
863 }
864 }
865
866 /**
867 * pause_named_trigger - Pause all named triggers with the same name
868 * @data: The trigger data of a named trigger to pause
869 *
870 * Pauses a named trigger along with all other triggers having the
871 * same name. Because named triggers share a common set of data,
872 * pausing only one is meaningless, so pausing one named trigger needs
873 * to pause all triggers with the same name.
874 */
875 void pause_named_trigger(struct event_trigger_data *data)
876 {
877 __pause_named_trigger(data, true);
878 }
879
880 /**
881 * unpause_named_trigger - Un-pause all named triggers with the same name
882 * @data: The trigger data of a named trigger to unpause
883 *
884 * Un-pauses a named trigger along with all other triggers having the
885 * same name. Because named triggers share a common set of data,
886 * unpausing only one is meaningless, so unpausing one named trigger
887 * needs to unpause all triggers with the same name.
888 */
889 void unpause_named_trigger(struct event_trigger_data *data)
890 {
891 __pause_named_trigger(data, false);
892 }
893
894 /**
895 * set_named_trigger_data - Associate common named trigger data
896  * @data: The trigger data to be associated with the common @named_data
897 *
898 * Named triggers are sets of triggers that share a common set of
899 * trigger data. The first named trigger registered with a given name
900 * owns the common trigger data that the others subsequently
901 * registered with the same name will reference. This function
902 * associates the common trigger data from the first trigger with the
903 * given trigger.
904 */
905 void set_named_trigger_data(struct event_trigger_data *data,
906 struct event_trigger_data *named_data)
907 {
908 data->named_data = named_data;
909 }
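
/*
 * Named triggers are currently used by hist triggers: registering, say,
 * 'hist:name=foo:keys=common_pid' on two different events makes the
 * second one reference the first one's trigger data through named_data,
 * so both events update a single shared histogram (illustrative example;
 * the hist-specific handling lives in trace_events_hist.c).
 */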
910
911 static void
912 traceon_trigger(struct event_trigger_data *data, void *rec)
913 {
914 if (tracing_is_on())
915 return;
916
917 tracing_on();
918 }
919
920 static void
921 traceon_count_trigger(struct event_trigger_data *data, void *rec)
922 {
923 if (tracing_is_on())
924 return;
925
926 if (!data->count)
927 return;
928
929 if (data->count != -1)
930 (data->count)--;
931
932 tracing_on();
933 }
934
935 static void
936 traceoff_trigger(struct event_trigger_data *data, void *rec)
937 {
938 if (!tracing_is_on())
939 return;
940
941 tracing_off();
942 }
943
944 static void
945 traceoff_count_trigger(struct event_trigger_data *data, void *rec)
946 {
947 if (!tracing_is_on())
948 return;
949
950 if (!data->count)
951 return;
952
953 if (data->count != -1)
954 (data->count)--;
955
956 tracing_off();
957 }
958
959 static int
960 traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
961 struct event_trigger_data *data)
962 {
963 return event_trigger_print("traceon", m, (void *)data->count,
964 data->filter_str);
965 }
966
967 static int
968 traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
969 struct event_trigger_data *data)
970 {
971 return event_trigger_print("traceoff", m, (void *)data->count,
972 data->filter_str);
973 }
974
975 static struct event_trigger_ops traceon_trigger_ops = {
976 .func = traceon_trigger,
977 .print = traceon_trigger_print,
978 .init = event_trigger_init,
979 .free = event_trigger_free,
980 };
981
982 static struct event_trigger_ops traceon_count_trigger_ops = {
983 .func = traceon_count_trigger,
984 .print = traceon_trigger_print,
985 .init = event_trigger_init,
986 .free = event_trigger_free,
987 };
988
989 static struct event_trigger_ops traceoff_trigger_ops = {
990 .func = traceoff_trigger,
991 .print = traceoff_trigger_print,
992 .init = event_trigger_init,
993 .free = event_trigger_free,
994 };
995
996 static struct event_trigger_ops traceoff_count_trigger_ops = {
997 .func = traceoff_count_trigger,
998 .print = traceoff_trigger_print,
999 .init = event_trigger_init,
1000 .free = event_trigger_free,
1001 };
1002
1003 static struct event_trigger_ops *
1004 onoff_get_trigger_ops(char *cmd, char *param)
1005 {
1006 struct event_trigger_ops *ops;
1007
1008 /* we register both traceon and traceoff to this callback */
1009 if (strcmp(cmd, "traceon") == 0)
1010 ops = param ? &traceon_count_trigger_ops :
1011 &traceon_trigger_ops;
1012 else
1013 ops = param ? &traceoff_count_trigger_ops :
1014 &traceoff_trigger_ops;
1015
1016 return ops;
1017 }
1018
1019 static struct event_command trigger_traceon_cmd = {
1020 .name = "traceon",
1021 .trigger_type = ETT_TRACE_ONOFF,
1022 .func = event_trigger_callback,
1023 .reg = register_trigger,
1024 .unreg = unregister_trigger,
1025 .get_trigger_ops = onoff_get_trigger_ops,
1026 .set_filter = set_trigger_filter,
1027 };
1028
1029 static struct event_command trigger_traceoff_cmd = {
1030 .name = "traceoff",
1031 .trigger_type = ETT_TRACE_ONOFF,
1032 .flags = EVENT_CMD_FL_POST_TRIGGER,
1033 .func = event_trigger_callback,
1034 .reg = register_trigger,
1035 .unreg = unregister_trigger,
1036 .get_trigger_ops = onoff_get_trigger_ops,
1037 .set_filter = set_trigger_filter,
1038 };
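
/*
 * Of the two commands above, only traceoff is marked
 * EVENT_CMD_FL_POST_TRIGGER, so it runs after the current event has been
 * written; the event that turns tracing off is therefore itself still
 * captured. Illustrative usage (event name is an example):
 *
 *	echo 'traceoff' > /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 */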
1039
1040 #ifdef CONFIG_TRACER_SNAPSHOT
1041 static void
1042 snapshot_trigger(struct event_trigger_data *data, void *rec)
1043 {
1044 tracing_snapshot();
1045 }
1046
1047 static void
1048 snapshot_count_trigger(struct event_trigger_data *data, void *rec)
1049 {
1050 if (!data->count)
1051 return;
1052
1053 if (data->count != -1)
1054 (data->count)--;
1055
1056 snapshot_trigger(data, rec);
1057 }
1058
1059 static int
1060 register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
1061 struct event_trigger_data *data,
1062 struct trace_event_file *file)
1063 {
1064 int ret = register_trigger(glob, ops, data, file);
1065
1066 if (ret > 0 && tracing_alloc_snapshot() != 0) {
1067 unregister_trigger(glob, ops, data, file);
1068 ret = 0;
1069 }
1070
1071 return ret;
1072 }
1073
1074 static int
1075 snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1076 struct event_trigger_data *data)
1077 {
1078 return event_trigger_print("snapshot", m, (void *)data->count,
1079 data->filter_str);
1080 }
1081
1082 static struct event_trigger_ops snapshot_trigger_ops = {
1083 .func = snapshot_trigger,
1084 .print = snapshot_trigger_print,
1085 .init = event_trigger_init,
1086 .free = event_trigger_free,
1087 };
1088
1089 static struct event_trigger_ops snapshot_count_trigger_ops = {
1090 .func = snapshot_count_trigger,
1091 .print = snapshot_trigger_print,
1092 .init = event_trigger_init,
1093 .free = event_trigger_free,
1094 };
1095
1096 static struct event_trigger_ops *
1097 snapshot_get_trigger_ops(char *cmd, char *param)
1098 {
1099 return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
1100 }
1101
1102 static struct event_command trigger_snapshot_cmd = {
1103 .name = "snapshot",
1104 .trigger_type = ETT_SNAPSHOT,
1105 .func = event_trigger_callback,
1106 .reg = register_snapshot_trigger,
1107 .unreg = unregister_trigger,
1108 .get_trigger_ops = snapshot_get_trigger_ops,
1109 .set_filter = set_trigger_filter,
1110 };
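
/*
 * Usage sketch for the snapshot trigger defined above (event and filter
 * are illustrative):
 *
 *	echo 'snapshot:1 if prev_prio < 100' > /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *	cat /sys/kernel/debug/tracing/snapshot
 *
 * register_snapshot_trigger() allocates the snapshot buffer up front via
 * tracing_alloc_snapshot(), so nothing needs to be allocated when the
 * trigger later fires from tracepoint context.
 */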
1111
1112 static __init int register_trigger_snapshot_cmd(void)
1113 {
1114 int ret;
1115
1116 ret = register_event_command(&trigger_snapshot_cmd);
1117 WARN_ON(ret < 0);
1118
1119 return ret;
1120 }
1121 #else
1122 static __init int register_trigger_snapshot_cmd(void) { return 0; }
1123 #endif /* CONFIG_TRACER_SNAPSHOT */
1124
1125 #ifdef CONFIG_STACKTRACE
1126 /*
1127 * Skip 3:
1128 * stacktrace_trigger()
1129 * event_triggers_post_call()
1130 * trace_event_raw_event_xxx()
1131 */
1132 #define STACK_SKIP 3
1133
1134 static void
1135 stacktrace_trigger(struct event_trigger_data *data, void *rec)
1136 {
1137 trace_dump_stack(STACK_SKIP);
1138 }
1139
1140 static void
1141 stacktrace_count_trigger(struct event_trigger_data *data, void *rec)
1142 {
1143 if (!data->count)
1144 return;
1145
1146 if (data->count != -1)
1147 (data->count)--;
1148
1149 stacktrace_trigger(data, rec);
1150 }
1151
1152 static int
1153 stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
1154 struct event_trigger_data *data)
1155 {
1156 return event_trigger_print("stacktrace", m, (void *)data->count,
1157 data->filter_str);
1158 }
1159
1160 static struct event_trigger_ops stacktrace_trigger_ops = {
1161 .func = stacktrace_trigger,
1162 .print = stacktrace_trigger_print,
1163 .init = event_trigger_init,
1164 .free = event_trigger_free,
1165 };
1166
1167 static struct event_trigger_ops stacktrace_count_trigger_ops = {
1168 .func = stacktrace_count_trigger,
1169 .print = stacktrace_trigger_print,
1170 .init = event_trigger_init,
1171 .free = event_trigger_free,
1172 };
1173
1174 static struct event_trigger_ops *
1175 stacktrace_get_trigger_ops(char *cmd, char *param)
1176 {
1177 return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
1178 }
1179
1180 static struct event_command trigger_stacktrace_cmd = {
1181 .name = "stacktrace",
1182 .trigger_type = ETT_STACKTRACE,
1183 .flags = EVENT_CMD_FL_POST_TRIGGER,
1184 .func = event_trigger_callback,
1185 .reg = register_trigger,
1186 .unreg = unregister_trigger,
1187 .get_trigger_ops = stacktrace_get_trigger_ops,
1188 .set_filter = set_trigger_filter,
1189 };
1190
1191 static __init int register_trigger_stacktrace_cmd(void)
1192 {
1193 int ret;
1194
1195 ret = register_event_command(&trigger_stacktrace_cmd);
1196 WARN_ON(ret < 0);
1197
1198 return ret;
1199 }
1200 #else
1201 static __init int register_trigger_stacktrace_cmd(void) { return 0; }
1202 #endif /* CONFIG_STACKTRACE */
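
/*
 * Usage sketch for the stacktrace trigger (event and filter are
 * illustrative):
 *
 *	echo 'stacktrace:5 if prev_pid == 0' > /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *
 * This dumps the kernel stack into the trace buffer for the first five
 * matching events; STACK_SKIP hides the trigger plumbing itself from the
 * reported stack.
 */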
1203
1204 static __init void unregister_trigger_traceon_traceoff_cmds(void)
1205 {
1206 unregister_event_command(&trigger_traceon_cmd);
1207 unregister_event_command(&trigger_traceoff_cmd);
1208 }
1209
1210 static void
1211 event_enable_trigger(struct event_trigger_data *data, void *rec)
1212 {
1213 struct enable_trigger_data *enable_data = data->private_data;
1214
1215 if (enable_data->enable)
1216 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1217 else
1218 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &enable_data->file->flags);
1219 }
1220
1221 static void
1222 event_enable_count_trigger(struct event_trigger_data *data, void *rec)
1223 {
1224 struct enable_trigger_data *enable_data = data->private_data;
1225
1226 if (!data->count)
1227 return;
1228
1229 /* Skip if the event is in a state we want to switch to */
1230 if (enable_data->enable == !(enable_data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
1231 return;
1232
1233 if (data->count != -1)
1234 (data->count)--;
1235
1236 event_enable_trigger(data, rec);
1237 }
1238
1239 int event_enable_trigger_print(struct seq_file *m,
1240 struct event_trigger_ops *ops,
1241 struct event_trigger_data *data)
1242 {
1243 struct enable_trigger_data *enable_data = data->private_data;
1244
1245 seq_printf(m, "%s:%s:%s",
1246 enable_data->hist ?
1247 (enable_data->enable ? ENABLE_HIST_STR : DISABLE_HIST_STR) :
1248 (enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR),
1249 enable_data->file->event_call->class->system,
1250 trace_event_name(enable_data->file->event_call));
1251
1252 if (data->count == -1)
1253 seq_puts(m, ":unlimited");
1254 else
1255 seq_printf(m, ":count=%ld", data->count);
1256
1257 if (data->filter_str)
1258 seq_printf(m, " if %s\n", data->filter_str);
1259 else
1260 seq_putc(m, '\n');
1261
1262 return 0;
1263 }
1264
1265 void event_enable_trigger_free(struct event_trigger_ops *ops,
1266 struct event_trigger_data *data)
1267 {
1268 struct enable_trigger_data *enable_data = data->private_data;
1269
1270 if (WARN_ON_ONCE(data->ref <= 0))
1271 return;
1272
1273 data->ref--;
1274 if (!data->ref) {
1275 /* Remove the SOFT_MODE flag */
1276 trace_event_enable_disable(enable_data->file, 0, 1);
1277 module_put(enable_data->file->event_call->mod);
1278 trigger_data_free(data);
1279 kfree(enable_data);
1280 }
1281 }
1282
1283 static struct event_trigger_ops event_enable_trigger_ops = {
1284 .func = event_enable_trigger,
1285 .print = event_enable_trigger_print,
1286 .init = event_trigger_init,
1287 .free = event_enable_trigger_free,
1288 };
1289
1290 static struct event_trigger_ops event_enable_count_trigger_ops = {
1291 .func = event_enable_count_trigger,
1292 .print = event_enable_trigger_print,
1293 .init = event_trigger_init,
1294 .free = event_enable_trigger_free,
1295 };
1296
1297 static struct event_trigger_ops event_disable_trigger_ops = {
1298 .func = event_enable_trigger,
1299 .print = event_enable_trigger_print,
1300 .init = event_trigger_init,
1301 .free = event_enable_trigger_free,
1302 };
1303
1304 static struct event_trigger_ops event_disable_count_trigger_ops = {
1305 .func = event_enable_count_trigger,
1306 .print = event_enable_trigger_print,
1307 .init = event_trigger_init,
1308 .free = event_enable_trigger_free,
1309 };
1310
1311 int event_enable_trigger_func(struct event_command *cmd_ops,
1312 struct trace_event_file *file,
1313 char *glob, char *cmd, char *param)
1314 {
1315 struct trace_event_file *event_enable_file;
1316 struct enable_trigger_data *enable_data;
1317 struct event_trigger_data *trigger_data;
1318 struct event_trigger_ops *trigger_ops;
1319 struct trace_array *tr = file->tr;
1320 const char *system;
1321 const char *event;
1322 bool hist = false;
1323 char *trigger;
1324 char *number;
1325 bool enable;
1326 int ret;
1327
1328 if (!param)
1329 return -EINVAL;
1330
1331 /* separate the trigger from the filter (s:e:n [if filter]) */
1332 trigger = strsep(&param, " \t");
1333 if (!trigger)
1334 return -EINVAL;
1335
1336 system = strsep(&trigger, ":");
1337 if (!trigger)
1338 return -EINVAL;
1339
1340 event = strsep(&trigger, ":");
1341
1342 ret = -EINVAL;
1343 event_enable_file = find_event_file(tr, system, event);
1344 if (!event_enable_file)
1345 goto out;
1346
1347 #ifdef CONFIG_HIST_TRIGGERS
1348 hist = ((strcmp(cmd, ENABLE_HIST_STR) == 0) ||
1349 (strcmp(cmd, DISABLE_HIST_STR) == 0));
1350
1351 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1352 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1353 #else
1354 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1355 #endif
1356 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1357
1358 ret = -ENOMEM;
1359 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1360 if (!trigger_data)
1361 goto out;
1362
1363 enable_data = kzalloc(sizeof(*enable_data), GFP_KERNEL);
1364 if (!enable_data) {
1365 kfree(trigger_data);
1366 goto out;
1367 }
1368
1369 trigger_data->count = -1;
1370 trigger_data->ops = trigger_ops;
1371 trigger_data->cmd_ops = cmd_ops;
1372 INIT_LIST_HEAD(&trigger_data->list);
1373 RCU_INIT_POINTER(trigger_data->filter, NULL);
1374
1375 enable_data->hist = hist;
1376 enable_data->enable = enable;
1377 enable_data->file = event_enable_file;
1378 trigger_data->private_data = enable_data;
1379
1380 if (glob[0] == '!') {
1381 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1382 kfree(trigger_data);
1383 kfree(enable_data);
1384 ret = 0;
1385 goto out;
1386 }
1387
1388 if (trigger) {
1389 number = strsep(&trigger, ":");
1390
1391 ret = -EINVAL;
1392 if (!strlen(number))
1393 goto out_free;
1394
1395 /*
1396 * We use the callback data field (which is a pointer)
1397 * as our counter.
1398 */
1399 ret = kstrtoul(number, 0, &trigger_data->count);
1400 if (ret)
1401 goto out_free;
1402 }
1403
1404 if (!param) /* if param is non-empty, it's supposed to be a filter */
1405 goto out_reg;
1406
1407 if (!cmd_ops->set_filter)
1408 goto out_reg;
1409
1410 ret = cmd_ops->set_filter(param, trigger_data, file);
1411 if (ret < 0)
1412 goto out_free;
1413
1414 out_reg:
1415 /* Don't let event modules unload while probe registered */
1416 ret = try_module_get(event_enable_file->event_call->mod);
1417 if (!ret) {
1418 ret = -EBUSY;
1419 goto out_free;
1420 }
1421
1422 ret = trace_event_enable_disable(event_enable_file, 1, 1);
1423 if (ret < 0)
1424 goto out_put;
1425 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1426 /*
1427	 * The above returns the number of triggers registered on success,
1428	 * but returns zero if it didn't register anything. Consider
1429	 * registering nothing a failure too.
1430 */
1431 if (!ret) {
1432 ret = -ENOENT;
1433 goto out_disable;
1434 } else if (ret < 0)
1435 goto out_disable;
1436	/* Just return zero, not the number of triggers registered */
1437 ret = 0;
1438 out:
1439 return ret;
1440
1441 out_disable:
1442 trace_event_enable_disable(event_enable_file, 0, 1);
1443 out_put:
1444 module_put(event_enable_file->event_call->mod);
1445 out_free:
1446 if (cmd_ops->set_filter)
1447 cmd_ops->set_filter(NULL, trigger_data, NULL);
1448 kfree(trigger_data);
1449 kfree(enable_data);
1450 goto out;
1451 }
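
/*
 * Usage sketch for the enable_event/disable_event triggers handled above
 * (events, count and filter are illustrative): arm tracing of one event
 * from another, e.g.
 *
 *	echo 'enable_event:kmem:kmalloc:3 if prev_pid == 0' > /sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *
 * The target system:event pair is parsed out of the parameter string and
 * resolved with find_event_file(); the target event's module refcount is
 * held for as long as the trigger stays registered.
 */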
1452
1453 int event_enable_register_trigger(char *glob,
1454 struct event_trigger_ops *ops,
1455 struct event_trigger_data *data,
1456 struct trace_event_file *file)
1457 {
1458 struct enable_trigger_data *enable_data = data->private_data;
1459 struct enable_trigger_data *test_enable_data;
1460 struct event_trigger_data *test;
1461 int ret = 0;
1462
1463 list_for_each_entry_rcu(test, &file->triggers, list) {
1464 test_enable_data = test->private_data;
1465 if (test_enable_data &&
1466 (test->cmd_ops->trigger_type ==
1467 data->cmd_ops->trigger_type) &&
1468 (test_enable_data->file == enable_data->file)) {
1469 ret = -EEXIST;
1470 goto out;
1471 }
1472 }
1473
1474 if (data->ops->init) {
1475 ret = data->ops->init(data->ops, data);
1476 if (ret < 0)
1477 goto out;
1478 }
1479
1480 list_add_rcu(&data->list, &file->triggers);
1481 ret++;
1482
1483 update_cond_flag(file);
1484 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1485 list_del_rcu(&data->list);
1486 update_cond_flag(file);
1487 ret--;
1488 }
1489 out:
1490 return ret;
1491 }
1492
1493 void event_enable_unregister_trigger(char *glob,
1494 struct event_trigger_ops *ops,
1495 struct event_trigger_data *test,
1496 struct trace_event_file *file)
1497 {
1498 struct enable_trigger_data *test_enable_data = test->private_data;
1499 struct enable_trigger_data *enable_data;
1500 struct event_trigger_data *data;
1501 bool unregistered = false;
1502
1503 list_for_each_entry_rcu(data, &file->triggers, list) {
1504 enable_data = data->private_data;
1505 if (enable_data &&
1506 (data->cmd_ops->trigger_type ==
1507 test->cmd_ops->trigger_type) &&
1508 (enable_data->file == test_enable_data->file)) {
1509 unregistered = true;
1510 list_del_rcu(&data->list);
1511 trace_event_trigger_enable_disable(file, 0);
1512 update_cond_flag(file);
1513 break;
1514 }
1515 }
1516
1517 if (unregistered && data->ops->free)
1518 data->ops->free(data->ops, data);
1519 }
1520
1521 static struct event_trigger_ops *
1522 event_enable_get_trigger_ops(char *cmd, char *param)
1523 {
1524 struct event_trigger_ops *ops;
1525 bool enable;
1526
1527 #ifdef CONFIG_HIST_TRIGGERS
1528 enable = ((strcmp(cmd, ENABLE_EVENT_STR) == 0) ||
1529 (strcmp(cmd, ENABLE_HIST_STR) == 0));
1530 #else
1531 enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
1532 #endif
1533 if (enable)
1534 ops = param ? &event_enable_count_trigger_ops :
1535 &event_enable_trigger_ops;
1536 else
1537 ops = param ? &event_disable_count_trigger_ops :
1538 &event_disable_trigger_ops;
1539
1540 return ops;
1541 }
1542
1543 static struct event_command trigger_enable_cmd = {
1544 .name = ENABLE_EVENT_STR,
1545 .trigger_type = ETT_EVENT_ENABLE,
1546 .func = event_enable_trigger_func,
1547 .reg = event_enable_register_trigger,
1548 .unreg = event_enable_unregister_trigger,
1549 .get_trigger_ops = event_enable_get_trigger_ops,
1550 .set_filter = set_trigger_filter,
1551 };
1552
1553 static struct event_command trigger_disable_cmd = {
1554 .name = DISABLE_EVENT_STR,
1555 .trigger_type = ETT_EVENT_ENABLE,
1556 .func = event_enable_trigger_func,
1557 .reg = event_enable_register_trigger,
1558 .unreg = event_enable_unregister_trigger,
1559 .get_trigger_ops = event_enable_get_trigger_ops,
1560 .set_filter = set_trigger_filter,
1561 };
1562
1563 static __init void unregister_trigger_enable_disable_cmds(void)
1564 {
1565 unregister_event_command(&trigger_enable_cmd);
1566 unregister_event_command(&trigger_disable_cmd);
1567 }
1568
1569 static __init int register_trigger_enable_disable_cmds(void)
1570 {
1571 int ret;
1572
1573 ret = register_event_command(&trigger_enable_cmd);
1574 if (WARN_ON(ret < 0))
1575 return ret;
1576 ret = register_event_command(&trigger_disable_cmd);
1577 if (WARN_ON(ret < 0))
1578 unregister_trigger_enable_disable_cmds();
1579
1580 return ret;
1581 }
1582
1583 static __init int register_trigger_traceon_traceoff_cmds(void)
1584 {
1585 int ret;
1586
1587 ret = register_event_command(&trigger_traceon_cmd);
1588 if (WARN_ON(ret < 0))
1589 return ret;
1590 ret = register_event_command(&trigger_traceoff_cmd);
1591 if (WARN_ON(ret < 0))
1592 unregister_trigger_traceon_traceoff_cmds();
1593
1594 return ret;
1595 }
1596
1597 __init int register_trigger_cmds(void)
1598 {
1599 register_trigger_traceon_traceoff_cmds();
1600 register_trigger_snapshot_cmd();
1601 register_trigger_stacktrace_cmd();
1602 register_trigger_enable_disable_cmds();
1603 register_trigger_hist_enable_disable_cmds();
1604 register_trigger_hist_cmd();
1605
1606 return 0;
1607 }