/*
 * trace_events_trigger - trace event triggers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
 */

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include "trace.h"

static LIST_HEAD(trigger_commands);
static DEFINE_MUTEX(trigger_cmd_mutex);

static void
trigger_data_free(struct event_trigger_data *data)
{
	synchronize_sched(); /* make sure current triggers exit before free */
	kfree(data);
}

/**
 * event_triggers_call - Call triggers associated with a trace event
 * @file: The ftrace_event_file associated with the event
 *
 * For each trigger associated with an event, invoke the trigger
 * function registered with the associated trigger command.
 *
 * Called from tracepoint handlers (with rcu_read_lock_sched() held).
 */
void event_triggers_call(struct ftrace_event_file *file)
{
	struct event_trigger_data *data;

	if (list_empty(&file->triggers))
		return;

	list_for_each_entry_rcu(data, &file->triggers, list)
		data->ops->func(data);
}
EXPORT_SYMBOL_GPL(event_triggers_call);

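/*
 * Illustrative sketch (not part of this file): a tracepoint handler is
 * expected to call event_triggers_call() only when the event's
 * TRIGGER_MODE bit is set, roughly along these lines.  The handler name
 * below is hypothetical; the real callers are the generated
 * ftrace_raw_event_*() functions.
 *
 *	static void ftrace_raw_event_example(void *data)
 *	{
 *		struct ftrace_event_file *ftrace_file = data;
 *
 *		if (test_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT,
 *			     &ftrace_file->flags))
 *			event_triggers_call(ftrace_file);
 *
 *		... record the event into the ring buffer ...
 *	}
 */
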
static void *trigger_next(struct seq_file *m, void *t, loff_t *pos)
{
	struct ftrace_event_file *event_file = event_file_data(m->private);

	return seq_list_next(t, &event_file->triggers, pos);
}

static void *trigger_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_file *event_file;

	/* ->stop() is called even if ->start() fails */
	mutex_lock(&event_mutex);
	event_file = event_file_data(m->private);
	if (unlikely(!event_file))
		return ERR_PTR(-ENODEV);

	return seq_list_start(&event_file->triggers, *pos);
}

static void trigger_stop(struct seq_file *m, void *t)
{
	mutex_unlock(&event_mutex);
}

static int trigger_show(struct seq_file *m, void *v)
{
	struct event_trigger_data *data;

	data = list_entry(v, struct event_trigger_data, list);
	data->ops->print(m, data->ops, data);

	return 0;
}

static const struct seq_operations event_triggers_seq_ops = {
	.start = trigger_start,
	.next = trigger_next,
	.stop = trigger_stop,
	.show = trigger_show,
};

static int event_trigger_regex_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	mutex_lock(&event_mutex);

	if (unlikely(!event_file_data(file))) {
		mutex_unlock(&event_mutex);
		return -ENODEV;
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &event_triggers_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = file;
		}
	}

	mutex_unlock(&event_mutex);

	return ret;
}

static int trigger_process_regex(struct ftrace_event_file *file, char *buff)
{
	char *command, *next = buff;
	struct event_command *p;
	int ret = -EINVAL;

	command = strsep(&next, ": \t");
	command = (command[0] != '!') ? command : command + 1;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(p, file, buff, command, next);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

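/*
 * For reference (a summary, not normative documentation): the strings
 * parsed here are what user space writes to a per-event 'trigger' file,
 * in the general form
 *
 *	command[:count] [if filter]
 *
 * where a leading '!' removes an existing trigger and the 'if filter'
 * clause is only honoured by commands that provide ->set_filter().
 * For example, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	# echo 'traceoff:5' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *	# echo '!traceoff' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 */
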
static ssize_t event_trigger_regex_write(struct file *file,
					 const char __user *ubuf,
					 size_t cnt, loff_t *ppos)
{
	struct ftrace_event_file *event_file;
	ssize_t ret;
	char *buf;

	if (!cnt)
		return 0;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long)buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';
	strim(buf);

	mutex_lock(&event_mutex);
	event_file = event_file_data(file);
	if (unlikely(!event_file)) {
		mutex_unlock(&event_mutex);
		free_page((unsigned long)buf);
		return -ENODEV;
	}
	ret = trigger_process_regex(event_file, buf);
	mutex_unlock(&event_mutex);

	free_page((unsigned long)buf);
	if (ret < 0)
		goto out;

	*ppos += cnt;
	ret = cnt;
 out:
	return ret;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
{
	mutex_lock(&event_mutex);

	if (file->f_mode & FMODE_READ)
		seq_release(inode, file);

	mutex_unlock(&event_mutex);

	return 0;
}

static ssize_t
event_trigger_write(struct file *filp, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return event_trigger_regex_write(filp, ubuf, cnt, ppos);
}

static int
event_trigger_open(struct inode *inode, struct file *filp)
{
	return event_trigger_regex_open(inode, filp);
}

static int
event_trigger_release(struct inode *inode, struct file *file)
{
	return event_trigger_regex_release(inode, file);
}

const struct file_operations event_trigger_fops = {
	.open = event_trigger_open,
	.read = seq_read,
	.write = event_trigger_write,
	.llseek = ftrace_filter_lseek,
	.release = event_trigger_release,
};

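/*
 * Context (a sketch, not part of this file): the event directory creation
 * code is expected to expose these operations as the per-event 'trigger'
 * file, roughly along the lines of:
 *
 *	trace_create_file("trigger", 0644, file->dir, file,
 *			  &event_trigger_fops);
 */
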
/*
 * Currently we only register event commands from __init, so mark this
 * __init too.
 */
static __init int register_event_command(struct event_command *cmd)
{
	struct event_command *p;
	int ret = 0;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry(p, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &trigger_commands);
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

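/*
 * Illustrative sketch (hypothetical, not part of this file): a trigger
 * command is normally a statically defined struct event_command wired to
 * the generic helpers below and registered from an __init function.
 * ETT_EXAMPLE and example_get_trigger_ops() are made-up names:
 *
 *	static struct event_command trigger_example_cmd = {
 *		.name = "example",
 *		.trigger_type = ETT_EXAMPLE,
 *		.func = event_trigger_callback,
 *		.reg = register_trigger,
 *		.unreg = unregister_trigger,
 *		.get_trigger_ops = example_get_trigger_ops,
 *	};
 *
 *	static __init int register_trigger_example_cmd(void)
 *	{
 *		return register_event_command(&trigger_example_cmd);
 *	}
 */
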
/*
 * Currently we only unregister event commands from __init, so mark
 * this __init too.
 */
static __init int unregister_event_command(struct event_command *cmd)
{
	struct event_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&trigger_cmd_mutex);
	list_for_each_entry_safe(p, n, &trigger_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&trigger_cmd_mutex);

	return ret;
}

/**
 * event_trigger_print - Generic event_trigger_ops @print implementation
 * @name: The name of the event trigger
 * @m: The seq_file being printed to
 * @data: Trigger-specific data
 * @filter_str: filter_str to print, if present
 *
 * Common implementation for event triggers to print themselves.
 *
 * Usually wrapped by a function that simply sets the @name of the
 * trigger command and then invokes this.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_print(const char *name, struct seq_file *m,
		    void *data, char *filter_str)
{
	long count = (long)data;

	seq_printf(m, "%s", name);

	if (count == -1)
		seq_puts(m, ":unlimited");
	else
		seq_printf(m, ":count=%ld", count);

	if (filter_str)
		seq_printf(m, " if %s\n", filter_str);
	else
		seq_puts(m, "\n");

	return 0;
}

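/*
 * Example output (illustrative), as read back from a 'trigger' file:
 *
 *	traceoff:unlimited
 *	traceoff:count=5 if prev_pid == 0
 *
 * i.e. the command name, the remaining count ("unlimited" when the count
 * is -1), and the filter string when one was supplied.
 */
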
/**
 * event_trigger_init - Generic event_trigger_ops @init implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger initialization.
 *
 * Usually used directly as the @init method in event trigger
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_init(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	data->ref++;
	return 0;
}

/**
 * event_trigger_free - Generic event_trigger_ops @free implementation
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data
 *
 * Common implementation of event trigger de-initialization.
 *
 * Usually used directly as the @free method in event trigger
 * implementations.
 */
static void
event_trigger_free(struct event_trigger_ops *ops,
		   struct event_trigger_data *data)
{
	if (WARN_ON_ONCE(data->ref <= 0))
		return;

	data->ref--;
	if (!data->ref)
		trigger_data_free(data);
}

static int trace_event_trigger_enable_disable(struct ftrace_event_file *file,
					      int trigger_enable)
{
	int ret = 0;

	if (trigger_enable) {
		if (atomic_inc_return(&file->tm_ref) > 1)
			return ret;
		set_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 1, 1);
	} else {
		if (atomic_dec_return(&file->tm_ref) > 0)
			return ret;
		clear_bit(FTRACE_EVENT_FL_TRIGGER_MODE_BIT, &file->flags);
		ret = trace_event_enable_disable(file, 0, 1);
	}

	return ret;
}

/**
 * clear_event_triggers - Clear all triggers associated with a trace array
 * @tr: The trace array to clear
 *
 * For each trigger, the triggering event has its tm_ref decremented
 * via trace_event_trigger_enable_disable(), and any associated event
 * (in the case of enable/disable_event triggers) will have its sm_ref
 * decremented via free()->trace_event_enable_disable().  That
 * combination effectively reverses the soft-mode/trigger state added
 * by trigger registration.
 *
 * Must be called with event_mutex held.
 */
void
clear_event_triggers(struct trace_array *tr)
{
	struct ftrace_event_file *file;

	list_for_each_entry(file, &tr->events, list) {
		struct event_trigger_data *data;
		list_for_each_entry_rcu(data, &file->triggers, list) {
			trace_event_trigger_enable_disable(file, 0);
			if (data->ops->free)
				data->ops->free(data->ops, data);
		}
	}
}

/**
 * register_trigger - Generic event_command @reg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @data: Trigger-specific data to associate with the trigger
 * @file: The ftrace_event_file associated with the event
 *
 * Common implementation for event trigger registration.
 *
 * Usually used directly as the @reg method in event command
 * implementations.
 *
 * Return: the number of triggers registered (0 if enabling the event
 * failed), errno otherwise
 */
static int register_trigger(char *glob, struct event_trigger_ops *ops,
			    struct event_trigger_data *data,
			    struct ftrace_event_file *file)
{
	struct event_trigger_data *test;
	int ret = 0;

	list_for_each_entry_rcu(test, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == data->cmd_ops->trigger_type) {
			ret = -EEXIST;
			goto out;
		}
	}

	if (data->ops->init) {
		ret = data->ops->init(data->ops, data);
		if (ret < 0)
			goto out;
	}

	list_add_rcu(&data->list, &file->triggers);
	ret++;

	if (trace_event_trigger_enable_disable(file, 1) < 0) {
		list_del_rcu(&data->list);
		ret--;
	}
out:
	return ret;
}

/**
 * unregister_trigger - Generic event_command @unreg implementation
 * @glob: The raw string used to register the trigger
 * @ops: The trigger ops associated with the trigger
 * @test: Trigger-specific data used to find the trigger to remove
 * @file: The ftrace_event_file associated with the event
 *
 * Common implementation for event trigger unregistration.
 *
 * Usually used directly as the @unreg method in event command
 * implementations.
 */
static void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct ftrace_event_file *file)
{
	struct event_trigger_data *data;
	bool unregistered = false;

	list_for_each_entry_rcu(data, &file->triggers, list) {
		if (data->cmd_ops->trigger_type == test->cmd_ops->trigger_type) {
			unregistered = true;
			list_del_rcu(&data->list);
			trace_event_trigger_enable_disable(file, 0);
			break;
		}
	}

	if (unregistered && data->ops->free)
		data->ops->free(data->ops, data);
}

/**
 * event_trigger_callback - Generic event_command @func implementation
 * @cmd_ops: The command ops, used for trigger registration
 * @file: The ftrace_event_file associated with the event
 * @glob: The raw string used to register the trigger
 * @cmd: The cmd portion of the string used to register the trigger
 * @param: The params portion of the string used to register the trigger
 *
 * Common implementation for event command parsing and trigger
 * instantiation.
 *
 * Usually used directly as the @func method in event command
 * implementations.
 *
 * Return: 0 on success, errno otherwise
 */
static int
event_trigger_callback(struct event_command *cmd_ops,
		       struct ftrace_event_file *file,
		       char *glob, char *cmd, char *param)
{
	struct event_trigger_data *trigger_data;
	struct event_trigger_ops *trigger_ops;
	char *trigger = NULL;
	char *number;
	int ret;

	/* separate the trigger from the filter (t:n [if filter]) */
	if (param && isdigit(param[0]))
		trigger = strsep(&param, " \t");

	trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);

	ret = -ENOMEM;
	trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
	if (!trigger_data)
		goto out;

	trigger_data->count = -1;
	trigger_data->ops = trigger_ops;
	trigger_data->cmd_ops = cmd_ops;
	INIT_LIST_HEAD(&trigger_data->list);

	if (glob[0] == '!') {
		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
		kfree(trigger_data);
		ret = 0;
		goto out;
	}

	if (trigger) {
		number = strsep(&trigger, ":");

		ret = -EINVAL;
		if (!strlen(number))
			goto out_free;

		/*
		 * We use the callback data field (which is a pointer)
		 * as our counter.
		 */
		ret = kstrtoul(number, 0, &trigger_data->count);
		if (ret)
			goto out_free;
	}

	if (!param) /* if param is non-empty, it's supposed to be a filter */
		goto out_reg;

	if (!cmd_ops->set_filter)
		goto out_reg;

	ret = cmd_ops->set_filter(param, trigger_data, file);
	if (ret < 0)
		goto out_free;

 out_reg:
	ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
	/*
	 * cmd_ops->reg() returns the number of triggers registered on
	 * success; zero means nothing was registered (e.g. the event
	 * could not be enabled), which we also treat as a failure.
	 */
	if (!ret) {
		ret = -ENOENT;
		goto out_free;
	} else if (ret < 0)
		goto out_free;
	ret = 0;
 out:
	return ret;

 out_free:
	kfree(trigger_data);
	goto out;
}

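/*
 * Worked example (illustrative): after user space writes
 *
 *	traceoff:5 if prev_pid == 0
 *
 * to a 'trigger' file, trigger_process_regex() calls this function with
 * cmd = "traceoff" and param = "5 if prev_pid == 0" (glob points at the
 * start of the buffer, so glob[0] == '!' only for removals such as
 * "!traceoff").  Since param starts with a digit, trigger = "5" and
 * param becomes "if prev_pid == 0"; kstrtoul() then sets
 * trigger_data->count = 5, and the remaining param is handed to
 * ->set_filter() only for commands that provide one.  prev_pid is just
 * an example filter field.
 */
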
static void
traceon_trigger(struct event_trigger_data *data)
{
	if (tracing_is_on())
		return;

	tracing_on();
}

static void
traceon_count_trigger(struct event_trigger_data *data)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	traceon_trigger(data);
}

static void
traceoff_trigger(struct event_trigger_data *data)
{
	if (!tracing_is_on())
		return;

	tracing_off();
}

static void
traceoff_count_trigger(struct event_trigger_data *data)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	traceoff_trigger(data);
}

static int
traceon_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		      struct event_trigger_data *data)
{
	return event_trigger_print("traceon", m, (void *)data->count,
				   data->filter_str);
}

static int
traceoff_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("traceoff", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops traceon_trigger_ops = {
	.func = traceon_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceon_count_trigger_ops = {
	.func = traceon_count_trigger,
	.print = traceon_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_trigger_ops = {
	.func = traceoff_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops traceoff_count_trigger_ops = {
	.func = traceoff_count_trigger,
	.print = traceoff_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
onoff_get_trigger_ops(char *cmd, char *param)
{
	struct event_trigger_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_trigger_ops :
			&traceon_trigger_ops;
	else
		ops = param ? &traceoff_count_trigger_ops :
			&traceoff_trigger_ops;

	return ops;
}

static struct event_command trigger_traceon_cmd = {
	.name = "traceon",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
};

static struct event_command trigger_traceoff_cmd = {
	.name = "traceoff",
	.trigger_type = ETT_TRACE_ONOFF,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = onoff_get_trigger_ops,
};

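/*
 * Usage example (illustrative, assuming debugfs at /sys/kernel/debug):
 * disable tracing globally, then let the first kmalloc event switch it
 * back on so the trace starts at that point:
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_on
 *	# echo 'traceon:1' > \
 *		/sys/kernel/debug/tracing/events/kmem/kmalloc/trigger
 */
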
#ifdef CONFIG_TRACER_SNAPSHOT
static void
snapshot_trigger(struct event_trigger_data *data)
{
	tracing_snapshot();
}

static void
snapshot_count_trigger(struct event_trigger_data *data)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	snapshot_trigger(data);
}

static int
register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
			  struct event_trigger_data *data,
			  struct ftrace_event_file *file)
{
	int ret = register_trigger(glob, ops, data, file);

	if (ret > 0 && tracing_alloc_snapshot() != 0) {
		unregister_trigger(glob, ops, data, file);
		ret = 0;
	}

	return ret;
}

static int
snapshot_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
		       struct event_trigger_data *data)
{
	return event_trigger_print("snapshot", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops snapshot_trigger_ops = {
	.func = snapshot_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops snapshot_count_trigger_ops = {
	.func = snapshot_count_trigger,
	.print = snapshot_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
snapshot_get_trigger_ops(char *cmd, char *param)
{
	return param ? &snapshot_count_trigger_ops : &snapshot_trigger_ops;
}

static struct event_command trigger_snapshot_cmd = {
	.name = "snapshot",
	.trigger_type = ETT_SNAPSHOT,
	.func = event_trigger_callback,
	.reg = register_snapshot_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = snapshot_get_trigger_ops,
};

static __init int register_trigger_snapshot_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_snapshot_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_snapshot_cmd(void) { return 0; }
#endif /* CONFIG_TRACER_SNAPSHOT */

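/*
 * Usage example (illustrative, assuming debugfs at /sys/kernel/debug):
 * capture a snapshot of the trace buffer on the first sched_wakeup
 * event, then read it back:
 *
 *	# echo 'snapshot:1' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_wakeup/trigger
 *	# cat /sys/kernel/debug/tracing/snapshot
 */
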
#ifdef CONFIG_STACKTRACE
/*
 * Skip 3:
 *   stacktrace_trigger()
 *   event_triggers_post_call()
 *   ftrace_raw_event_xxx()
 */
#define STACK_SKIP 3

static void
stacktrace_trigger(struct event_trigger_data *data)
{
	trace_dump_stack(STACK_SKIP);
}

static void
stacktrace_count_trigger(struct event_trigger_data *data)
{
	if (!data->count)
		return;

	if (data->count != -1)
		(data->count)--;

	stacktrace_trigger(data);
}

static int
stacktrace_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
			 struct event_trigger_data *data)
{
	return event_trigger_print("stacktrace", m, (void *)data->count,
				   data->filter_str);
}

static struct event_trigger_ops stacktrace_trigger_ops = {
	.func = stacktrace_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops stacktrace_count_trigger_ops = {
	.func = stacktrace_count_trigger,
	.print = stacktrace_trigger_print,
	.init = event_trigger_init,
	.free = event_trigger_free,
};

static struct event_trigger_ops *
stacktrace_get_trigger_ops(char *cmd, char *param)
{
	return param ? &stacktrace_count_trigger_ops : &stacktrace_trigger_ops;
}

static struct event_command trigger_stacktrace_cmd = {
	.name = "stacktrace",
	.trigger_type = ETT_STACKTRACE,
	.post_trigger = true,
	.func = event_trigger_callback,
	.reg = register_trigger,
	.unreg = unregister_trigger,
	.get_trigger_ops = stacktrace_get_trigger_ops,
};

static __init int register_trigger_stacktrace_cmd(void)
{
	int ret;

	ret = register_event_command(&trigger_stacktrace_cmd);
	WARN_ON(ret < 0);

	return ret;
}
#else
static __init int register_trigger_stacktrace_cmd(void) { return 0; }
#endif /* CONFIG_STACKTRACE */

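/*
 * Usage example (illustrative, assuming debugfs at /sys/kernel/debug):
 * dump a kernel stack trace into the trace buffer for the next three
 * sched_switch events:
 *
 *	# echo 'stacktrace:3' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 *	# cat /sys/kernel/debug/tracing/trace
 */
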
static __init void unregister_trigger_traceon_traceoff_cmds(void)
{
	unregister_event_command(&trigger_traceon_cmd);
	unregister_event_command(&trigger_traceoff_cmd);
}

static __init int register_trigger_traceon_traceoff_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_traceon_cmd);
	if (WARN_ON(ret < 0))
		return ret;
	ret = register_event_command(&trigger_traceoff_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_traceon_traceoff_cmds();

	return ret;
}

__init int register_trigger_cmds(void)
{
	register_trigger_traceon_traceoff_cmds();
	register_trigger_snapshot_cmd();
	register_trigger_stacktrace_cmd();

	return 0;
}