2 * trace_events_hist - trace event hist triggers
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
17 #include <linux/module.h>
18 #include <linux/kallsyms.h>
19 #include <linux/mutex.h>
20 #include <linux/slab.h>
21 #include <linux/stacktrace.h>
23 #include "tracing_map.h"
28 typedef u64 (*hist_field_fn_t
) (struct hist_field
*field
, void *event
);
31 struct ftrace_event_field
*field
;
38 static u64
hist_field_none(struct hist_field
*field
, void *event
)
43 static u64
hist_field_counter(struct hist_field
*field
, void *event
)
48 static u64
hist_field_string(struct hist_field
*hist_field
, void *event
)
50 char *addr
= (char *)(event
+ hist_field
->field
->offset
);
52 return (u64
)(unsigned long)addr
;
55 static u64
hist_field_dynstring(struct hist_field
*hist_field
, void *event
)
57 u32 str_item
= *(u32
*)(event
+ hist_field
->field
->offset
);
58 int str_loc
= str_item
& 0xffff;
59 char *addr
= (char *)(event
+ str_loc
);
61 return (u64
)(unsigned long)addr
;
64 static u64
hist_field_pstring(struct hist_field
*hist_field
, void *event
)
66 char **addr
= (char **)(event
+ hist_field
->field
->offset
);
68 return (u64
)(unsigned long)*addr
;
/*
 * Generate a hist_field_fn_t handler for a fixed-width numeric field
 * type: read a value of the given type at the field's offset in the
 * trace record and widen it to u64.
 */
#define DEFINE_HIST_FIELD_FN(type)					\
static u64 hist_field_##type(struct hist_field *hist_field, void *event)\
{									\
	return (u64)(unsigned long)					\
		*(type *)(event + hist_field->field->offset);		\
}
/* Handlers for every fixed-width signed/unsigned integer field type. */
DEFINE_HIST_FIELD_FN(s64);
DEFINE_HIST_FIELD_FN(u64);
DEFINE_HIST_FIELD_FN(s32);
DEFINE_HIST_FIELD_FN(u32);
DEFINE_HIST_FIELD_FN(s16);
DEFINE_HIST_FIELD_FN(u16);
DEFINE_HIST_FIELD_FN(s8);
DEFINE_HIST_FIELD_FN(u8);
/* Walk every field of a hist trigger: values first, then keys. */
#define for_each_hist_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)

/* Walk only the value fields: indices [0, n_vals). */
#define for_each_hist_val_field(i, hist_data)	\
	for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)

/* Walk only the key fields: indices [n_vals, n_fields). */
#define for_each_hist_key_field(i, hist_data)	\
	for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
/* Stacktrace keys: max captured frames, their byte size, and frames
 * to skip so the trigger machinery itself doesn't appear in keys. */
#define HIST_STACKTRACE_DEPTH	16
#define HIST_STACKTRACE_SIZE	(HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
#define HIST_STACKTRACE_SKIP	5

/* The hitcount value always occupies field slot 0. */
#define HITCOUNT_IDX		0
/* Largest possible compound key: a max-size string key plus stacktrace. */
#define HIST_KEY_SIZE_MAX	(MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
/*
 * Per-field flags: whether the field is a key or the implicit
 * hitcount value, plus display/interpretation modifiers parsed from
 * the trigger string (.hex, .sym, .sym-offset, .execname, .syscall,
 * stacktrace).  Values are single bits so they can be OR-ed together.
 */
enum hist_field_flags {
	HIST_FIELD_FL_HITCOUNT		= 1 << 0,
	HIST_FIELD_FL_KEY		= 1 << 1,
	HIST_FIELD_FL_STRING		= 1 << 2,
	HIST_FIELD_FL_HEX		= 1 << 3,
	HIST_FIELD_FL_SYM		= 1 << 4,
	HIST_FIELD_FL_SYM_OFFSET	= 1 << 5,
	HIST_FIELD_FL_EXECNAME		= 1 << 6,
	HIST_FIELD_FL_SYSCALL		= 1 << 7,
	HIST_FIELD_FL_STACKTRACE	= 1 << 8,
};
116 struct hist_trigger_attrs
{
123 unsigned int map_bits
;
126 struct hist_trigger_data
{
127 struct hist_field
*fields
[TRACING_MAP_FIELDS_MAX
];
130 unsigned int n_fields
;
131 unsigned int key_size
;
132 struct tracing_map_sort_key sort_keys
[TRACING_MAP_SORT_KEYS_MAX
];
133 unsigned int n_sort_keys
;
134 struct trace_event_file
*event_file
;
135 struct hist_trigger_attrs
*attrs
;
136 struct tracing_map
*map
;
139 static hist_field_fn_t
select_value_fn(int field_size
, int field_is_signed
)
141 hist_field_fn_t fn
= NULL
;
143 switch (field_size
) {
173 static int parse_map_size(char *str
)
175 unsigned long size
, map_bits
;
184 ret
= kstrtoul(str
, 0, &size
);
188 map_bits
= ilog2(roundup_pow_of_two(size
));
189 if (map_bits
< TRACING_MAP_BITS_MIN
||
190 map_bits
> TRACING_MAP_BITS_MAX
)
198 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs
*attrs
)
203 kfree(attrs
->sort_key_str
);
204 kfree(attrs
->keys_str
);
205 kfree(attrs
->vals_str
);
209 static struct hist_trigger_attrs
*parse_hist_trigger_attrs(char *trigger_str
)
211 struct hist_trigger_attrs
*attrs
;
214 attrs
= kzalloc(sizeof(*attrs
), GFP_KERNEL
);
216 return ERR_PTR(-ENOMEM
);
218 while (trigger_str
) {
219 char *str
= strsep(&trigger_str
, ":");
221 if ((strncmp(str
, "key=", strlen("key=")) == 0) ||
222 (strncmp(str
, "keys=", strlen("keys=")) == 0))
223 attrs
->keys_str
= kstrdup(str
, GFP_KERNEL
);
224 else if ((strncmp(str
, "val=", strlen("val=")) == 0) ||
225 (strncmp(str
, "vals=", strlen("vals=")) == 0) ||
226 (strncmp(str
, "values=", strlen("values=")) == 0))
227 attrs
->vals_str
= kstrdup(str
, GFP_KERNEL
);
228 else if (strncmp(str
, "sort=", strlen("sort=")) == 0)
229 attrs
->sort_key_str
= kstrdup(str
, GFP_KERNEL
);
230 else if (strcmp(str
, "pause") == 0)
232 else if ((strcmp(str
, "cont") == 0) ||
233 (strcmp(str
, "continue") == 0))
235 else if (strcmp(str
, "clear") == 0)
237 else if (strncmp(str
, "size=", strlen("size=")) == 0) {
238 int map_bits
= parse_map_size(str
);
244 attrs
->map_bits
= map_bits
;
251 if (!attrs
->keys_str
) {
258 destroy_hist_trigger_attrs(attrs
);
263 static inline void save_comm(char *comm
, struct task_struct
*task
)
266 strcpy(comm
, "<idle>");
270 if (WARN_ON_ONCE(task
->pid
< 0)) {
271 strcpy(comm
, "<XXX>");
275 memcpy(comm
, task
->comm
, TASK_COMM_LEN
);
278 static void hist_trigger_elt_comm_free(struct tracing_map_elt
*elt
)
280 kfree((char *)elt
->private_data
);
283 static int hist_trigger_elt_comm_alloc(struct tracing_map_elt
*elt
)
285 struct hist_trigger_data
*hist_data
= elt
->map
->private_data
;
286 struct hist_field
*key_field
;
289 for_each_hist_key_field(i
, hist_data
) {
290 key_field
= hist_data
->fields
[i
];
292 if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
293 unsigned int size
= TASK_COMM_LEN
+ 1;
295 elt
->private_data
= kzalloc(size
, GFP_KERNEL
);
296 if (!elt
->private_data
)
305 static void hist_trigger_elt_comm_copy(struct tracing_map_elt
*to
,
306 struct tracing_map_elt
*from
)
308 char *comm_from
= from
->private_data
;
309 char *comm_to
= to
->private_data
;
312 memcpy(comm_to
, comm_from
, TASK_COMM_LEN
+ 1);
315 static void hist_trigger_elt_comm_init(struct tracing_map_elt
*elt
)
317 char *comm
= elt
->private_data
;
320 save_comm(comm
, current
);
323 static const struct tracing_map_ops hist_trigger_elt_comm_ops
= {
324 .elt_alloc
= hist_trigger_elt_comm_alloc
,
325 .elt_copy
= hist_trigger_elt_comm_copy
,
326 .elt_free
= hist_trigger_elt_comm_free
,
327 .elt_init
= hist_trigger_elt_comm_init
,
330 static void destroy_hist_field(struct hist_field
*hist_field
)
335 static struct hist_field
*create_hist_field(struct ftrace_event_field
*field
,
338 struct hist_field
*hist_field
;
340 if (field
&& is_function_field(field
))
343 hist_field
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
347 if (flags
& HIST_FIELD_FL_HITCOUNT
) {
348 hist_field
->fn
= hist_field_counter
;
352 if (flags
& HIST_FIELD_FL_STACKTRACE
) {
353 hist_field
->fn
= hist_field_none
;
357 if (is_string_field(field
)) {
358 flags
|= HIST_FIELD_FL_STRING
;
360 if (field
->filter_type
== FILTER_STATIC_STRING
)
361 hist_field
->fn
= hist_field_string
;
362 else if (field
->filter_type
== FILTER_DYN_STRING
)
363 hist_field
->fn
= hist_field_dynstring
;
365 hist_field
->fn
= hist_field_pstring
;
367 hist_field
->fn
= select_value_fn(field
->size
,
369 if (!hist_field
->fn
) {
370 destroy_hist_field(hist_field
);
375 hist_field
->field
= field
;
376 hist_field
->flags
= flags
;
381 static void destroy_hist_fields(struct hist_trigger_data
*hist_data
)
385 for (i
= 0; i
< TRACING_MAP_FIELDS_MAX
; i
++) {
386 if (hist_data
->fields
[i
]) {
387 destroy_hist_field(hist_data
->fields
[i
]);
388 hist_data
->fields
[i
] = NULL
;
393 static int create_hitcount_val(struct hist_trigger_data
*hist_data
)
395 hist_data
->fields
[HITCOUNT_IDX
] =
396 create_hist_field(NULL
, HIST_FIELD_FL_HITCOUNT
);
397 if (!hist_data
->fields
[HITCOUNT_IDX
])
402 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
))
408 static int create_val_field(struct hist_trigger_data
*hist_data
,
409 unsigned int val_idx
,
410 struct trace_event_file
*file
,
413 struct ftrace_event_field
*field
= NULL
;
414 unsigned long flags
= 0;
418 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
))
421 field_name
= strsep(&field_str
, ".");
423 if (strcmp(field_str
, "hex") == 0)
424 flags
|= HIST_FIELD_FL_HEX
;
431 field
= trace_find_event_field(file
->event_call
, field_name
);
437 hist_data
->fields
[val_idx
] = create_hist_field(field
, flags
);
438 if (!hist_data
->fields
[val_idx
]) {
445 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
))
451 static int create_val_fields(struct hist_trigger_data
*hist_data
,
452 struct trace_event_file
*file
)
454 char *fields_str
, *field_str
;
458 ret
= create_hitcount_val(hist_data
);
462 fields_str
= hist_data
->attrs
->vals_str
;
466 strsep(&fields_str
, "=");
470 for (i
= 0, j
= 1; i
< TRACING_MAP_VALS_MAX
&&
471 j
< TRACING_MAP_VALS_MAX
; i
++) {
472 field_str
= strsep(&fields_str
, ",");
475 if (strcmp(field_str
, "hitcount") == 0)
477 ret
= create_val_field(hist_data
, j
++, file
, field_str
);
481 if (fields_str
&& (strcmp(fields_str
, "hitcount") != 0))
487 static int create_key_field(struct hist_trigger_data
*hist_data
,
488 unsigned int key_idx
,
489 unsigned int key_offset
,
490 struct trace_event_file
*file
,
493 struct ftrace_event_field
*field
= NULL
;
494 unsigned long flags
= 0;
495 unsigned int key_size
;
498 if (WARN_ON(key_idx
>= TRACING_MAP_FIELDS_MAX
))
501 flags
|= HIST_FIELD_FL_KEY
;
503 if (strcmp(field_str
, "stacktrace") == 0) {
504 flags
|= HIST_FIELD_FL_STACKTRACE
;
505 key_size
= sizeof(unsigned long) * HIST_STACKTRACE_DEPTH
;
507 char *field_name
= strsep(&field_str
, ".");
510 if (strcmp(field_str
, "hex") == 0)
511 flags
|= HIST_FIELD_FL_HEX
;
512 else if (strcmp(field_str
, "sym") == 0)
513 flags
|= HIST_FIELD_FL_SYM
;
514 else if (strcmp(field_str
, "sym-offset") == 0)
515 flags
|= HIST_FIELD_FL_SYM_OFFSET
;
516 else if ((strcmp(field_str
, "execname") == 0) &&
517 (strcmp(field_name
, "common_pid") == 0))
518 flags
|= HIST_FIELD_FL_EXECNAME
;
519 else if (strcmp(field_str
, "syscall") == 0)
520 flags
|= HIST_FIELD_FL_SYSCALL
;
527 field
= trace_find_event_field(file
->event_call
, field_name
);
533 if (is_string_field(field
))
534 key_size
= MAX_FILTER_STR_VAL
;
536 key_size
= field
->size
;
539 hist_data
->fields
[key_idx
] = create_hist_field(field
, flags
);
540 if (!hist_data
->fields
[key_idx
]) {
545 key_size
= ALIGN(key_size
, sizeof(u64
));
546 hist_data
->fields
[key_idx
]->size
= key_size
;
547 hist_data
->fields
[key_idx
]->offset
= key_offset
;
548 hist_data
->key_size
+= key_size
;
549 if (hist_data
->key_size
> HIST_KEY_SIZE_MAX
) {
556 if (WARN_ON(hist_data
->n_keys
> TRACING_MAP_KEYS_MAX
))
564 static int create_key_fields(struct hist_trigger_data
*hist_data
,
565 struct trace_event_file
*file
)
567 unsigned int i
, key_offset
= 0, n_vals
= hist_data
->n_vals
;
568 char *fields_str
, *field_str
;
571 fields_str
= hist_data
->attrs
->keys_str
;
575 strsep(&fields_str
, "=");
579 for (i
= n_vals
; i
< n_vals
+ TRACING_MAP_KEYS_MAX
; i
++) {
580 field_str
= strsep(&fields_str
, ",");
583 ret
= create_key_field(hist_data
, i
, key_offset
,
598 static int create_hist_fields(struct hist_trigger_data
*hist_data
,
599 struct trace_event_file
*file
)
603 ret
= create_val_fields(hist_data
, file
);
607 ret
= create_key_fields(hist_data
, file
);
611 hist_data
->n_fields
= hist_data
->n_vals
+ hist_data
->n_keys
;
616 static int is_descending(const char *str
)
621 if (strcmp(str
, "descending") == 0)
624 if (strcmp(str
, "ascending") == 0)
630 static int create_sort_keys(struct hist_trigger_data
*hist_data
)
632 char *fields_str
= hist_data
->attrs
->sort_key_str
;
633 struct ftrace_event_field
*field
= NULL
;
634 struct tracing_map_sort_key
*sort_key
;
635 int descending
, ret
= 0;
638 hist_data
->n_sort_keys
= 1; /* we always have at least one, hitcount */
643 strsep(&fields_str
, "=");
649 for (i
= 0; i
< TRACING_MAP_SORT_KEYS_MAX
; i
++) {
650 char *field_str
, *field_name
;
652 sort_key
= &hist_data
->sort_keys
[i
];
654 field_str
= strsep(&fields_str
, ",");
661 if ((i
== TRACING_MAP_SORT_KEYS_MAX
- 1) && fields_str
) {
666 field_name
= strsep(&field_str
, ".");
672 if (strcmp(field_name
, "hitcount") == 0) {
673 descending
= is_descending(field_str
);
674 if (descending
< 0) {
678 sort_key
->descending
= descending
;
682 for (j
= 1; j
< hist_data
->n_fields
; j
++) {
683 field
= hist_data
->fields
[j
]->field
;
684 if (field
&& (strcmp(field_name
, field
->name
) == 0)) {
685 sort_key
->field_idx
= j
;
686 descending
= is_descending(field_str
);
687 if (descending
< 0) {
691 sort_key
->descending
= descending
;
695 if (j
== hist_data
->n_fields
) {
700 hist_data
->n_sort_keys
= i
;
705 static void destroy_hist_data(struct hist_trigger_data
*hist_data
)
707 destroy_hist_trigger_attrs(hist_data
->attrs
);
708 destroy_hist_fields(hist_data
);
709 tracing_map_destroy(hist_data
->map
);
713 static int create_tracing_map_fields(struct hist_trigger_data
*hist_data
)
715 struct tracing_map
*map
= hist_data
->map
;
716 struct ftrace_event_field
*field
;
717 struct hist_field
*hist_field
;
720 for_each_hist_field(i
, hist_data
) {
721 hist_field
= hist_data
->fields
[i
];
722 if (hist_field
->flags
& HIST_FIELD_FL_KEY
) {
723 tracing_map_cmp_fn_t cmp_fn
;
725 field
= hist_field
->field
;
727 if (hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
728 cmp_fn
= tracing_map_cmp_none
;
729 else if (is_string_field(field
))
730 cmp_fn
= tracing_map_cmp_string
;
732 cmp_fn
= tracing_map_cmp_num(field
->size
,
734 idx
= tracing_map_add_key_field(map
,
739 idx
= tracing_map_add_sum_field(map
);
748 static bool need_tracing_map_ops(struct hist_trigger_data
*hist_data
)
750 struct hist_field
*key_field
;
753 for_each_hist_key_field(i
, hist_data
) {
754 key_field
= hist_data
->fields
[i
];
756 if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
)
763 static struct hist_trigger_data
*
764 create_hist_data(unsigned int map_bits
,
765 struct hist_trigger_attrs
*attrs
,
766 struct trace_event_file
*file
)
768 const struct tracing_map_ops
*map_ops
= NULL
;
769 struct hist_trigger_data
*hist_data
;
772 hist_data
= kzalloc(sizeof(*hist_data
), GFP_KERNEL
);
774 return ERR_PTR(-ENOMEM
);
776 hist_data
->attrs
= attrs
;
778 ret
= create_hist_fields(hist_data
, file
);
782 ret
= create_sort_keys(hist_data
);
786 if (need_tracing_map_ops(hist_data
))
787 map_ops
= &hist_trigger_elt_comm_ops
;
789 hist_data
->map
= tracing_map_create(map_bits
, hist_data
->key_size
,
791 if (IS_ERR(hist_data
->map
)) {
792 ret
= PTR_ERR(hist_data
->map
);
793 hist_data
->map
= NULL
;
797 ret
= create_tracing_map_fields(hist_data
);
801 ret
= tracing_map_init(hist_data
->map
);
805 hist_data
->event_file
= file
;
809 hist_data
->attrs
= NULL
;
811 destroy_hist_data(hist_data
);
813 hist_data
= ERR_PTR(ret
);
818 static void hist_trigger_elt_update(struct hist_trigger_data
*hist_data
,
819 struct tracing_map_elt
*elt
,
822 struct hist_field
*hist_field
;
826 for_each_hist_val_field(i
, hist_data
) {
827 hist_field
= hist_data
->fields
[i
];
828 hist_val
= hist_field
->fn(hist_field
, rec
);
829 tracing_map_update_sum(elt
, i
, hist_val
);
833 static inline void add_to_key(char *compound_key
, void *key
,
834 struct hist_field
*key_field
, void *rec
)
836 size_t size
= key_field
->size
;
838 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
839 struct ftrace_event_field
*field
;
841 field
= key_field
->field
;
842 if (field
->filter_type
== FILTER_DYN_STRING
)
843 size
= *(u32
*)(rec
+ field
->offset
) >> 16;
844 else if (field
->filter_type
== FILTER_PTR_STRING
)
846 else if (field
->filter_type
== FILTER_STATIC_STRING
)
849 /* ensure NULL-termination */
850 if (size
> key_field
->size
- 1)
851 size
= key_field
->size
- 1;
854 memcpy(compound_key
+ key_field
->offset
, key
, size
);
857 static void event_hist_trigger(struct event_trigger_data
*data
, void *rec
)
859 struct hist_trigger_data
*hist_data
= data
->private_data
;
860 bool use_compound_key
= (hist_data
->n_keys
> 1);
861 unsigned long entries
[HIST_STACKTRACE_DEPTH
];
862 char compound_key
[HIST_KEY_SIZE_MAX
];
863 struct stack_trace stacktrace
;
864 struct hist_field
*key_field
;
865 struct tracing_map_elt
*elt
;
870 memset(compound_key
, 0, hist_data
->key_size
);
872 for_each_hist_key_field(i
, hist_data
) {
873 key_field
= hist_data
->fields
[i
];
875 if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
876 stacktrace
.max_entries
= HIST_STACKTRACE_DEPTH
;
877 stacktrace
.entries
= entries
;
878 stacktrace
.nr_entries
= 0;
879 stacktrace
.skip
= HIST_STACKTRACE_SKIP
;
881 memset(stacktrace
.entries
, 0, HIST_STACKTRACE_SIZE
);
882 save_stack_trace(&stacktrace
);
886 field_contents
= key_field
->fn(key_field
, rec
);
887 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
888 key
= (void *)(unsigned long)field_contents
;
889 use_compound_key
= true;
891 key
= (void *)&field_contents
;
894 if (use_compound_key
)
895 add_to_key(compound_key
, key
, key_field
, rec
);
898 if (use_compound_key
)
901 elt
= tracing_map_insert(hist_data
->map
, key
);
903 hist_trigger_elt_update(hist_data
, elt
, rec
);
906 static void hist_trigger_stacktrace_print(struct seq_file
*m
,
907 unsigned long *stacktrace_entries
,
908 unsigned int max_entries
)
910 char str
[KSYM_SYMBOL_LEN
];
911 unsigned int spaces
= 8;
914 for (i
= 0; i
< max_entries
; i
++) {
915 if (stacktrace_entries
[i
] == ULONG_MAX
)
918 seq_printf(m
, "%*c", 1 + spaces
, ' ');
919 sprint_symbol(str
, stacktrace_entries
[i
]);
920 seq_printf(m
, "%s\n", str
);
925 hist_trigger_entry_print(struct seq_file
*m
,
926 struct hist_trigger_data
*hist_data
, void *key
,
927 struct tracing_map_elt
*elt
)
929 struct hist_field
*key_field
;
930 char str
[KSYM_SYMBOL_LEN
];
931 bool multiline
= false;
937 for_each_hist_key_field(i
, hist_data
) {
938 key_field
= hist_data
->fields
[i
];
940 if (i
> hist_data
->n_vals
)
943 if (key_field
->flags
& HIST_FIELD_FL_HEX
) {
944 uval
= *(u64
*)(key
+ key_field
->offset
);
945 seq_printf(m
, "%s: %llx",
946 key_field
->field
->name
, uval
);
947 } else if (key_field
->flags
& HIST_FIELD_FL_SYM
) {
948 uval
= *(u64
*)(key
+ key_field
->offset
);
949 sprint_symbol_no_offset(str
, uval
);
950 seq_printf(m
, "%s: [%llx] %-45s",
951 key_field
->field
->name
, uval
, str
);
952 } else if (key_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
) {
953 uval
= *(u64
*)(key
+ key_field
->offset
);
954 sprint_symbol(str
, uval
);
955 seq_printf(m
, "%s: [%llx] %-55s",
956 key_field
->field
->name
, uval
, str
);
957 } else if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
958 char *comm
= elt
->private_data
;
960 uval
= *(u64
*)(key
+ key_field
->offset
);
961 seq_printf(m
, "%s: %-16s[%10llu]",
962 key_field
->field
->name
, comm
, uval
);
963 } else if (key_field
->flags
& HIST_FIELD_FL_SYSCALL
) {
964 const char *syscall_name
;
966 uval
= *(u64
*)(key
+ key_field
->offset
);
967 syscall_name
= get_syscall_name(uval
);
969 syscall_name
= "unknown_syscall";
971 seq_printf(m
, "%s: %-30s[%3llu]",
972 key_field
->field
->name
, syscall_name
, uval
);
973 } else if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
974 seq_puts(m
, "stacktrace:\n");
975 hist_trigger_stacktrace_print(m
,
976 key
+ key_field
->offset
,
977 HIST_STACKTRACE_DEPTH
);
979 } else if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
980 seq_printf(m
, "%s: %-50s", key_field
->field
->name
,
981 (char *)(key
+ key_field
->offset
));
983 uval
= *(u64
*)(key
+ key_field
->offset
);
984 seq_printf(m
, "%s: %10llu", key_field
->field
->name
,
994 seq_printf(m
, " hitcount: %10llu",
995 tracing_map_read_sum(elt
, HITCOUNT_IDX
));
997 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
998 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_HEX
) {
999 seq_printf(m
, " %s: %10llx",
1000 hist_data
->fields
[i
]->field
->name
,
1001 tracing_map_read_sum(elt
, i
));
1003 seq_printf(m
, " %s: %10llu",
1004 hist_data
->fields
[i
]->field
->name
,
1005 tracing_map_read_sum(elt
, i
));
1012 static int print_entries(struct seq_file
*m
,
1013 struct hist_trigger_data
*hist_data
)
1015 struct tracing_map_sort_entry
**sort_entries
= NULL
;
1016 struct tracing_map
*map
= hist_data
->map
;
1017 unsigned int i
, n_entries
;
1019 n_entries
= tracing_map_sort_entries(map
, hist_data
->sort_keys
,
1020 hist_data
->n_sort_keys
,
1025 for (i
= 0; i
< n_entries
; i
++)
1026 hist_trigger_entry_print(m
, hist_data
,
1027 sort_entries
[i
]->key
,
1028 sort_entries
[i
]->elt
);
1030 tracing_map_destroy_sort_entries(sort_entries
, n_entries
);
1035 static void hist_trigger_show(struct seq_file
*m
,
1036 struct event_trigger_data
*data
, int n
)
1038 struct hist_trigger_data
*hist_data
;
1039 int n_entries
, ret
= 0;
1042 seq_puts(m
, "\n\n");
1044 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
1045 data
->ops
->print(m
, data
->ops
, data
);
1046 seq_puts(m
, "#\n\n");
1048 hist_data
= data
->private_data
;
1049 n_entries
= print_entries(m
, hist_data
);
1050 if (n_entries
< 0) {
1055 seq_printf(m
, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
1056 (u64
)atomic64_read(&hist_data
->map
->hits
),
1057 n_entries
, (u64
)atomic64_read(&hist_data
->map
->drops
));
1060 static int hist_show(struct seq_file
*m
, void *v
)
1062 struct event_trigger_data
*data
;
1063 struct trace_event_file
*event_file
;
1066 mutex_lock(&event_mutex
);
1068 event_file
= event_file_data(m
->private);
1069 if (unlikely(!event_file
)) {
1074 list_for_each_entry_rcu(data
, &event_file
->triggers
, list
) {
1075 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
1076 hist_trigger_show(m
, data
, n
++);
1080 mutex_unlock(&event_mutex
);
1085 static int event_hist_open(struct inode
*inode
, struct file
*file
)
1087 return single_open(file
, hist_show
, file
);
1090 const struct file_operations event_hist_fops
= {
1091 .open
= event_hist_open
,
1093 .llseek
= seq_lseek
,
1094 .release
= single_release
,
1097 static const char *get_hist_field_flags(struct hist_field
*hist_field
)
1099 const char *flags_str
= NULL
;
1101 if (hist_field
->flags
& HIST_FIELD_FL_HEX
)
1103 else if (hist_field
->flags
& HIST_FIELD_FL_SYM
)
1105 else if (hist_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
)
1106 flags_str
= "sym-offset";
1107 else if (hist_field
->flags
& HIST_FIELD_FL_EXECNAME
)
1108 flags_str
= "execname";
1109 else if (hist_field
->flags
& HIST_FIELD_FL_SYSCALL
)
1110 flags_str
= "syscall";
1115 static void hist_field_print(struct seq_file
*m
, struct hist_field
*hist_field
)
1117 seq_printf(m
, "%s", hist_field
->field
->name
);
1118 if (hist_field
->flags
) {
1119 const char *flags_str
= get_hist_field_flags(hist_field
);
1122 seq_printf(m
, ".%s", flags_str
);
1126 static int event_hist_trigger_print(struct seq_file
*m
,
1127 struct event_trigger_ops
*ops
,
1128 struct event_trigger_data
*data
)
1130 struct hist_trigger_data
*hist_data
= data
->private_data
;
1131 struct hist_field
*key_field
;
1134 seq_puts(m
, "hist:keys=");
1136 for_each_hist_key_field(i
, hist_data
) {
1137 key_field
= hist_data
->fields
[i
];
1139 if (i
> hist_data
->n_vals
)
1142 if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
1143 seq_puts(m
, "stacktrace");
1145 hist_field_print(m
, key_field
);
1148 seq_puts(m
, ":vals=");
1150 for_each_hist_val_field(i
, hist_data
) {
1151 if (i
== HITCOUNT_IDX
)
1152 seq_puts(m
, "hitcount");
1155 hist_field_print(m
, hist_data
->fields
[i
]);
1159 seq_puts(m
, ":sort=");
1161 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
1162 struct tracing_map_sort_key
*sort_key
;
1164 sort_key
= &hist_data
->sort_keys
[i
];
1169 if (sort_key
->field_idx
== HITCOUNT_IDX
)
1170 seq_puts(m
, "hitcount");
1172 unsigned int idx
= sort_key
->field_idx
;
1174 if (WARN_ON(idx
>= TRACING_MAP_FIELDS_MAX
))
1177 hist_field_print(m
, hist_data
->fields
[idx
]);
1180 if (sort_key
->descending
)
1181 seq_puts(m
, ".descending");
1184 seq_printf(m
, ":size=%u", (1 << hist_data
->map
->map_bits
));
1186 if (data
->filter_str
)
1187 seq_printf(m
, " if %s", data
->filter_str
);
1190 seq_puts(m
, " [paused]");
1192 seq_puts(m
, " [active]");
1199 static void event_hist_trigger_free(struct event_trigger_ops
*ops
,
1200 struct event_trigger_data
*data
)
1202 struct hist_trigger_data
*hist_data
= data
->private_data
;
1204 if (WARN_ON_ONCE(data
->ref
<= 0))
1209 trigger_data_free(data
);
1210 destroy_hist_data(hist_data
);
1214 static struct event_trigger_ops event_hist_trigger_ops
= {
1215 .func
= event_hist_trigger
,
1216 .print
= event_hist_trigger_print
,
1217 .init
= event_trigger_init
,
1218 .free
= event_hist_trigger_free
,
1221 static struct event_trigger_ops
*event_hist_get_trigger_ops(char *cmd
,
1224 return &event_hist_trigger_ops
;
1227 static void hist_clear(struct event_trigger_data
*data
)
1229 struct hist_trigger_data
*hist_data
= data
->private_data
;
1232 paused
= data
->paused
;
1233 data
->paused
= true;
1235 synchronize_sched();
1237 tracing_map_clear(hist_data
->map
);
1239 data
->paused
= paused
;
1242 static bool hist_trigger_match(struct event_trigger_data
*data
,
1243 struct event_trigger_data
*data_test
)
1245 struct tracing_map_sort_key
*sort_key
, *sort_key_test
;
1246 struct hist_trigger_data
*hist_data
, *hist_data_test
;
1247 struct hist_field
*key_field
, *key_field_test
;
1250 hist_data
= data
->private_data
;
1251 hist_data_test
= data_test
->private_data
;
1253 if (hist_data
->n_vals
!= hist_data_test
->n_vals
||
1254 hist_data
->n_fields
!= hist_data_test
->n_fields
||
1255 hist_data
->n_sort_keys
!= hist_data_test
->n_sort_keys
)
1258 if ((data
->filter_str
&& !data_test
->filter_str
) ||
1259 (!data
->filter_str
&& data_test
->filter_str
))
1262 for_each_hist_field(i
, hist_data
) {
1263 key_field
= hist_data
->fields
[i
];
1264 key_field_test
= hist_data_test
->fields
[i
];
1266 if (key_field
->flags
!= key_field_test
->flags
)
1268 if (key_field
->field
!= key_field_test
->field
)
1270 if (key_field
->offset
!= key_field_test
->offset
)
1274 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
1275 sort_key
= &hist_data
->sort_keys
[i
];
1276 sort_key_test
= &hist_data_test
->sort_keys
[i
];
1278 if (sort_key
->field_idx
!= sort_key_test
->field_idx
||
1279 sort_key
->descending
!= sort_key_test
->descending
)
1283 if (data
->filter_str
&&
1284 (strcmp(data
->filter_str
, data_test
->filter_str
) != 0))
1290 static int hist_register_trigger(char *glob
, struct event_trigger_ops
*ops
,
1291 struct event_trigger_data
*data
,
1292 struct trace_event_file
*file
)
1294 struct hist_trigger_data
*hist_data
= data
->private_data
;
1295 struct event_trigger_data
*test
;
1298 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
1299 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1300 if (!hist_trigger_match(data
, test
))
1302 if (hist_data
->attrs
->pause
)
1303 test
->paused
= true;
1304 else if (hist_data
->attrs
->cont
)
1305 test
->paused
= false;
1306 else if (hist_data
->attrs
->clear
)
1314 if (hist_data
->attrs
->cont
|| hist_data
->attrs
->clear
) {
1319 if (hist_data
->attrs
->pause
)
1320 data
->paused
= true;
1322 if (data
->ops
->init
) {
1323 ret
= data
->ops
->init(data
->ops
, data
);
1328 list_add_rcu(&data
->list
, &file
->triggers
);
1331 update_cond_flag(file
);
1332 if (trace_event_trigger_enable_disable(file
, 1) < 0) {
1333 list_del_rcu(&data
->list
);
1334 update_cond_flag(file
);
1341 static void hist_unregister_trigger(char *glob
, struct event_trigger_ops
*ops
,
1342 struct event_trigger_data
*data
,
1343 struct trace_event_file
*file
)
1345 struct event_trigger_data
*test
;
1346 bool unregistered
= false;
1348 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
1349 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1350 if (!hist_trigger_match(data
, test
))
1352 unregistered
= true;
1353 list_del_rcu(&test
->list
);
1354 trace_event_trigger_enable_disable(file
, 0);
1355 update_cond_flag(file
);
1360 if (unregistered
&& test
->ops
->free
)
1361 test
->ops
->free(test
->ops
, test
);
1364 static void hist_unreg_all(struct trace_event_file
*file
)
1366 struct event_trigger_data
*test
;
1368 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
1369 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1370 list_del_rcu(&test
->list
);
1371 trace_event_trigger_enable_disable(file
, 0);
1372 update_cond_flag(file
);
1373 if (test
->ops
->free
)
1374 test
->ops
->free(test
->ops
, test
);
1379 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
1380 struct trace_event_file
*file
,
1381 char *glob
, char *cmd
, char *param
)
1383 unsigned int hist_trigger_bits
= TRACING_MAP_BITS_DEFAULT
;
1384 struct event_trigger_data
*trigger_data
;
1385 struct hist_trigger_attrs
*attrs
;
1386 struct event_trigger_ops
*trigger_ops
;
1387 struct hist_trigger_data
*hist_data
;
1394 /* separate the trigger from the filter (k:v [if filter]) */
1395 trigger
= strsep(¶m
, " \t");
1399 attrs
= parse_hist_trigger_attrs(trigger
);
1401 return PTR_ERR(attrs
);
1403 if (attrs
->map_bits
)
1404 hist_trigger_bits
= attrs
->map_bits
;
1406 hist_data
= create_hist_data(hist_trigger_bits
, attrs
, file
);
1407 if (IS_ERR(hist_data
)) {
1408 destroy_hist_trigger_attrs(attrs
);
1409 return PTR_ERR(hist_data
);
1412 trigger_ops
= cmd_ops
->get_trigger_ops(cmd
, trigger
);
1415 trigger_data
= kzalloc(sizeof(*trigger_data
), GFP_KERNEL
);
1419 trigger_data
->count
= -1;
1420 trigger_data
->ops
= trigger_ops
;
1421 trigger_data
->cmd_ops
= cmd_ops
;
1423 INIT_LIST_HEAD(&trigger_data
->list
);
1424 RCU_INIT_POINTER(trigger_data
->filter
, NULL
);
1426 trigger_data
->private_data
= hist_data
;
1428 /* if param is non-empty, it's supposed to be a filter */
1429 if (param
&& cmd_ops
->set_filter
) {
1430 ret
= cmd_ops
->set_filter(param
, trigger_data
, file
);
1435 if (glob
[0] == '!') {
1436 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
1441 ret
= cmd_ops
->reg(glob
, trigger_ops
, trigger_data
, file
);
1443 * The above returns on success the # of triggers registered,
1444 * but if it didn't register any it returns zero. Consider no
1445 * triggers registered a failure too.
1448 if (!(attrs
->pause
|| attrs
->cont
|| attrs
->clear
))
1453 /* Just return zero, not the number of registered triggers */
1458 if (cmd_ops
->set_filter
)
1459 cmd_ops
->set_filter(NULL
, trigger_data
, NULL
);
1461 kfree(trigger_data
);
1463 destroy_hist_data(hist_data
);
1467 static struct event_command trigger_hist_cmd
= {
1469 .trigger_type
= ETT_EVENT_HIST
,
1470 .flags
= EVENT_CMD_FL_NEEDS_REC
,
1471 .func
= event_hist_trigger_func
,
1472 .reg
= hist_register_trigger
,
1473 .unreg
= hist_unregister_trigger
,
1474 .unreg_all
= hist_unreg_all
,
1475 .get_trigger_ops
= event_hist_get_trigger_ops
,
1476 .set_filter
= set_trigger_filter
,
1479 __init
int register_trigger_hist_cmd(void)
1483 ret
= register_event_command(&trigger_hist_cmd
);
1490 hist_enable_trigger(struct event_trigger_data
*data
, void *rec
)
1492 struct enable_trigger_data
*enable_data
= data
->private_data
;
1493 struct event_trigger_data
*test
;
1495 list_for_each_entry_rcu(test
, &enable_data
->file
->triggers
, list
) {
1496 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
1497 if (enable_data
->enable
)
1498 test
->paused
= false;
1500 test
->paused
= true;
1506 hist_enable_count_trigger(struct event_trigger_data
*data
, void *rec
)
1511 if (data
->count
!= -1)
1514 hist_enable_trigger(data
, rec
);
1517 static struct event_trigger_ops hist_enable_trigger_ops
= {
1518 .func
= hist_enable_trigger
,
1519 .print
= event_enable_trigger_print
,
1520 .init
= event_trigger_init
,
1521 .free
= event_enable_trigger_free
,
1524 static struct event_trigger_ops hist_enable_count_trigger_ops
= {
1525 .func
= hist_enable_count_trigger
,
1526 .print
= event_enable_trigger_print
,
1527 .init
= event_trigger_init
,
1528 .free
= event_enable_trigger_free
,
1531 static struct event_trigger_ops hist_disable_trigger_ops
= {
1532 .func
= hist_enable_trigger
,
1533 .print
= event_enable_trigger_print
,
1534 .init
= event_trigger_init
,
1535 .free
= event_enable_trigger_free
,
1538 static struct event_trigger_ops hist_disable_count_trigger_ops
= {
1539 .func
= hist_enable_count_trigger
,
1540 .print
= event_enable_trigger_print
,
1541 .init
= event_trigger_init
,
1542 .free
= event_enable_trigger_free
,
1545 static struct event_trigger_ops
*
1546 hist_enable_get_trigger_ops(char *cmd
, char *param
)
1548 struct event_trigger_ops
*ops
;
1551 enable
= (strcmp(cmd
, ENABLE_HIST_STR
) == 0);
1554 ops
= param
? &hist_enable_count_trigger_ops
:
1555 &hist_enable_trigger_ops
;
1557 ops
= param
? &hist_disable_count_trigger_ops
:
1558 &hist_disable_trigger_ops
;
1563 static void hist_enable_unreg_all(struct trace_event_file
*file
)
1565 struct event_trigger_data
*test
;
1567 list_for_each_entry_rcu(test
, &file
->triggers
, list
) {
1568 if (test
->cmd_ops
->trigger_type
== ETT_HIST_ENABLE
) {
1569 list_del_rcu(&test
->list
);
1570 update_cond_flag(file
);
1571 trace_event_trigger_enable_disable(file
, 0);
1572 if (test
->ops
->free
)
1573 test
->ops
->free(test
->ops
, test
);
1578 static struct event_command trigger_hist_enable_cmd
= {
1579 .name
= ENABLE_HIST_STR
,
1580 .trigger_type
= ETT_HIST_ENABLE
,
1581 .func
= event_enable_trigger_func
,
1582 .reg
= event_enable_register_trigger
,
1583 .unreg
= event_enable_unregister_trigger
,
1584 .unreg_all
= hist_enable_unreg_all
,
1585 .get_trigger_ops
= hist_enable_get_trigger_ops
,
1586 .set_filter
= set_trigger_filter
,
1589 static struct event_command trigger_hist_disable_cmd
= {
1590 .name
= DISABLE_HIST_STR
,
1591 .trigger_type
= ETT_HIST_ENABLE
,
1592 .func
= event_enable_trigger_func
,
1593 .reg
= event_enable_register_trigger
,
1594 .unreg
= event_enable_unregister_trigger
,
1595 .unreg_all
= hist_enable_unreg_all
,
1596 .get_trigger_ops
= hist_enable_get_trigger_ops
,
1597 .set_filter
= set_trigger_filter
,
1600 static __init
void unregister_trigger_hist_enable_disable_cmds(void)
1602 unregister_event_command(&trigger_hist_enable_cmd
);
1603 unregister_event_command(&trigger_hist_disable_cmd
);
1606 __init
int register_trigger_hist_enable_disable_cmds(void)
1610 ret
= register_event_command(&trigger_hist_enable_cmd
);
1611 if (WARN_ON(ret
< 0))
1613 ret
= register_event_command(&trigger_hist_disable_cmd
);
1614 if (WARN_ON(ret
< 0))
1615 unregister_trigger_hist_enable_disable_cmds();