1 // SPDX-License-Identifier: GPL-2.0
/*
 * trace_events_hist - trace event hist triggers
 *
 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
 */
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
21 #include "tracing_map.h"
22 #include "trace_synth.h"
25 C(NONE, "No error"), \
26 C(DUPLICATE_VAR, "Variable already defined"), \
27 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
28 C(TOO_MANY_VARS, "Too many variables defined"), \
29 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
30 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
31 C(TRIGGER_EEXIST, "Hist trigger already exists"), \
32 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
33 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
34 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
35 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
36 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
37 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
38 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
39 C(HIST_NOT_FOUND, "Matching event histogram not found"), \
40 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
41 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
42 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
43 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
44 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
45 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
46 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
47 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
48 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
49 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
50 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
51 C(TOO_MANY_PARAMS, "Too many action params"), \
52 C(PARAM_NOT_FOUND, "Couldn't find param"), \
53 C(INVALID_PARAM, "Invalid action param"), \
54 C(ACTION_NOT_FOUND, "No action found"), \
55 C(NO_SAVE_PARAMS, "No params found for save()"), \
56 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
57 C(ACTION_MISMATCH, "Handler doesn't support action"), \
58 C(NO_CLOSING_PAREN, "No closing paren found"), \
59 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
60 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
61 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
62 C(VAR_NOT_FOUND, "Couldn't find variable"), \
63 C(FIELD_NOT_FOUND, "Couldn't find field"), \
64 C(EMPTY_ASSIGNMENT, "Empty assignment"), \
65 C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
66 C(EMPTY_SORT_FIELD, "Empty sort field"), \
67 C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
68 C(INVALID_SORT_FIELD, "Sort field must be a key or a val"), \
69 C(INVALID_STR_OPERAND, "String type can not be an operand in expression"),
72 #define C(a, b) HIST_ERR_##a
79 static const char *err_text
[] = { ERRORS
};
83 typedef u64 (*hist_field_fn_t
) (struct hist_field
*field
,
84 struct tracing_map_elt
*elt
,
85 struct trace_buffer
*buffer
,
86 struct ring_buffer_event
*rbe
,
89 #define HIST_FIELD_OPERANDS_MAX 2
90 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
91 #define HIST_ACTIONS_MAX 8
101 * A hist_var (histogram variable) contains variable information for
102 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
103 * flag set. A hist_var has a variable name e.g. ts0, and is
104 * associated with a given histogram trigger, as specified by
105 * hist_data. The hist_var idx is the unique index assigned to the
106 * variable by the hist trigger's tracing_map. The idx is what is
107 * used to set a variable's value and, by a variable reference, to
112 struct hist_trigger_data
*hist_data
;
117 struct ftrace_event_field
*field
;
123 unsigned int is_signed
;
124 unsigned long buckets
;
126 struct hist_field
*operands
[HIST_FIELD_OPERANDS_MAX
];
127 struct hist_trigger_data
*hist_data
;
130 * Variable fields contain variable-specific info in var.
133 enum field_op_id
operator;
138 * The name field is used for EXPR and VAR_REF fields. VAR
139 * fields contain the variable name in var.name.
144 * When a histogram trigger is hit, if it has any references
145 * to variables, the values of those variables are collected
146 * into a var_ref_vals array by resolve_var_refs(). The
147 * current value of each variable is read from the tracing_map
148 * using the hist field's hist_var.idx and entered into the
149 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
151 unsigned int var_ref_idx
;
154 unsigned int var_str_idx
;
157 static u64
hist_field_none(struct hist_field
*field
,
158 struct tracing_map_elt
*elt
,
159 struct trace_buffer
*buffer
,
160 struct ring_buffer_event
*rbe
,
166 static u64
hist_field_counter(struct hist_field
*field
,
167 struct tracing_map_elt
*elt
,
168 struct trace_buffer
*buffer
,
169 struct ring_buffer_event
*rbe
,
175 static u64
hist_field_string(struct hist_field
*hist_field
,
176 struct tracing_map_elt
*elt
,
177 struct trace_buffer
*buffer
,
178 struct ring_buffer_event
*rbe
,
181 char *addr
= (char *)(event
+ hist_field
->field
->offset
);
183 return (u64
)(unsigned long)addr
;
186 static u64
hist_field_dynstring(struct hist_field
*hist_field
,
187 struct tracing_map_elt
*elt
,
188 struct trace_buffer
*buffer
,
189 struct ring_buffer_event
*rbe
,
192 u32 str_item
= *(u32
*)(event
+ hist_field
->field
->offset
);
193 int str_loc
= str_item
& 0xffff;
194 char *addr
= (char *)(event
+ str_loc
);
196 return (u64
)(unsigned long)addr
;
199 static u64
hist_field_pstring(struct hist_field
*hist_field
,
200 struct tracing_map_elt
*elt
,
201 struct trace_buffer
*buffer
,
202 struct ring_buffer_event
*rbe
,
205 char **addr
= (char **)(event
+ hist_field
->field
->offset
);
207 return (u64
)(unsigned long)*addr
;
210 static u64
hist_field_log2(struct hist_field
*hist_field
,
211 struct tracing_map_elt
*elt
,
212 struct trace_buffer
*buffer
,
213 struct ring_buffer_event
*rbe
,
216 struct hist_field
*operand
= hist_field
->operands
[0];
218 u64 val
= operand
->fn(operand
, elt
, buffer
, rbe
, event
);
220 return (u64
) ilog2(roundup_pow_of_two(val
));
223 static u64
hist_field_bucket(struct hist_field
*hist_field
,
224 struct tracing_map_elt
*elt
,
225 struct trace_buffer
*buffer
,
226 struct ring_buffer_event
*rbe
,
229 struct hist_field
*operand
= hist_field
->operands
[0];
230 unsigned long buckets
= hist_field
->buckets
;
232 u64 val
= operand
->fn(operand
, elt
, buffer
, rbe
, event
);
234 if (WARN_ON_ONCE(!buckets
))
238 val
= div64_ul(val
, buckets
);
240 val
= (u64
)((unsigned long)val
/ buckets
);
241 return val
* buckets
;
244 static u64
hist_field_plus(struct hist_field
*hist_field
,
245 struct tracing_map_elt
*elt
,
246 struct trace_buffer
*buffer
,
247 struct ring_buffer_event
*rbe
,
250 struct hist_field
*operand1
= hist_field
->operands
[0];
251 struct hist_field
*operand2
= hist_field
->operands
[1];
253 u64 val1
= operand1
->fn(operand1
, elt
, buffer
, rbe
, event
);
254 u64 val2
= operand2
->fn(operand2
, elt
, buffer
, rbe
, event
);
259 static u64
hist_field_minus(struct hist_field
*hist_field
,
260 struct tracing_map_elt
*elt
,
261 struct trace_buffer
*buffer
,
262 struct ring_buffer_event
*rbe
,
265 struct hist_field
*operand1
= hist_field
->operands
[0];
266 struct hist_field
*operand2
= hist_field
->operands
[1];
268 u64 val1
= operand1
->fn(operand1
, elt
, buffer
, rbe
, event
);
269 u64 val2
= operand2
->fn(operand2
, elt
, buffer
, rbe
, event
);
274 static u64
hist_field_unary_minus(struct hist_field
*hist_field
,
275 struct tracing_map_elt
*elt
,
276 struct trace_buffer
*buffer
,
277 struct ring_buffer_event
*rbe
,
280 struct hist_field
*operand
= hist_field
->operands
[0];
282 s64 sval
= (s64
)operand
->fn(operand
, elt
, buffer
, rbe
, event
);
283 u64 val
= (u64
)-sval
;
288 #define DEFINE_HIST_FIELD_FN(type) \
289 static u64 hist_field_##type(struct hist_field *hist_field, \
290 struct tracing_map_elt *elt, \
291 struct trace_buffer *buffer, \
292 struct ring_buffer_event *rbe, \
295 type *addr = (type *)(event + hist_field->field->offset); \
297 return (u64)(unsigned long)*addr; \
300 DEFINE_HIST_FIELD_FN(s64
);
301 DEFINE_HIST_FIELD_FN(u64
);
302 DEFINE_HIST_FIELD_FN(s32
);
303 DEFINE_HIST_FIELD_FN(u32
);
304 DEFINE_HIST_FIELD_FN(s16
);
305 DEFINE_HIST_FIELD_FN(u16
);
306 DEFINE_HIST_FIELD_FN(s8
);
307 DEFINE_HIST_FIELD_FN(u8
);
309 #define for_each_hist_field(i, hist_data) \
310 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
312 #define for_each_hist_val_field(i, hist_data) \
313 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
315 #define for_each_hist_key_field(i, hist_data) \
316 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
318 #define HIST_STACKTRACE_DEPTH 16
319 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
320 #define HIST_STACKTRACE_SKIP 5
322 #define HITCOUNT_IDX 0
323 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
325 enum hist_field_flags
{
326 HIST_FIELD_FL_HITCOUNT
= 1 << 0,
327 HIST_FIELD_FL_KEY
= 1 << 1,
328 HIST_FIELD_FL_STRING
= 1 << 2,
329 HIST_FIELD_FL_HEX
= 1 << 3,
330 HIST_FIELD_FL_SYM
= 1 << 4,
331 HIST_FIELD_FL_SYM_OFFSET
= 1 << 5,
332 HIST_FIELD_FL_EXECNAME
= 1 << 6,
333 HIST_FIELD_FL_SYSCALL
= 1 << 7,
334 HIST_FIELD_FL_STACKTRACE
= 1 << 8,
335 HIST_FIELD_FL_LOG2
= 1 << 9,
336 HIST_FIELD_FL_TIMESTAMP
= 1 << 10,
337 HIST_FIELD_FL_TIMESTAMP_USECS
= 1 << 11,
338 HIST_FIELD_FL_VAR
= 1 << 12,
339 HIST_FIELD_FL_EXPR
= 1 << 13,
340 HIST_FIELD_FL_VAR_REF
= 1 << 14,
341 HIST_FIELD_FL_CPU
= 1 << 15,
342 HIST_FIELD_FL_ALIAS
= 1 << 16,
343 HIST_FIELD_FL_BUCKET
= 1 << 17,
348 char *name
[TRACING_MAP_VARS_MAX
];
349 char *expr
[TRACING_MAP_VARS_MAX
];
352 struct hist_trigger_attrs
{
362 unsigned int map_bits
;
364 char *assignment_str
[TRACING_MAP_VARS_MAX
];
365 unsigned int n_assignments
;
367 char *action_str
[HIST_ACTIONS_MAX
];
368 unsigned int n_actions
;
370 struct var_defs var_defs
;
374 struct hist_field
*var
;
375 struct hist_field
*val
;
378 struct field_var_hist
{
379 struct hist_trigger_data
*hist_data
;
383 struct hist_trigger_data
{
384 struct hist_field
*fields
[HIST_FIELDS_MAX
];
387 unsigned int n_fields
;
389 unsigned int n_var_str
;
390 unsigned int key_size
;
391 struct tracing_map_sort_key sort_keys
[TRACING_MAP_SORT_KEYS_MAX
];
392 unsigned int n_sort_keys
;
393 struct trace_event_file
*event_file
;
394 struct hist_trigger_attrs
*attrs
;
395 struct tracing_map
*map
;
396 bool enable_timestamps
;
398 struct hist_field
*var_refs
[TRACING_MAP_VARS_MAX
];
399 unsigned int n_var_refs
;
401 struct action_data
*actions
[HIST_ACTIONS_MAX
];
402 unsigned int n_actions
;
404 struct field_var
*field_vars
[SYNTH_FIELDS_MAX
];
405 unsigned int n_field_vars
;
406 unsigned int n_field_var_str
;
407 struct field_var_hist
*field_var_hists
[SYNTH_FIELDS_MAX
];
408 unsigned int n_field_var_hists
;
410 struct field_var
*save_vars
[SYNTH_FIELDS_MAX
];
411 unsigned int n_save_vars
;
412 unsigned int n_save_var_str
;
417 typedef void (*action_fn_t
) (struct hist_trigger_data
*hist_data
,
418 struct tracing_map_elt
*elt
,
419 struct trace_buffer
*buffer
, void *rec
,
420 struct ring_buffer_event
*rbe
, void *key
,
421 struct action_data
*data
, u64
*var_ref_vals
);
423 typedef bool (*check_track_val_fn_t
) (u64 track_val
, u64 var_val
);
438 enum handler_id handler
;
439 enum action_id action
;
443 unsigned int n_params
;
444 char *params
[SYNTH_FIELDS_MAX
];
447 * When a histogram trigger is hit, the values of any
448 * references to variables, including variables being passed
449 * as parameters to synthetic events, are collected into a
450 * var_ref_vals array. This var_ref_idx array is an array of
451 * indices into the var_ref_vals array, one for each synthetic
452 * event param, and is passed to the synthetic event
455 unsigned int var_ref_idx
[TRACING_MAP_VARS_MAX
];
456 struct synth_event
*synth_event
;
457 bool use_trace_keyword
;
458 char *synth_event_name
;
468 * var_str contains the $-unstripped variable
469 * name referenced by var_ref, and used when
470 * printing the action. Because var_ref
471 * creation is deferred to create_actions(),
472 * we need a per-action way to save it until
473 * then, thus var_str.
478 * var_ref refers to the variable being
479 * tracked e.g onmax($var).
481 struct hist_field
*var_ref
;
484 * track_var contains the 'invisible' tracking
485 * variable created to keep the current
488 struct hist_field
*track_var
;
490 check_track_val_fn_t check_val
;
491 action_fn_t save_data
;
500 unsigned int key_len
;
502 struct tracing_map_elt elt
;
504 struct action_data
*action_data
;
505 struct hist_trigger_data
*hist_data
;
508 struct hist_elt_data
{
511 char **field_var_str
;
515 struct snapshot_context
{
516 struct tracing_map_elt
*elt
;
520 static void track_data_free(struct track_data
*track_data
)
522 struct hist_elt_data
*elt_data
;
527 kfree(track_data
->key
);
529 elt_data
= track_data
->elt
.private_data
;
531 kfree(elt_data
->comm
);
538 static struct track_data
*track_data_alloc(unsigned int key_len
,
539 struct action_data
*action_data
,
540 struct hist_trigger_data
*hist_data
)
542 struct track_data
*data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
543 struct hist_elt_data
*elt_data
;
546 return ERR_PTR(-ENOMEM
);
548 data
->key
= kzalloc(key_len
, GFP_KERNEL
);
550 track_data_free(data
);
551 return ERR_PTR(-ENOMEM
);
554 data
->key_len
= key_len
;
555 data
->action_data
= action_data
;
556 data
->hist_data
= hist_data
;
558 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
560 track_data_free(data
);
561 return ERR_PTR(-ENOMEM
);
564 data
->elt
.private_data
= elt_data
;
566 elt_data
->comm
= kzalloc(TASK_COMM_LEN
, GFP_KERNEL
);
567 if (!elt_data
->comm
) {
568 track_data_free(data
);
569 return ERR_PTR(-ENOMEM
);
575 static char last_cmd
[MAX_FILTER_STR_VAL
];
576 static char last_cmd_loc
[MAX_FILTER_STR_VAL
];
578 static int errpos(char *str
)
580 return err_pos(last_cmd
, str
);
583 static void last_cmd_set(struct trace_event_file
*file
, char *str
)
585 const char *system
= NULL
, *name
= NULL
;
586 struct trace_event_call
*call
;
591 strcpy(last_cmd
, "hist:");
592 strncat(last_cmd
, str
, MAX_FILTER_STR_VAL
- 1 - sizeof("hist:"));
595 call
= file
->event_call
;
596 system
= call
->class->system
;
598 name
= trace_event_name(call
);
605 snprintf(last_cmd_loc
, MAX_FILTER_STR_VAL
, "hist:%s:%s", system
, name
);
608 static void hist_err(struct trace_array
*tr
, u8 err_type
, u8 err_pos
)
610 tracing_log_err(tr
, last_cmd_loc
, last_cmd
, err_text
,
614 static void hist_err_clear(void)
617 last_cmd_loc
[0] = '\0';
620 typedef void (*synth_probe_func_t
) (void *__data
, u64
*var_ref_vals
,
621 unsigned int *var_ref_idx
);
623 static inline void trace_synth(struct synth_event
*event
, u64
*var_ref_vals
,
624 unsigned int *var_ref_idx
)
626 struct tracepoint
*tp
= event
->tp
;
628 if (unlikely(atomic_read(&tp
->key
.enabled
) > 0)) {
629 struct tracepoint_func
*probe_func_ptr
;
630 synth_probe_func_t probe_func
;
633 if (!(cpu_online(raw_smp_processor_id())))
636 probe_func_ptr
= rcu_dereference_sched((tp
)->funcs
);
637 if (probe_func_ptr
) {
639 probe_func
= probe_func_ptr
->func
;
640 __data
= probe_func_ptr
->data
;
641 probe_func(__data
, var_ref_vals
, var_ref_idx
);
642 } while ((++probe_func_ptr
)->func
);
647 static void action_trace(struct hist_trigger_data
*hist_data
,
648 struct tracing_map_elt
*elt
,
649 struct trace_buffer
*buffer
, void *rec
,
650 struct ring_buffer_event
*rbe
, void *key
,
651 struct action_data
*data
, u64
*var_ref_vals
)
653 struct synth_event
*event
= data
->synth_event
;
655 trace_synth(event
, var_ref_vals
, data
->var_ref_idx
);
658 struct hist_var_data
{
659 struct list_head list
;
660 struct hist_trigger_data
*hist_data
;
663 static u64
hist_field_timestamp(struct hist_field
*hist_field
,
664 struct tracing_map_elt
*elt
,
665 struct trace_buffer
*buffer
,
666 struct ring_buffer_event
*rbe
,
669 struct hist_trigger_data
*hist_data
= hist_field
->hist_data
;
670 struct trace_array
*tr
= hist_data
->event_file
->tr
;
672 u64 ts
= ring_buffer_event_time_stamp(buffer
, rbe
);
674 if (hist_data
->attrs
->ts_in_usecs
&& trace_clock_in_ns(tr
))
680 static u64
hist_field_cpu(struct hist_field
*hist_field
,
681 struct tracing_map_elt
*elt
,
682 struct trace_buffer
*buffer
,
683 struct ring_buffer_event
*rbe
,
686 int cpu
= smp_processor_id();
692 * check_field_for_var_ref - Check if a VAR_REF field references a variable
693 * @hist_field: The VAR_REF field to check
694 * @var_data: The hist trigger that owns the variable
695 * @var_idx: The trigger variable identifier
697 * Check the given VAR_REF field to see whether or not it references
698 * the given variable associated with the given trigger.
700 * Return: The VAR_REF field if it does reference the variable, NULL if not
702 static struct hist_field
*
703 check_field_for_var_ref(struct hist_field
*hist_field
,
704 struct hist_trigger_data
*var_data
,
705 unsigned int var_idx
)
707 WARN_ON(!(hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR_REF
));
709 if (hist_field
&& hist_field
->var
.idx
== var_idx
&&
710 hist_field
->var
.hist_data
== var_data
)
717 * find_var_ref - Check if a trigger has a reference to a trigger variable
718 * @hist_data: The hist trigger that might have a reference to the variable
719 * @var_data: The hist trigger that owns the variable
720 * @var_idx: The trigger variable identifier
722 * Check the list of var_refs[] on the first hist trigger to see
723 * whether any of them are references to the variable on the second
726 * Return: The VAR_REF field referencing the variable if so, NULL if not
728 static struct hist_field
*find_var_ref(struct hist_trigger_data
*hist_data
,
729 struct hist_trigger_data
*var_data
,
730 unsigned int var_idx
)
732 struct hist_field
*hist_field
;
735 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
736 hist_field
= hist_data
->var_refs
[i
];
737 if (check_field_for_var_ref(hist_field
, var_data
, var_idx
))
745 * find_any_var_ref - Check if there is a reference to a given trigger variable
746 * @hist_data: The hist trigger
747 * @var_idx: The trigger variable identifier
749 * Check to see whether the given variable is currently referenced by
752 * The trigger the variable is defined on is explicitly excluded - the
753 * assumption being that a self-reference doesn't prevent a trigger
754 * from being removed.
756 * Return: The VAR_REF field referencing the variable if so, NULL if not
758 static struct hist_field
*find_any_var_ref(struct hist_trigger_data
*hist_data
,
759 unsigned int var_idx
)
761 struct trace_array
*tr
= hist_data
->event_file
->tr
;
762 struct hist_field
*found
= NULL
;
763 struct hist_var_data
*var_data
;
765 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
766 if (var_data
->hist_data
== hist_data
)
768 found
= find_var_ref(var_data
->hist_data
, hist_data
, var_idx
);
777 * check_var_refs - Check if there is a reference to any of trigger's variables
778 * @hist_data: The hist trigger
780 * A trigger can define one or more variables. If any one of them is
781 * currently referenced by any other trigger, this function will
784 * Typically used to determine whether or not a trigger can be removed
785 * - if there are any references to a trigger's variables, it cannot.
787 * Return: True if there is a reference to any of trigger's variables
789 static bool check_var_refs(struct hist_trigger_data
*hist_data
)
791 struct hist_field
*field
;
795 for_each_hist_field(i
, hist_data
) {
796 field
= hist_data
->fields
[i
];
797 if (field
&& field
->flags
& HIST_FIELD_FL_VAR
) {
798 if (find_any_var_ref(hist_data
, field
->var
.idx
)) {
808 static struct hist_var_data
*find_hist_vars(struct hist_trigger_data
*hist_data
)
810 struct trace_array
*tr
= hist_data
->event_file
->tr
;
811 struct hist_var_data
*var_data
, *found
= NULL
;
813 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
814 if (var_data
->hist_data
== hist_data
) {
823 static bool field_has_hist_vars(struct hist_field
*hist_field
,
834 if (hist_field
->flags
& HIST_FIELD_FL_VAR
||
835 hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
838 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++) {
839 struct hist_field
*operand
;
841 operand
= hist_field
->operands
[i
];
842 if (field_has_hist_vars(operand
, level
+ 1))
849 static bool has_hist_vars(struct hist_trigger_data
*hist_data
)
851 struct hist_field
*hist_field
;
854 for_each_hist_field(i
, hist_data
) {
855 hist_field
= hist_data
->fields
[i
];
856 if (field_has_hist_vars(hist_field
, 0))
863 static int save_hist_vars(struct hist_trigger_data
*hist_data
)
865 struct trace_array
*tr
= hist_data
->event_file
->tr
;
866 struct hist_var_data
*var_data
;
868 var_data
= find_hist_vars(hist_data
);
872 if (tracing_check_open_get_tr(tr
))
875 var_data
= kzalloc(sizeof(*var_data
), GFP_KERNEL
);
881 var_data
->hist_data
= hist_data
;
882 list_add(&var_data
->list
, &tr
->hist_vars
);
887 static void remove_hist_vars(struct hist_trigger_data
*hist_data
)
889 struct trace_array
*tr
= hist_data
->event_file
->tr
;
890 struct hist_var_data
*var_data
;
892 var_data
= find_hist_vars(hist_data
);
896 if (WARN_ON(check_var_refs(hist_data
)))
899 list_del(&var_data
->list
);
906 static struct hist_field
*find_var_field(struct hist_trigger_data
*hist_data
,
907 const char *var_name
)
909 struct hist_field
*hist_field
, *found
= NULL
;
912 for_each_hist_field(i
, hist_data
) {
913 hist_field
= hist_data
->fields
[i
];
914 if (hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR
&&
915 strcmp(hist_field
->var
.name
, var_name
) == 0) {
924 static struct hist_field
*find_var(struct hist_trigger_data
*hist_data
,
925 struct trace_event_file
*file
,
926 const char *var_name
)
928 struct hist_trigger_data
*test_data
;
929 struct event_trigger_data
*test
;
930 struct hist_field
*hist_field
;
932 lockdep_assert_held(&event_mutex
);
934 hist_field
= find_var_field(hist_data
, var_name
);
938 list_for_each_entry(test
, &file
->triggers
, list
) {
939 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
940 test_data
= test
->private_data
;
941 hist_field
= find_var_field(test_data
, var_name
);
950 static struct trace_event_file
*find_var_file(struct trace_array
*tr
,
955 struct hist_trigger_data
*var_hist_data
;
956 struct hist_var_data
*var_data
;
957 struct trace_event_file
*file
, *found
= NULL
;
960 return find_event_file(tr
, system
, event_name
);
962 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
963 var_hist_data
= var_data
->hist_data
;
964 file
= var_hist_data
->event_file
;
968 if (find_var_field(var_hist_data
, var_name
)) {
970 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
, errpos(var_name
));
981 static struct hist_field
*find_file_var(struct trace_event_file
*file
,
982 const char *var_name
)
984 struct hist_trigger_data
*test_data
;
985 struct event_trigger_data
*test
;
986 struct hist_field
*hist_field
;
988 lockdep_assert_held(&event_mutex
);
990 list_for_each_entry(test
, &file
->triggers
, list
) {
991 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
992 test_data
= test
->private_data
;
993 hist_field
= find_var_field(test_data
, var_name
);
1002 static struct hist_field
*
1003 find_match_var(struct hist_trigger_data
*hist_data
, char *var_name
)
1005 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1006 struct hist_field
*hist_field
, *found
= NULL
;
1007 struct trace_event_file
*file
;
1010 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
1011 struct action_data
*data
= hist_data
->actions
[i
];
1013 if (data
->handler
== HANDLER_ONMATCH
) {
1014 char *system
= data
->match_data
.event_system
;
1015 char *event_name
= data
->match_data
.event
;
1017 file
= find_var_file(tr
, system
, event_name
, var_name
);
1020 hist_field
= find_file_var(file
, var_name
);
1023 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
,
1025 return ERR_PTR(-EINVAL
);
1035 static struct hist_field
*find_event_var(struct hist_trigger_data
*hist_data
,
1040 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1041 struct hist_field
*hist_field
= NULL
;
1042 struct trace_event_file
*file
;
1044 if (!system
|| !event_name
) {
1045 hist_field
= find_match_var(hist_data
, var_name
);
1046 if (IS_ERR(hist_field
))
1052 file
= find_var_file(tr
, system
, event_name
, var_name
);
1056 hist_field
= find_file_var(file
, var_name
);
1061 static u64
hist_field_var_ref(struct hist_field
*hist_field
,
1062 struct tracing_map_elt
*elt
,
1063 struct trace_buffer
*buffer
,
1064 struct ring_buffer_event
*rbe
,
1067 struct hist_elt_data
*elt_data
;
1070 if (WARN_ON_ONCE(!elt
))
1073 elt_data
= elt
->private_data
;
1074 var_val
= elt_data
->var_ref_vals
[hist_field
->var_ref_idx
];
1079 static bool resolve_var_refs(struct hist_trigger_data
*hist_data
, void *key
,
1080 u64
*var_ref_vals
, bool self
)
1082 struct hist_trigger_data
*var_data
;
1083 struct tracing_map_elt
*var_elt
;
1084 struct hist_field
*hist_field
;
1085 unsigned int i
, var_idx
;
1086 bool resolved
= true;
1089 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
1090 hist_field
= hist_data
->var_refs
[i
];
1091 var_idx
= hist_field
->var
.idx
;
1092 var_data
= hist_field
->var
.hist_data
;
1094 if (var_data
== NULL
) {
1099 if ((self
&& var_data
!= hist_data
) ||
1100 (!self
&& var_data
== hist_data
))
1103 var_elt
= tracing_map_lookup(var_data
->map
, key
);
1109 if (!tracing_map_var_set(var_elt
, var_idx
)) {
1114 if (self
|| !hist_field
->read_once
)
1115 var_val
= tracing_map_read_var(var_elt
, var_idx
);
1117 var_val
= tracing_map_read_var_once(var_elt
, var_idx
);
1119 var_ref_vals
[i
] = var_val
;
1125 static const char *hist_field_name(struct hist_field
*field
,
1128 const char *field_name
= "";
1134 field_name
= field
->field
->name
;
1135 else if (field
->flags
& HIST_FIELD_FL_LOG2
||
1136 field
->flags
& HIST_FIELD_FL_ALIAS
||
1137 field
->flags
& HIST_FIELD_FL_BUCKET
)
1138 field_name
= hist_field_name(field
->operands
[0], ++level
);
1139 else if (field
->flags
& HIST_FIELD_FL_CPU
)
1140 field_name
= "common_cpu";
1141 else if (field
->flags
& HIST_FIELD_FL_EXPR
||
1142 field
->flags
& HIST_FIELD_FL_VAR_REF
) {
1143 if (field
->system
) {
1144 static char full_name
[MAX_FILTER_STR_VAL
];
1146 strcat(full_name
, field
->system
);
1147 strcat(full_name
, ".");
1148 strcat(full_name
, field
->event_name
);
1149 strcat(full_name
, ".");
1150 strcat(full_name
, field
->name
);
1151 field_name
= full_name
;
1153 field_name
= field
->name
;
1154 } else if (field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
1155 field_name
= "common_timestamp";
1157 if (field_name
== NULL
)
1163 static hist_field_fn_t
select_value_fn(int field_size
, int field_is_signed
)
1165 hist_field_fn_t fn
= NULL
;
1167 switch (field_size
) {
1169 if (field_is_signed
)
1170 fn
= hist_field_s64
;
1172 fn
= hist_field_u64
;
1175 if (field_is_signed
)
1176 fn
= hist_field_s32
;
1178 fn
= hist_field_u32
;
1181 if (field_is_signed
)
1182 fn
= hist_field_s16
;
1184 fn
= hist_field_u16
;
1187 if (field_is_signed
)
1197 static int parse_map_size(char *str
)
1199 unsigned long size
, map_bits
;
1202 ret
= kstrtoul(str
, 0, &size
);
1206 map_bits
= ilog2(roundup_pow_of_two(size
));
1207 if (map_bits
< TRACING_MAP_BITS_MIN
||
1208 map_bits
> TRACING_MAP_BITS_MAX
)
1216 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs
*attrs
)
1223 for (i
= 0; i
< attrs
->n_assignments
; i
++)
1224 kfree(attrs
->assignment_str
[i
]);
1226 for (i
= 0; i
< attrs
->n_actions
; i
++)
1227 kfree(attrs
->action_str
[i
]);
1230 kfree(attrs
->sort_key_str
);
1231 kfree(attrs
->keys_str
);
1232 kfree(attrs
->vals_str
);
1233 kfree(attrs
->clock
);
1237 static int parse_action(char *str
, struct hist_trigger_attrs
*attrs
)
1241 if (attrs
->n_actions
>= HIST_ACTIONS_MAX
)
1244 if ((str_has_prefix(str
, "onmatch(")) ||
1245 (str_has_prefix(str
, "onmax(")) ||
1246 (str_has_prefix(str
, "onchange("))) {
1247 attrs
->action_str
[attrs
->n_actions
] = kstrdup(str
, GFP_KERNEL
);
1248 if (!attrs
->action_str
[attrs
->n_actions
]) {
1258 static int parse_assignment(struct trace_array
*tr
,
1259 char *str
, struct hist_trigger_attrs
*attrs
)
1263 if ((len
= str_has_prefix(str
, "key=")) ||
1264 (len
= str_has_prefix(str
, "keys="))) {
1265 attrs
->keys_str
= kstrdup(str
+ len
, GFP_KERNEL
);
1266 if (!attrs
->keys_str
) {
1270 } else if ((len
= str_has_prefix(str
, "val=")) ||
1271 (len
= str_has_prefix(str
, "vals=")) ||
1272 (len
= str_has_prefix(str
, "values="))) {
1273 attrs
->vals_str
= kstrdup(str
+ len
, GFP_KERNEL
);
1274 if (!attrs
->vals_str
) {
1278 } else if ((len
= str_has_prefix(str
, "sort="))) {
1279 attrs
->sort_key_str
= kstrdup(str
+ len
, GFP_KERNEL
);
1280 if (!attrs
->sort_key_str
) {
1284 } else if (str_has_prefix(str
, "name=")) {
1285 attrs
->name
= kstrdup(str
, GFP_KERNEL
);
1290 } else if ((len
= str_has_prefix(str
, "clock="))) {
1293 str
= strstrip(str
);
1294 attrs
->clock
= kstrdup(str
, GFP_KERNEL
);
1295 if (!attrs
->clock
) {
1299 } else if ((len
= str_has_prefix(str
, "size="))) {
1300 int map_bits
= parse_map_size(str
+ len
);
1306 attrs
->map_bits
= map_bits
;
1310 if (attrs
->n_assignments
== TRACING_MAP_VARS_MAX
) {
1311 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(str
));
1316 assignment
= kstrdup(str
, GFP_KERNEL
);
1322 attrs
->assignment_str
[attrs
->n_assignments
++] = assignment
;
1328 static struct hist_trigger_attrs
*
1329 parse_hist_trigger_attrs(struct trace_array
*tr
, char *trigger_str
)
1331 struct hist_trigger_attrs
*attrs
;
1334 attrs
= kzalloc(sizeof(*attrs
), GFP_KERNEL
);
1336 return ERR_PTR(-ENOMEM
);
1338 while (trigger_str
) {
1339 char *str
= strsep(&trigger_str
, ":");
1342 rhs
= strchr(str
, '=');
1344 if (!strlen(++rhs
)) {
1346 hist_err(tr
, HIST_ERR_EMPTY_ASSIGNMENT
, errpos(str
));
1349 ret
= parse_assignment(tr
, str
, attrs
);
1352 } else if (strcmp(str
, "pause") == 0)
1353 attrs
->pause
= true;
1354 else if ((strcmp(str
, "cont") == 0) ||
1355 (strcmp(str
, "continue") == 0))
1357 else if (strcmp(str
, "clear") == 0)
1358 attrs
->clear
= true;
1360 ret
= parse_action(str
, attrs
);
1366 if (!attrs
->keys_str
) {
1371 if (!attrs
->clock
) {
1372 attrs
->clock
= kstrdup("global", GFP_KERNEL
);
1373 if (!attrs
->clock
) {
1381 destroy_hist_trigger_attrs(attrs
);
1383 return ERR_PTR(ret
);
1386 static inline void save_comm(char *comm
, struct task_struct
*task
)
1389 strcpy(comm
, "<idle>");
1393 if (WARN_ON_ONCE(task
->pid
< 0)) {
1394 strcpy(comm
, "<XXX>");
1398 strncpy(comm
, task
->comm
, TASK_COMM_LEN
);
1401 static void hist_elt_data_free(struct hist_elt_data
*elt_data
)
1405 for (i
= 0; i
< elt_data
->n_field_var_str
; i
++)
1406 kfree(elt_data
->field_var_str
[i
]);
1408 kfree(elt_data
->field_var_str
);
1410 kfree(elt_data
->comm
);
1414 static void hist_trigger_elt_data_free(struct tracing_map_elt
*elt
)
1416 struct hist_elt_data
*elt_data
= elt
->private_data
;
1418 hist_elt_data_free(elt_data
);
1421 static int hist_trigger_elt_data_alloc(struct tracing_map_elt
*elt
)
1423 struct hist_trigger_data
*hist_data
= elt
->map
->private_data
;
1424 unsigned int size
= TASK_COMM_LEN
;
1425 struct hist_elt_data
*elt_data
;
1426 struct hist_field
*hist_field
;
1427 unsigned int i
, n_str
;
1429 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
1433 for_each_hist_field(i
, hist_data
) {
1434 hist_field
= hist_data
->fields
[i
];
1436 if (hist_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
1437 elt_data
->comm
= kzalloc(size
, GFP_KERNEL
);
1438 if (!elt_data
->comm
) {
1446 n_str
= hist_data
->n_field_var_str
+ hist_data
->n_save_var_str
+
1447 hist_data
->n_var_str
;
1448 if (n_str
> SYNTH_FIELDS_MAX
) {
1449 hist_elt_data_free(elt_data
);
1453 BUILD_BUG_ON(STR_VAR_LEN_MAX
& (sizeof(u64
) - 1));
1455 size
= STR_VAR_LEN_MAX
;
1457 elt_data
->field_var_str
= kcalloc(n_str
, sizeof(char *), GFP_KERNEL
);
1458 if (!elt_data
->field_var_str
) {
1459 hist_elt_data_free(elt_data
);
1462 elt_data
->n_field_var_str
= n_str
;
1464 for (i
= 0; i
< n_str
; i
++) {
1465 elt_data
->field_var_str
[i
] = kzalloc(size
, GFP_KERNEL
);
1466 if (!elt_data
->field_var_str
[i
]) {
1467 hist_elt_data_free(elt_data
);
1472 elt
->private_data
= elt_data
;
1477 static void hist_trigger_elt_data_init(struct tracing_map_elt
*elt
)
1479 struct hist_elt_data
*elt_data
= elt
->private_data
;
1482 save_comm(elt_data
->comm
, current
);
1485 static const struct tracing_map_ops hist_trigger_elt_data_ops
= {
1486 .elt_alloc
= hist_trigger_elt_data_alloc
,
1487 .elt_free
= hist_trigger_elt_data_free
,
1488 .elt_init
= hist_trigger_elt_data_init
,
1491 static const char *get_hist_field_flags(struct hist_field
*hist_field
)
1493 const char *flags_str
= NULL
;
1495 if (hist_field
->flags
& HIST_FIELD_FL_HEX
)
1497 else if (hist_field
->flags
& HIST_FIELD_FL_SYM
)
1499 else if (hist_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
)
1500 flags_str
= "sym-offset";
1501 else if (hist_field
->flags
& HIST_FIELD_FL_EXECNAME
)
1502 flags_str
= "execname";
1503 else if (hist_field
->flags
& HIST_FIELD_FL_SYSCALL
)
1504 flags_str
= "syscall";
1505 else if (hist_field
->flags
& HIST_FIELD_FL_LOG2
)
1507 else if (hist_field
->flags
& HIST_FIELD_FL_BUCKET
)
1508 flags_str
= "buckets";
1509 else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
1510 flags_str
= "usecs";
1515 static void expr_field_str(struct hist_field
*field
, char *expr
)
1517 if (field
->flags
& HIST_FIELD_FL_VAR_REF
)
1520 strcat(expr
, hist_field_name(field
, 0));
1522 if (field
->flags
&& !(field
->flags
& HIST_FIELD_FL_VAR_REF
)) {
1523 const char *flags_str
= get_hist_field_flags(field
);
1527 strcat(expr
, flags_str
);
1532 static char *expr_str(struct hist_field
*field
, unsigned int level
)
1539 expr
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
1543 if (!field
->operands
[0]) {
1544 expr_field_str(field
, expr
);
1548 if (field
->operator == FIELD_OP_UNARY_MINUS
) {
1552 subexpr
= expr_str(field
->operands
[0], ++level
);
1557 strcat(expr
, subexpr
);
1565 expr_field_str(field
->operands
[0], expr
);
1567 switch (field
->operator) {
1568 case FIELD_OP_MINUS
:
1579 expr_field_str(field
->operands
[1], expr
);
1584 static int contains_operator(char *str
)
1586 enum field_op_id field_op
= FIELD_OP_NONE
;
1589 op
= strpbrk(str
, "+-");
1591 return FIELD_OP_NONE
;
1596 * Unfortunately, the modifier ".sym-offset"
1597 * can confuse things.
1599 if (op
- str
>= 4 && !strncmp(op
- 4, ".sym-offset", 11))
1600 return FIELD_OP_NONE
;
1603 field_op
= FIELD_OP_UNARY_MINUS
;
1605 field_op
= FIELD_OP_MINUS
;
1608 field_op
= FIELD_OP_PLUS
;
1617 static void get_hist_field(struct hist_field
*hist_field
)
1622 static void __destroy_hist_field(struct hist_field
*hist_field
)
1624 if (--hist_field
->ref
> 1)
1627 kfree(hist_field
->var
.name
);
1628 kfree(hist_field
->name
);
1630 /* Can likely be a const */
1631 kfree_const(hist_field
->type
);
1633 kfree(hist_field
->system
);
1634 kfree(hist_field
->event_name
);
1639 static void destroy_hist_field(struct hist_field
*hist_field
,
1650 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
1651 return; /* var refs will be destroyed separately */
1653 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++)
1654 destroy_hist_field(hist_field
->operands
[i
], level
+ 1);
1656 __destroy_hist_field(hist_field
);
1659 static struct hist_field
*create_hist_field(struct hist_trigger_data
*hist_data
,
1660 struct ftrace_event_field
*field
,
1661 unsigned long flags
,
1664 struct hist_field
*hist_field
;
1666 if (field
&& is_function_field(field
))
1669 hist_field
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
1673 hist_field
->ref
= 1;
1675 hist_field
->hist_data
= hist_data
;
1677 if (flags
& HIST_FIELD_FL_EXPR
|| flags
& HIST_FIELD_FL_ALIAS
)
1678 goto out
; /* caller will populate */
1680 if (flags
& HIST_FIELD_FL_VAR_REF
) {
1681 hist_field
->fn
= hist_field_var_ref
;
1685 if (flags
& HIST_FIELD_FL_HITCOUNT
) {
1686 hist_field
->fn
= hist_field_counter
;
1687 hist_field
->size
= sizeof(u64
);
1688 hist_field
->type
= "u64";
1692 if (flags
& HIST_FIELD_FL_STACKTRACE
) {
1693 hist_field
->fn
= hist_field_none
;
1697 if (flags
& (HIST_FIELD_FL_LOG2
| HIST_FIELD_FL_BUCKET
)) {
1698 unsigned long fl
= flags
& ~(HIST_FIELD_FL_LOG2
| HIST_FIELD_FL_BUCKET
);
1699 hist_field
->fn
= flags
& HIST_FIELD_FL_LOG2
? hist_field_log2
:
1701 hist_field
->operands
[0] = create_hist_field(hist_data
, field
, fl
, NULL
);
1702 hist_field
->size
= hist_field
->operands
[0]->size
;
1703 hist_field
->type
= kstrdup_const(hist_field
->operands
[0]->type
, GFP_KERNEL
);
1704 if (!hist_field
->type
)
1709 if (flags
& HIST_FIELD_FL_TIMESTAMP
) {
1710 hist_field
->fn
= hist_field_timestamp
;
1711 hist_field
->size
= sizeof(u64
);
1712 hist_field
->type
= "u64";
1716 if (flags
& HIST_FIELD_FL_CPU
) {
1717 hist_field
->fn
= hist_field_cpu
;
1718 hist_field
->size
= sizeof(int);
1719 hist_field
->type
= "unsigned int";
1723 if (WARN_ON_ONCE(!field
))
1726 /* Pointers to strings are just pointers and dangerous to dereference */
1727 if (is_string_field(field
) &&
1728 (field
->filter_type
!= FILTER_PTR_STRING
)) {
1729 flags
|= HIST_FIELD_FL_STRING
;
1731 hist_field
->size
= MAX_FILTER_STR_VAL
;
1732 hist_field
->type
= kstrdup_const(field
->type
, GFP_KERNEL
);
1733 if (!hist_field
->type
)
1736 if (field
->filter_type
== FILTER_STATIC_STRING
) {
1737 hist_field
->fn
= hist_field_string
;
1738 hist_field
->size
= field
->size
;
1739 } else if (field
->filter_type
== FILTER_DYN_STRING
)
1740 hist_field
->fn
= hist_field_dynstring
;
1742 hist_field
->fn
= hist_field_pstring
;
1744 hist_field
->size
= field
->size
;
1745 hist_field
->is_signed
= field
->is_signed
;
1746 hist_field
->type
= kstrdup_const(field
->type
, GFP_KERNEL
);
1747 if (!hist_field
->type
)
1750 hist_field
->fn
= select_value_fn(field
->size
,
1752 if (!hist_field
->fn
) {
1753 destroy_hist_field(hist_field
, 0);
1758 hist_field
->field
= field
;
1759 hist_field
->flags
= flags
;
1762 hist_field
->var
.name
= kstrdup(var_name
, GFP_KERNEL
);
1763 if (!hist_field
->var
.name
)
1769 destroy_hist_field(hist_field
, 0);
1773 static void destroy_hist_fields(struct hist_trigger_data
*hist_data
)
1777 for (i
= 0; i
< HIST_FIELDS_MAX
; i
++) {
1778 if (hist_data
->fields
[i
]) {
1779 destroy_hist_field(hist_data
->fields
[i
], 0);
1780 hist_data
->fields
[i
] = NULL
;
1784 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
1785 WARN_ON(!(hist_data
->var_refs
[i
]->flags
& HIST_FIELD_FL_VAR_REF
));
1786 __destroy_hist_field(hist_data
->var_refs
[i
]);
1787 hist_data
->var_refs
[i
] = NULL
;
1791 static int init_var_ref(struct hist_field
*ref_field
,
1792 struct hist_field
*var_field
,
1793 char *system
, char *event_name
)
1797 ref_field
->var
.idx
= var_field
->var
.idx
;
1798 ref_field
->var
.hist_data
= var_field
->hist_data
;
1799 ref_field
->size
= var_field
->size
;
1800 ref_field
->is_signed
= var_field
->is_signed
;
1801 ref_field
->flags
|= var_field
->flags
&
1802 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
1805 ref_field
->system
= kstrdup(system
, GFP_KERNEL
);
1806 if (!ref_field
->system
)
1811 ref_field
->event_name
= kstrdup(event_name
, GFP_KERNEL
);
1812 if (!ref_field
->event_name
) {
1818 if (var_field
->var
.name
) {
1819 ref_field
->name
= kstrdup(var_field
->var
.name
, GFP_KERNEL
);
1820 if (!ref_field
->name
) {
1824 } else if (var_field
->name
) {
1825 ref_field
->name
= kstrdup(var_field
->name
, GFP_KERNEL
);
1826 if (!ref_field
->name
) {
1832 ref_field
->type
= kstrdup_const(var_field
->type
, GFP_KERNEL
);
1833 if (!ref_field
->type
) {
1840 kfree(ref_field
->system
);
1841 kfree(ref_field
->event_name
);
1842 kfree(ref_field
->name
);
1847 static int find_var_ref_idx(struct hist_trigger_data
*hist_data
,
1848 struct hist_field
*var_field
)
1850 struct hist_field
*ref_field
;
1853 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
1854 ref_field
= hist_data
->var_refs
[i
];
1855 if (ref_field
->var
.idx
== var_field
->var
.idx
&&
1856 ref_field
->var
.hist_data
== var_field
->hist_data
)
1864 * create_var_ref - Create a variable reference and attach it to trigger
1865 * @hist_data: The trigger that will be referencing the variable
1866 * @var_field: The VAR field to create a reference to
1867 * @system: The optional system string
1868 * @event_name: The optional event_name string
1870 * Given a variable hist_field, create a VAR_REF hist_field that
1871 * represents a reference to it.
1873 * This function also adds the reference to the trigger that
1874 * now references the variable.
1876 * Return: The VAR_REF field if successful, NULL if not
1878 static struct hist_field
*create_var_ref(struct hist_trigger_data
*hist_data
,
1879 struct hist_field
*var_field
,
1880 char *system
, char *event_name
)
1882 unsigned long flags
= HIST_FIELD_FL_VAR_REF
;
1883 struct hist_field
*ref_field
;
1886 /* Check if the variable already exists */
1887 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
1888 ref_field
= hist_data
->var_refs
[i
];
1889 if (ref_field
->var
.idx
== var_field
->var
.idx
&&
1890 ref_field
->var
.hist_data
== var_field
->hist_data
) {
1891 get_hist_field(ref_field
);
1896 ref_field
= create_hist_field(var_field
->hist_data
, NULL
, flags
, NULL
);
1898 if (init_var_ref(ref_field
, var_field
, system
, event_name
)) {
1899 destroy_hist_field(ref_field
, 0);
1903 hist_data
->var_refs
[hist_data
->n_var_refs
] = ref_field
;
1904 ref_field
->var_ref_idx
= hist_data
->n_var_refs
++;
1910 static bool is_var_ref(char *var_name
)
1912 if (!var_name
|| strlen(var_name
) < 2 || var_name
[0] != '$')
1918 static char *field_name_from_var(struct hist_trigger_data
*hist_data
,
1924 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
1925 name
= hist_data
->attrs
->var_defs
.name
[i
];
1927 if (strcmp(var_name
, name
) == 0) {
1928 field
= hist_data
->attrs
->var_defs
.expr
[i
];
1929 if (contains_operator(field
) || is_var_ref(field
))
1938 static char *local_field_var_ref(struct hist_trigger_data
*hist_data
,
1939 char *system
, char *event_name
,
1942 struct trace_event_call
*call
;
1944 if (system
&& event_name
) {
1945 call
= hist_data
->event_file
->event_call
;
1947 if (strcmp(system
, call
->class->system
) != 0)
1950 if (strcmp(event_name
, trace_event_name(call
)) != 0)
1954 if (!!system
!= !!event_name
)
1957 if (!is_var_ref(var_name
))
1962 return field_name_from_var(hist_data
, var_name
);
1965 static struct hist_field
*parse_var_ref(struct hist_trigger_data
*hist_data
,
1966 char *system
, char *event_name
,
1969 struct hist_field
*var_field
= NULL
, *ref_field
= NULL
;
1970 struct trace_array
*tr
= hist_data
->event_file
->tr
;
1972 if (!is_var_ref(var_name
))
1977 var_field
= find_event_var(hist_data
, system
, event_name
, var_name
);
1979 ref_field
= create_var_ref(hist_data
, var_field
,
1980 system
, event_name
);
1983 hist_err(tr
, HIST_ERR_VAR_NOT_FOUND
, errpos(var_name
));
1988 static struct ftrace_event_field
*
1989 parse_field(struct hist_trigger_data
*hist_data
, struct trace_event_file
*file
,
1990 char *field_str
, unsigned long *flags
, unsigned long *buckets
)
1992 struct ftrace_event_field
*field
= NULL
;
1993 char *field_name
, *modifier
, *str
;
1994 struct trace_array
*tr
= file
->tr
;
1996 modifier
= str
= kstrdup(field_str
, GFP_KERNEL
);
1998 return ERR_PTR(-ENOMEM
);
2000 field_name
= strsep(&modifier
, ".");
2002 if (strcmp(modifier
, "hex") == 0)
2003 *flags
|= HIST_FIELD_FL_HEX
;
2004 else if (strcmp(modifier
, "sym") == 0)
2005 *flags
|= HIST_FIELD_FL_SYM
;
2006 else if (strcmp(modifier
, "sym-offset") == 0)
2007 *flags
|= HIST_FIELD_FL_SYM_OFFSET
;
2008 else if ((strcmp(modifier
, "execname") == 0) &&
2009 (strcmp(field_name
, "common_pid") == 0))
2010 *flags
|= HIST_FIELD_FL_EXECNAME
;
2011 else if (strcmp(modifier
, "syscall") == 0)
2012 *flags
|= HIST_FIELD_FL_SYSCALL
;
2013 else if (strcmp(modifier
, "log2") == 0)
2014 *flags
|= HIST_FIELD_FL_LOG2
;
2015 else if (strcmp(modifier
, "usecs") == 0)
2016 *flags
|= HIST_FIELD_FL_TIMESTAMP_USECS
;
2017 else if (strncmp(modifier
, "bucket", 6) == 0) {
2022 if (*modifier
== 's')
2024 if (*modifier
!= '=')
2027 ret
= kstrtoul(modifier
, 0, buckets
);
2028 if (ret
|| !(*buckets
))
2030 *flags
|= HIST_FIELD_FL_BUCKET
;
2033 hist_err(tr
, HIST_ERR_BAD_FIELD_MODIFIER
, errpos(modifier
));
2034 field
= ERR_PTR(-EINVAL
);
2039 if (strcmp(field_name
, "common_timestamp") == 0) {
2040 *flags
|= HIST_FIELD_FL_TIMESTAMP
;
2041 hist_data
->enable_timestamps
= true;
2042 if (*flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
2043 hist_data
->attrs
->ts_in_usecs
= true;
2044 } else if (strcmp(field_name
, "common_cpu") == 0)
2045 *flags
|= HIST_FIELD_FL_CPU
;
2047 field
= trace_find_event_field(file
->event_call
, field_name
);
2048 if (!field
|| !field
->size
) {
2050 * For backward compatibility, if field_name
2051 * was "cpu", then we treat this the same as
2052 * common_cpu. This also works for "CPU".
2054 if (field
&& field
->filter_type
== FILTER_CPU
) {
2055 *flags
|= HIST_FIELD_FL_CPU
;
2057 hist_err(tr
, HIST_ERR_FIELD_NOT_FOUND
,
2058 errpos(field_name
));
2059 field
= ERR_PTR(-EINVAL
);
2070 static struct hist_field
*create_alias(struct hist_trigger_data
*hist_data
,
2071 struct hist_field
*var_ref
,
2074 struct hist_field
*alias
= NULL
;
2075 unsigned long flags
= HIST_FIELD_FL_ALIAS
| HIST_FIELD_FL_VAR
;
2077 alias
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2081 alias
->fn
= var_ref
->fn
;
2082 alias
->operands
[0] = var_ref
;
2084 if (init_var_ref(alias
, var_ref
, var_ref
->system
, var_ref
->event_name
)) {
2085 destroy_hist_field(alias
, 0);
2089 alias
->var_ref_idx
= var_ref
->var_ref_idx
;
2094 static struct hist_field
*parse_atom(struct hist_trigger_data
*hist_data
,
2095 struct trace_event_file
*file
, char *str
,
2096 unsigned long *flags
, char *var_name
)
2098 char *s
, *ref_system
= NULL
, *ref_event
= NULL
, *ref_var
= str
;
2099 struct ftrace_event_field
*field
= NULL
;
2100 struct hist_field
*hist_field
= NULL
;
2101 unsigned long buckets
= 0;
2104 s
= strchr(str
, '.');
2106 s
= strchr(++s
, '.');
2108 ref_system
= strsep(&str
, ".");
2113 ref_event
= strsep(&str
, ".");
2122 s
= local_field_var_ref(hist_data
, ref_system
, ref_event
, ref_var
);
2124 hist_field
= parse_var_ref(hist_data
, ref_system
,
2125 ref_event
, ref_var
);
2128 hist_field
= create_alias(hist_data
, hist_field
, var_name
);
2139 field
= parse_field(hist_data
, file
, str
, flags
, &buckets
);
2140 if (IS_ERR(field
)) {
2141 ret
= PTR_ERR(field
);
2145 hist_field
= create_hist_field(hist_data
, field
, *flags
, var_name
);
2150 hist_field
->buckets
= buckets
;
2154 return ERR_PTR(ret
);
2157 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
2158 struct trace_event_file
*file
,
2159 char *str
, unsigned long flags
,
2160 char *var_name
, unsigned int level
);
2162 static struct hist_field
*parse_unary(struct hist_trigger_data
*hist_data
,
2163 struct trace_event_file
*file
,
2164 char *str
, unsigned long flags
,
2165 char *var_name
, unsigned int level
)
2167 struct hist_field
*operand1
, *expr
= NULL
;
2168 unsigned long operand_flags
;
2172 /* we support only -(xxx) i.e. explicit parens required */
2175 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
2180 str
++; /* skip leading '-' */
2182 s
= strchr(str
, '(');
2190 s
= strrchr(str
, ')');
2194 ret
= -EINVAL
; /* no closing ')' */
2198 flags
|= HIST_FIELD_FL_EXPR
;
2199 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2206 operand1
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
2207 if (IS_ERR(operand1
)) {
2208 ret
= PTR_ERR(operand1
);
2211 if (operand1
->flags
& HIST_FIELD_FL_STRING
) {
2212 /* String type can not be the operand of unary operator. */
2213 hist_err(file
->tr
, HIST_ERR_INVALID_STR_OPERAND
, errpos(str
));
2214 destroy_hist_field(operand1
, 0);
2219 expr
->flags
|= operand1
->flags
&
2220 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2221 expr
->fn
= hist_field_unary_minus
;
2222 expr
->operands
[0] = operand1
;
2223 expr
->size
= operand1
->size
;
2224 expr
->is_signed
= operand1
->is_signed
;
2225 expr
->operator = FIELD_OP_UNARY_MINUS
;
2226 expr
->name
= expr_str(expr
, 0);
2227 expr
->type
= kstrdup_const(operand1
->type
, GFP_KERNEL
);
2235 destroy_hist_field(expr
, 0);
2236 return ERR_PTR(ret
);
2239 static int check_expr_operands(struct trace_array
*tr
,
2240 struct hist_field
*operand1
,
2241 struct hist_field
*operand2
)
2243 unsigned long operand1_flags
= operand1
->flags
;
2244 unsigned long operand2_flags
= operand2
->flags
;
2246 if ((operand1_flags
& HIST_FIELD_FL_VAR_REF
) ||
2247 (operand1_flags
& HIST_FIELD_FL_ALIAS
)) {
2248 struct hist_field
*var
;
2250 var
= find_var_field(operand1
->var
.hist_data
, operand1
->name
);
2253 operand1_flags
= var
->flags
;
2256 if ((operand2_flags
& HIST_FIELD_FL_VAR_REF
) ||
2257 (operand2_flags
& HIST_FIELD_FL_ALIAS
)) {
2258 struct hist_field
*var
;
2260 var
= find_var_field(operand2
->var
.hist_data
, operand2
->name
);
2263 operand2_flags
= var
->flags
;
2266 if ((operand1_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
) !=
2267 (operand2_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)) {
2268 hist_err(tr
, HIST_ERR_TIMESTAMP_MISMATCH
, 0);
2275 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
2276 struct trace_event_file
*file
,
2277 char *str
, unsigned long flags
,
2278 char *var_name
, unsigned int level
)
2280 struct hist_field
*operand1
= NULL
, *operand2
= NULL
, *expr
= NULL
;
2281 unsigned long operand_flags
;
2282 int field_op
, ret
= -EINVAL
;
2283 char *sep
, *operand1_str
;
2286 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
2287 return ERR_PTR(-EINVAL
);
2290 field_op
= contains_operator(str
);
2292 if (field_op
== FIELD_OP_NONE
)
2293 return parse_atom(hist_data
, file
, str
, &flags
, var_name
);
2295 if (field_op
== FIELD_OP_UNARY_MINUS
)
2296 return parse_unary(hist_data
, file
, str
, flags
, var_name
, ++level
);
2299 case FIELD_OP_MINUS
:
2309 operand1_str
= strsep(&str
, sep
);
2310 if (!operand1_str
|| !str
)
2314 operand1
= parse_atom(hist_data
, file
, operand1_str
,
2315 &operand_flags
, NULL
);
2316 if (IS_ERR(operand1
)) {
2317 ret
= PTR_ERR(operand1
);
2321 if (operand1
->flags
& HIST_FIELD_FL_STRING
) {
2322 hist_err(file
->tr
, HIST_ERR_INVALID_STR_OPERAND
, errpos(operand1_str
));
2327 /* rest of string could be another expression e.g. b+c in a+b+c */
2329 operand2
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
2330 if (IS_ERR(operand2
)) {
2331 ret
= PTR_ERR(operand2
);
2335 if (operand2
->flags
& HIST_FIELD_FL_STRING
) {
2336 hist_err(file
->tr
, HIST_ERR_INVALID_STR_OPERAND
, errpos(str
));
2341 ret
= check_expr_operands(file
->tr
, operand1
, operand2
);
2345 flags
|= HIST_FIELD_FL_EXPR
;
2347 flags
|= operand1
->flags
&
2348 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
2350 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
2356 operand1
->read_once
= true;
2357 operand2
->read_once
= true;
2359 expr
->operands
[0] = operand1
;
2360 expr
->operands
[1] = operand2
;
2362 /* The operand sizes should be the same, so just pick one */
2363 expr
->size
= operand1
->size
;
2364 expr
->is_signed
= operand1
->is_signed
;
2366 expr
->operator = field_op
;
2367 expr
->name
= expr_str(expr
, 0);
2368 expr
->type
= kstrdup_const(operand1
->type
, GFP_KERNEL
);
2375 case FIELD_OP_MINUS
:
2376 expr
->fn
= hist_field_minus
;
2379 expr
->fn
= hist_field_plus
;
2388 destroy_hist_field(operand1
, 0);
2389 destroy_hist_field(operand2
, 0);
2390 destroy_hist_field(expr
, 0);
2392 return ERR_PTR(ret
);
2395 static char *find_trigger_filter(struct hist_trigger_data
*hist_data
,
2396 struct trace_event_file
*file
)
2398 struct event_trigger_data
*test
;
2400 lockdep_assert_held(&event_mutex
);
2402 list_for_each_entry(test
, &file
->triggers
, list
) {
2403 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2404 if (test
->private_data
== hist_data
)
2405 return test
->filter_str
;
2412 static struct event_command trigger_hist_cmd
;
2413 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
2414 struct trace_event_file
*file
,
2415 char *glob
, char *cmd
, char *param
);
2417 static bool compatible_keys(struct hist_trigger_data
*target_hist_data
,
2418 struct hist_trigger_data
*hist_data
,
2419 unsigned int n_keys
)
2421 struct hist_field
*target_hist_field
, *hist_field
;
2422 unsigned int n
, i
, j
;
2424 if (hist_data
->n_fields
- hist_data
->n_vals
!= n_keys
)
2427 i
= hist_data
->n_vals
;
2428 j
= target_hist_data
->n_vals
;
2430 for (n
= 0; n
< n_keys
; n
++) {
2431 hist_field
= hist_data
->fields
[i
+ n
];
2432 target_hist_field
= target_hist_data
->fields
[j
+ n
];
2434 if (strcmp(hist_field
->type
, target_hist_field
->type
) != 0)
2436 if (hist_field
->size
!= target_hist_field
->size
)
2438 if (hist_field
->is_signed
!= target_hist_field
->is_signed
)
2445 static struct hist_trigger_data
*
2446 find_compatible_hist(struct hist_trigger_data
*target_hist_data
,
2447 struct trace_event_file
*file
)
2449 struct hist_trigger_data
*hist_data
;
2450 struct event_trigger_data
*test
;
2451 unsigned int n_keys
;
2453 lockdep_assert_held(&event_mutex
);
2455 n_keys
= target_hist_data
->n_fields
- target_hist_data
->n_vals
;
2457 list_for_each_entry(test
, &file
->triggers
, list
) {
2458 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2459 hist_data
= test
->private_data
;
2461 if (compatible_keys(target_hist_data
, hist_data
, n_keys
))
2469 static struct trace_event_file
*event_file(struct trace_array
*tr
,
2470 char *system
, char *event_name
)
2472 struct trace_event_file
*file
;
2474 file
= __find_event_file(tr
, system
, event_name
);
2476 return ERR_PTR(-EINVAL
);
2481 static struct hist_field
*
2482 find_synthetic_field_var(struct hist_trigger_data
*target_hist_data
,
2483 char *system
, char *event_name
, char *field_name
)
2485 struct hist_field
*event_var
;
2486 char *synthetic_name
;
2488 synthetic_name
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
2489 if (!synthetic_name
)
2490 return ERR_PTR(-ENOMEM
);
2492 strcpy(synthetic_name
, "synthetic_");
2493 strcat(synthetic_name
, field_name
);
2495 event_var
= find_event_var(target_hist_data
, system
, event_name
, synthetic_name
);
2497 kfree(synthetic_name
);
2503 * create_field_var_hist - Automatically create a histogram and var for a field
2504 * @target_hist_data: The target hist trigger
2505 * @subsys_name: Optional subsystem name
2506 * @event_name: Optional event name
2507 * @field_name: The name of the field (and the resulting variable)
2509 * Hist trigger actions fetch data from variables, not directly from
2510 * events. However, for convenience, users are allowed to directly
2511 * specify an event field in an action, which will be automatically
2512 * converted into a variable on their behalf.
2514 * If a user specifies a field on an event that isn't the event the
2515 * histogram currently being defined (the target event histogram), the
2516 * only way that can be accomplished is if a new hist trigger is
2517 * created and the field variable defined on that.
2519 * This function creates a new histogram compatible with the target
2520 * event (meaning a histogram with the same key as the target
2521 * histogram), and creates a variable for the specified field, but
2522 * with 'synthetic_' prepended to the variable name in order to avoid
2523 * collision with normal field variables.
2525 * Return: The variable created for the field.
2527 static struct hist_field
*
2528 create_field_var_hist(struct hist_trigger_data
*target_hist_data
,
2529 char *subsys_name
, char *event_name
, char *field_name
)
2531 struct trace_array
*tr
= target_hist_data
->event_file
->tr
;
2532 struct hist_trigger_data
*hist_data
;
2533 unsigned int i
, n
, first
= true;
2534 struct field_var_hist
*var_hist
;
2535 struct trace_event_file
*file
;
2536 struct hist_field
*key_field
;
2537 struct hist_field
*event_var
;
2542 if (target_hist_data
->n_field_var_hists
>= SYNTH_FIELDS_MAX
) {
2543 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
2544 return ERR_PTR(-EINVAL
);
2547 file
= event_file(tr
, subsys_name
, event_name
);
2550 hist_err(tr
, HIST_ERR_EVENT_FILE_NOT_FOUND
, errpos(field_name
));
2551 ret
= PTR_ERR(file
);
2552 return ERR_PTR(ret
);
2556 * Look for a histogram compatible with target. We'll use the
2557 * found histogram specification to create a new matching
2558 * histogram with our variable on it. target_hist_data is not
2559 * yet a registered histogram so we can't use that.
2561 hist_data
= find_compatible_hist(target_hist_data
, file
);
2563 hist_err(tr
, HIST_ERR_HIST_NOT_FOUND
, errpos(field_name
));
2564 return ERR_PTR(-EINVAL
);
2567 /* See if a synthetic field variable has already been created */
2568 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
2569 event_name
, field_name
);
2570 if (!IS_ERR_OR_NULL(event_var
))
2573 var_hist
= kzalloc(sizeof(*var_hist
), GFP_KERNEL
);
2575 return ERR_PTR(-ENOMEM
);
2577 cmd
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
2580 return ERR_PTR(-ENOMEM
);
2583 /* Use the same keys as the compatible histogram */
2584 strcat(cmd
, "keys=");
2586 for_each_hist_key_field(i
, hist_data
) {
2587 key_field
= hist_data
->fields
[i
];
2590 strcat(cmd
, key_field
->field
->name
);
2594 /* Create the synthetic field variable specification */
2595 strcat(cmd
, ":synthetic_");
2596 strcat(cmd
, field_name
);
2598 strcat(cmd
, field_name
);
2600 /* Use the same filter as the compatible histogram */
2601 saved_filter
= find_trigger_filter(hist_data
, file
);
2603 strcat(cmd
, " if ");
2604 strcat(cmd
, saved_filter
);
2607 var_hist
->cmd
= kstrdup(cmd
, GFP_KERNEL
);
2608 if (!var_hist
->cmd
) {
2611 return ERR_PTR(-ENOMEM
);
2614 /* Save the compatible histogram information */
2615 var_hist
->hist_data
= hist_data
;
2617 /* Create the new histogram with our variable */
2618 ret
= event_hist_trigger_func(&trigger_hist_cmd
, file
,
2622 kfree(var_hist
->cmd
);
2624 hist_err(tr
, HIST_ERR_HIST_CREATE_FAIL
, errpos(field_name
));
2625 return ERR_PTR(ret
);
2630 /* If we can't find the variable, something went wrong */
2631 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
2632 event_name
, field_name
);
2633 if (IS_ERR_OR_NULL(event_var
)) {
2634 kfree(var_hist
->cmd
);
2636 hist_err(tr
, HIST_ERR_SYNTH_VAR_NOT_FOUND
, errpos(field_name
));
2637 return ERR_PTR(-EINVAL
);
2640 n
= target_hist_data
->n_field_var_hists
;
2641 target_hist_data
->field_var_hists
[n
] = var_hist
;
2642 target_hist_data
->n_field_var_hists
++;
2647 static struct hist_field
*
2648 find_target_event_var(struct hist_trigger_data
*hist_data
,
2649 char *subsys_name
, char *event_name
, char *var_name
)
2651 struct trace_event_file
*file
= hist_data
->event_file
;
2652 struct hist_field
*hist_field
= NULL
;
2655 struct trace_event_call
*call
;
2660 call
= file
->event_call
;
2662 if (strcmp(subsys_name
, call
->class->system
) != 0)
2665 if (strcmp(event_name
, trace_event_name(call
)) != 0)
2669 hist_field
= find_var_field(hist_data
, var_name
);
2674 static inline void __update_field_vars(struct tracing_map_elt
*elt
,
2675 struct trace_buffer
*buffer
,
2676 struct ring_buffer_event
*rbe
,
2678 struct field_var
**field_vars
,
2679 unsigned int n_field_vars
,
2680 unsigned int field_var_str_start
)
2682 struct hist_elt_data
*elt_data
= elt
->private_data
;
2683 unsigned int i
, j
, var_idx
;
2686 for (i
= 0, j
= field_var_str_start
; i
< n_field_vars
; i
++) {
2687 struct field_var
*field_var
= field_vars
[i
];
2688 struct hist_field
*var
= field_var
->var
;
2689 struct hist_field
*val
= field_var
->val
;
2691 var_val
= val
->fn(val
, elt
, buffer
, rbe
, rec
);
2692 var_idx
= var
->var
.idx
;
2694 if (val
->flags
& HIST_FIELD_FL_STRING
) {
2695 char *str
= elt_data
->field_var_str
[j
++];
2696 char *val_str
= (char *)(uintptr_t)var_val
;
2699 size
= min(val
->size
, STR_VAR_LEN_MAX
);
2700 strscpy(str
, val_str
, size
);
2701 var_val
= (u64
)(uintptr_t)str
;
2703 tracing_map_set_var(elt
, var_idx
, var_val
);
2707 static void update_field_vars(struct hist_trigger_data
*hist_data
,
2708 struct tracing_map_elt
*elt
,
2709 struct trace_buffer
*buffer
,
2710 struct ring_buffer_event
*rbe
,
2713 __update_field_vars(elt
, buffer
, rbe
, rec
, hist_data
->field_vars
,
2714 hist_data
->n_field_vars
, 0);
2717 static void save_track_data_vars(struct hist_trigger_data
*hist_data
,
2718 struct tracing_map_elt
*elt
,
2719 struct trace_buffer
*buffer
, void *rec
,
2720 struct ring_buffer_event
*rbe
, void *key
,
2721 struct action_data
*data
, u64
*var_ref_vals
)
2723 __update_field_vars(elt
, buffer
, rbe
, rec
, hist_data
->save_vars
,
2724 hist_data
->n_save_vars
, hist_data
->n_field_var_str
);
2727 static struct hist_field
*create_var(struct hist_trigger_data
*hist_data
,
2728 struct trace_event_file
*file
,
2729 char *name
, int size
, const char *type
)
2731 struct hist_field
*var
;
2734 if (find_var(hist_data
, file
, name
) && !hist_data
->remove
) {
2735 var
= ERR_PTR(-EINVAL
);
2739 var
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
2741 var
= ERR_PTR(-ENOMEM
);
2745 idx
= tracing_map_add_var(hist_data
->map
);
2748 var
= ERR_PTR(-EINVAL
);
2753 var
->flags
= HIST_FIELD_FL_VAR
;
2755 var
->var
.hist_data
= var
->hist_data
= hist_data
;
2757 var
->var
.name
= kstrdup(name
, GFP_KERNEL
);
2758 var
->type
= kstrdup_const(type
, GFP_KERNEL
);
2759 if (!var
->var
.name
|| !var
->type
) {
2760 kfree_const(var
->type
);
2761 kfree(var
->var
.name
);
2763 var
= ERR_PTR(-ENOMEM
);
2769 static struct field_var
*create_field_var(struct hist_trigger_data
*hist_data
,
2770 struct trace_event_file
*file
,
2773 struct hist_field
*val
= NULL
, *var
= NULL
;
2774 unsigned long flags
= HIST_FIELD_FL_VAR
;
2775 struct trace_array
*tr
= file
->tr
;
2776 struct field_var
*field_var
;
2779 if (hist_data
->n_field_vars
>= SYNTH_FIELDS_MAX
) {
2780 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
2785 val
= parse_atom(hist_data
, file
, field_name
, &flags
, NULL
);
2787 hist_err(tr
, HIST_ERR_FIELD_VAR_PARSE_FAIL
, errpos(field_name
));
2792 var
= create_var(hist_data
, file
, field_name
, val
->size
, val
->type
);
2794 hist_err(tr
, HIST_ERR_VAR_CREATE_FIND_FAIL
, errpos(field_name
));
2800 field_var
= kzalloc(sizeof(struct field_var
), GFP_KERNEL
);
2808 field_var
->var
= var
;
2809 field_var
->val
= val
;
2813 field_var
= ERR_PTR(ret
);
2818 * create_target_field_var - Automatically create a variable for a field
2819 * @target_hist_data: The target hist trigger
2820 * @subsys_name: Optional subsystem name
2821 * @event_name: Optional event name
2822 * @var_name: The name of the field (and the resulting variable)
2824 * Hist trigger actions fetch data from variables, not directly from
2825 * events. However, for convenience, users are allowed to directly
2826 * specify an event field in an action, which will be automatically
2827 * converted into a variable on their behalf.
2829 * This function creates a field variable with the name var_name on
2830 * the hist trigger currently being defined on the target event. If
2831 * subsys_name and event_name are specified, this function simply
2832 * verifies that they do in fact match the target event subsystem and
2835 * Return: The variable created for the field.
2837 static struct field_var
*
2838 create_target_field_var(struct hist_trigger_data
*target_hist_data
,
2839 char *subsys_name
, char *event_name
, char *var_name
)
2841 struct trace_event_file
*file
= target_hist_data
->event_file
;
2844 struct trace_event_call
*call
;
2849 call
= file
->event_call
;
2851 if (strcmp(subsys_name
, call
->class->system
) != 0)
2854 if (strcmp(event_name
, trace_event_name(call
)) != 0)
2858 return create_field_var(target_hist_data
, file
, var_name
);
2861 static bool check_track_val_max(u64 track_val
, u64 var_val
)
2863 if (var_val
<= track_val
)
2869 static bool check_track_val_changed(u64 track_val
, u64 var_val
)
2871 if (var_val
== track_val
)
2877 static u64
get_track_val(struct hist_trigger_data
*hist_data
,
2878 struct tracing_map_elt
*elt
,
2879 struct action_data
*data
)
2881 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
2884 track_val
= tracing_map_read_var(elt
, track_var_idx
);
2889 static void save_track_val(struct hist_trigger_data
*hist_data
,
2890 struct tracing_map_elt
*elt
,
2891 struct action_data
*data
, u64 var_val
)
2893 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
2895 tracing_map_set_var(elt
, track_var_idx
, var_val
);
2898 static void save_track_data(struct hist_trigger_data
*hist_data
,
2899 struct tracing_map_elt
*elt
,
2900 struct trace_buffer
*buffer
, void *rec
,
2901 struct ring_buffer_event
*rbe
, void *key
,
2902 struct action_data
*data
, u64
*var_ref_vals
)
2904 if (data
->track_data
.save_data
)
2905 data
->track_data
.save_data(hist_data
, elt
, buffer
, rec
, rbe
,
2906 key
, data
, var_ref_vals
);
2909 static bool check_track_val(struct tracing_map_elt
*elt
,
2910 struct action_data
*data
,
2913 struct hist_trigger_data
*hist_data
;
2916 hist_data
= data
->track_data
.track_var
->hist_data
;
2917 track_val
= get_track_val(hist_data
, elt
, data
);
2919 return data
->track_data
.check_val(track_val
, var_val
);
2922 #ifdef CONFIG_TRACER_SNAPSHOT
2923 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
2925 /* called with tr->max_lock held */
2926 struct track_data
*track_data
= tr
->cond_snapshot
->cond_data
;
2927 struct hist_elt_data
*elt_data
, *track_elt_data
;
2928 struct snapshot_context
*context
= cond_data
;
2929 struct action_data
*action
;
2935 action
= track_data
->action_data
;
2937 track_val
= get_track_val(track_data
->hist_data
, context
->elt
,
2938 track_data
->action_data
);
2940 if (!action
->track_data
.check_val(track_data
->track_val
, track_val
))
2943 track_data
->track_val
= track_val
;
2944 memcpy(track_data
->key
, context
->key
, track_data
->key_len
);
2946 elt_data
= context
->elt
->private_data
;
2947 track_elt_data
= track_data
->elt
.private_data
;
2949 strncpy(track_elt_data
->comm
, elt_data
->comm
, TASK_COMM_LEN
);
2951 track_data
->updated
= true;
2956 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
2957 struct tracing_map_elt
*elt
,
2958 struct trace_buffer
*buffer
, void *rec
,
2959 struct ring_buffer_event
*rbe
, void *key
,
2960 struct action_data
*data
,
2963 struct trace_event_file
*file
= hist_data
->event_file
;
2964 struct snapshot_context context
;
2969 tracing_snapshot_cond(file
->tr
, &context
);
2972 static void hist_trigger_print_key(struct seq_file
*m
,
2973 struct hist_trigger_data
*hist_data
,
2975 struct tracing_map_elt
*elt
);
2977 static struct action_data
*snapshot_action(struct hist_trigger_data
*hist_data
)
2981 if (!hist_data
->n_actions
)
2984 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
2985 struct action_data
*data
= hist_data
->actions
[i
];
2987 if (data
->action
== ACTION_SNAPSHOT
)
2994 static void track_data_snapshot_print(struct seq_file
*m
,
2995 struct hist_trigger_data
*hist_data
)
2997 struct trace_event_file
*file
= hist_data
->event_file
;
2998 struct track_data
*track_data
;
2999 struct action_data
*action
;
3001 track_data
= tracing_cond_snapshot_data(file
->tr
);
3005 if (!track_data
->updated
)
3008 action
= snapshot_action(hist_data
);
3012 seq_puts(m
, "\nSnapshot taken (see tracing/snapshot). Details:\n");
3013 seq_printf(m
, "\ttriggering value { %s(%s) }: %10llu",
3014 action
->handler
== HANDLER_ONMAX
? "onmax" : "onchange",
3015 action
->track_data
.var_str
, track_data
->track_val
);
3017 seq_puts(m
, "\ttriggered by event with key: ");
3018 hist_trigger_print_key(m
, hist_data
, track_data
->key
, &track_data
->elt
);
3022 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
3026 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
3027 struct tracing_map_elt
*elt
,
3028 struct trace_buffer
*buffer
, void *rec
,
3029 struct ring_buffer_event
*rbe
, void *key
,
3030 struct action_data
*data
,
3031 u64
*var_ref_vals
) {}
3032 static void track_data_snapshot_print(struct seq_file
*m
,
3033 struct hist_trigger_data
*hist_data
) {}
3034 #endif /* CONFIG_TRACER_SNAPSHOT */
3036 static void track_data_print(struct seq_file
*m
,
3037 struct hist_trigger_data
*hist_data
,
3038 struct tracing_map_elt
*elt
,
3039 struct action_data
*data
)
3041 u64 track_val
= get_track_val(hist_data
, elt
, data
);
3042 unsigned int i
, save_var_idx
;
3044 if (data
->handler
== HANDLER_ONMAX
)
3045 seq_printf(m
, "\n\tmax: %10llu", track_val
);
3046 else if (data
->handler
== HANDLER_ONCHANGE
)
3047 seq_printf(m
, "\n\tchanged: %10llu", track_val
);
3049 if (data
->action
== ACTION_SNAPSHOT
)
3052 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
3053 struct hist_field
*save_val
= hist_data
->save_vars
[i
]->val
;
3054 struct hist_field
*save_var
= hist_data
->save_vars
[i
]->var
;
3057 save_var_idx
= save_var
->var
.idx
;
3059 val
= tracing_map_read_var(elt
, save_var_idx
);
3061 if (save_val
->flags
& HIST_FIELD_FL_STRING
) {
3062 seq_printf(m
, " %s: %-32s", save_var
->var
.name
,
3063 (char *)(uintptr_t)(val
));
3065 seq_printf(m
, " %s: %10llu", save_var
->var
.name
, val
);
3069 static void ontrack_action(struct hist_trigger_data
*hist_data
,
3070 struct tracing_map_elt
*elt
,
3071 struct trace_buffer
*buffer
, void *rec
,
3072 struct ring_buffer_event
*rbe
, void *key
,
3073 struct action_data
*data
, u64
*var_ref_vals
)
3075 u64 var_val
= var_ref_vals
[data
->track_data
.var_ref
->var_ref_idx
];
3077 if (check_track_val(elt
, data
, var_val
)) {
3078 save_track_val(hist_data
, elt
, data
, var_val
);
3079 save_track_data(hist_data
, elt
, buffer
, rec
, rbe
,
3080 key
, data
, var_ref_vals
);
3084 static void action_data_destroy(struct action_data
*data
)
3088 lockdep_assert_held(&event_mutex
);
3090 kfree(data
->action_name
);
3092 for (i
= 0; i
< data
->n_params
; i
++)
3093 kfree(data
->params
[i
]);
3095 if (data
->synth_event
)
3096 data
->synth_event
->ref
--;
3098 kfree(data
->synth_event_name
);
3103 static void track_data_destroy(struct hist_trigger_data
*hist_data
,
3104 struct action_data
*data
)
3106 struct trace_event_file
*file
= hist_data
->event_file
;
3108 destroy_hist_field(data
->track_data
.track_var
, 0);
3110 if (data
->action
== ACTION_SNAPSHOT
) {
3111 struct track_data
*track_data
;
3113 track_data
= tracing_cond_snapshot_data(file
->tr
);
3114 if (track_data
&& track_data
->hist_data
== hist_data
) {
3115 tracing_snapshot_cond_disable(file
->tr
);
3116 track_data_free(track_data
);
3120 kfree(data
->track_data
.var_str
);
3122 action_data_destroy(data
);
3125 static int action_create(struct hist_trigger_data
*hist_data
,
3126 struct action_data
*data
);
3128 static int track_data_create(struct hist_trigger_data
*hist_data
,
3129 struct action_data
*data
)
3131 struct hist_field
*var_field
, *ref_field
, *track_var
= NULL
;
3132 struct trace_event_file
*file
= hist_data
->event_file
;
3133 struct trace_array
*tr
= file
->tr
;
3134 char *track_data_var_str
;
3137 track_data_var_str
= data
->track_data
.var_str
;
3138 if (track_data_var_str
[0] != '$') {
3139 hist_err(tr
, HIST_ERR_ONX_NOT_VAR
, errpos(track_data_var_str
));
3142 track_data_var_str
++;
3144 var_field
= find_target_event_var(hist_data
, NULL
, NULL
, track_data_var_str
);
3146 hist_err(tr
, HIST_ERR_ONX_VAR_NOT_FOUND
, errpos(track_data_var_str
));
3150 ref_field
= create_var_ref(hist_data
, var_field
, NULL
, NULL
);
3154 data
->track_data
.var_ref
= ref_field
;
3156 if (data
->handler
== HANDLER_ONMAX
)
3157 track_var
= create_var(hist_data
, file
, "__max", sizeof(u64
), "u64");
3158 if (IS_ERR(track_var
)) {
3159 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
3160 ret
= PTR_ERR(track_var
);
3164 if (data
->handler
== HANDLER_ONCHANGE
)
3165 track_var
= create_var(hist_data
, file
, "__change", sizeof(u64
), "u64");
3166 if (IS_ERR(track_var
)) {
3167 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
3168 ret
= PTR_ERR(track_var
);
3171 data
->track_data
.track_var
= track_var
;
3173 ret
= action_create(hist_data
, data
);
3178 static int parse_action_params(struct trace_array
*tr
, char *params
,
3179 struct action_data
*data
)
3181 char *param
, *saved_param
;
3182 bool first_param
= true;
3186 if (data
->n_params
>= SYNTH_FIELDS_MAX
) {
3187 hist_err(tr
, HIST_ERR_TOO_MANY_PARAMS
, 0);
3191 param
= strsep(¶ms
, ",");
3193 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, 0);
3198 param
= strstrip(param
);
3199 if (strlen(param
) < 2) {
3200 hist_err(tr
, HIST_ERR_INVALID_PARAM
, errpos(param
));
3205 saved_param
= kstrdup(param
, GFP_KERNEL
);
3211 if (first_param
&& data
->use_trace_keyword
) {
3212 data
->synth_event_name
= saved_param
;
3213 first_param
= false;
3216 first_param
= false;
3218 data
->params
[data
->n_params
++] = saved_param
;
3224 static int action_parse(struct trace_array
*tr
, char *str
, struct action_data
*data
,
3225 enum handler_id handler
)
3232 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
3237 action_name
= strsep(&str
, "(");
3238 if (!action_name
|| !str
) {
3239 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
3244 if (str_has_prefix(action_name
, "save")) {
3245 char *params
= strsep(&str
, ")");
3248 hist_err(tr
, HIST_ERR_NO_SAVE_PARAMS
, 0);
3253 ret
= parse_action_params(tr
, params
, data
);
3257 if (handler
== HANDLER_ONMAX
)
3258 data
->track_data
.check_val
= check_track_val_max
;
3259 else if (handler
== HANDLER_ONCHANGE
)
3260 data
->track_data
.check_val
= check_track_val_changed
;
3262 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
3267 data
->track_data
.save_data
= save_track_data_vars
;
3268 data
->fn
= ontrack_action
;
3269 data
->action
= ACTION_SAVE
;
3270 } else if (str_has_prefix(action_name
, "snapshot")) {
3271 char *params
= strsep(&str
, ")");
3274 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(params
));
3279 if (handler
== HANDLER_ONMAX
)
3280 data
->track_data
.check_val
= check_track_val_max
;
3281 else if (handler
== HANDLER_ONCHANGE
)
3282 data
->track_data
.check_val
= check_track_val_changed
;
3284 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
3289 data
->track_data
.save_data
= save_track_data_snapshot
;
3290 data
->fn
= ontrack_action
;
3291 data
->action
= ACTION_SNAPSHOT
;
3293 char *params
= strsep(&str
, ")");
3295 if (str_has_prefix(action_name
, "trace"))
3296 data
->use_trace_keyword
= true;
3299 ret
= parse_action_params(tr
, params
, data
);
3304 if (handler
== HANDLER_ONMAX
)
3305 data
->track_data
.check_val
= check_track_val_max
;
3306 else if (handler
== HANDLER_ONCHANGE
)
3307 data
->track_data
.check_val
= check_track_val_changed
;
3309 if (handler
!= HANDLER_ONMATCH
) {
3310 data
->track_data
.save_data
= action_trace
;
3311 data
->fn
= ontrack_action
;
3313 data
->fn
= action_trace
;
3315 data
->action
= ACTION_TRACE
;
3318 data
->action_name
= kstrdup(action_name
, GFP_KERNEL
);
3319 if (!data
->action_name
) {
3324 data
->handler
= handler
;
3329 static struct action_data
*track_data_parse(struct hist_trigger_data
*hist_data
,
3330 char *str
, enum handler_id handler
)
3332 struct action_data
*data
;
3336 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
3338 return ERR_PTR(-ENOMEM
);
3340 var_str
= strsep(&str
, ")");
3341 if (!var_str
|| !str
) {
3346 data
->track_data
.var_str
= kstrdup(var_str
, GFP_KERNEL
);
3347 if (!data
->track_data
.var_str
) {
3352 ret
= action_parse(hist_data
->event_file
->tr
, str
, data
, handler
);
3358 track_data_destroy(hist_data
, data
);
3359 data
= ERR_PTR(ret
);
3363 static void onmatch_destroy(struct action_data
*data
)
3365 kfree(data
->match_data
.event
);
3366 kfree(data
->match_data
.event_system
);
3368 action_data_destroy(data
);
3371 static void destroy_field_var(struct field_var
*field_var
)
3376 destroy_hist_field(field_var
->var
, 0);
3377 destroy_hist_field(field_var
->val
, 0);
3382 static void destroy_field_vars(struct hist_trigger_data
*hist_data
)
3386 for (i
= 0; i
< hist_data
->n_field_vars
; i
++)
3387 destroy_field_var(hist_data
->field_vars
[i
]);
3389 for (i
= 0; i
< hist_data
->n_save_vars
; i
++)
3390 destroy_field_var(hist_data
->save_vars
[i
]);
3393 static void save_field_var(struct hist_trigger_data
*hist_data
,
3394 struct field_var
*field_var
)
3396 hist_data
->field_vars
[hist_data
->n_field_vars
++] = field_var
;
3398 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
3399 hist_data
->n_field_var_str
++;
3403 static int check_synth_field(struct synth_event
*event
,
3404 struct hist_field
*hist_field
,
3405 unsigned int field_pos
)
3407 struct synth_field
*field
;
3409 if (field_pos
>= event
->n_fields
)
3412 field
= event
->fields
[field_pos
];
3415 * A dynamic string synth field can accept static or
3416 * dynamic. A static string synth field can only accept a
3417 * same-sized static string, which is checked for later.
3419 if (strstr(hist_field
->type
, "char[") && field
->is_string
3420 && field
->is_dynamic
)
3423 if (strcmp(field
->type
, hist_field
->type
) != 0) {
3424 if (field
->size
!= hist_field
->size
||
3425 (!field
->is_string
&& field
->is_signed
!= hist_field
->is_signed
))
3432 static struct hist_field
*
3433 trace_action_find_var(struct hist_trigger_data
*hist_data
,
3434 struct action_data
*data
,
3435 char *system
, char *event
, char *var
)
3437 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3438 struct hist_field
*hist_field
;
3440 var
++; /* skip '$' */
3442 hist_field
= find_target_event_var(hist_data
, system
, event
, var
);
3444 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
3445 system
= data
->match_data
.event_system
;
3446 event
= data
->match_data
.event
;
3449 hist_field
= find_event_var(hist_data
, system
, event
, var
);
3453 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, errpos(var
));
3458 static struct hist_field
*
3459 trace_action_create_field_var(struct hist_trigger_data
*hist_data
,
3460 struct action_data
*data
, char *system
,
3461 char *event
, char *var
)
3463 struct hist_field
*hist_field
= NULL
;
3464 struct field_var
*field_var
;
3467 * First try to create a field var on the target event (the
3468 * currently being defined). This will create a variable for
3469 * unqualified fields on the target event, or if qualified,
3470 * target fields that have qualified names matching the target.
3472 field_var
= create_target_field_var(hist_data
, system
, event
, var
);
3474 if (field_var
&& !IS_ERR(field_var
)) {
3475 save_field_var(hist_data
, field_var
);
3476 hist_field
= field_var
->var
;
3480 * If no explicit system.event is specified, default to
3481 * looking for fields on the onmatch(system.event.xxx)
3484 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
3485 system
= data
->match_data
.event_system
;
3486 event
= data
->match_data
.event
;
3492 * At this point, we're looking at a field on another
3493 * event. Because we can't modify a hist trigger on
3494 * another event to add a variable for a field, we need
3495 * to create a new trigger on that event and create the
3496 * variable at the same time.
3498 hist_field
= create_field_var_hist(hist_data
, system
, event
, var
);
3499 if (IS_ERR(hist_field
))
3505 destroy_field_var(field_var
);
3510 static int trace_action_create(struct hist_trigger_data
*hist_data
,
3511 struct action_data
*data
)
3513 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3514 char *event_name
, *param
, *system
= NULL
;
3515 struct hist_field
*hist_field
, *var_ref
;
3517 unsigned int field_pos
= 0;
3518 struct synth_event
*event
;
3519 char *synth_event_name
;
3520 int var_ref_idx
, ret
= 0;
3522 lockdep_assert_held(&event_mutex
);
3524 if (data
->use_trace_keyword
)
3525 synth_event_name
= data
->synth_event_name
;
3527 synth_event_name
= data
->action_name
;
3529 event
= find_synth_event(synth_event_name
);
3531 hist_err(tr
, HIST_ERR_SYNTH_EVENT_NOT_FOUND
, errpos(synth_event_name
));
3537 for (i
= 0; i
< data
->n_params
; i
++) {
3540 p
= param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
3546 system
= strsep(¶m
, ".");
3548 param
= (char *)system
;
3549 system
= event_name
= NULL
;
3551 event_name
= strsep(¶m
, ".");
3559 if (param
[0] == '$')
3560 hist_field
= trace_action_find_var(hist_data
, data
,
3564 hist_field
= trace_action_create_field_var(hist_data
,
3576 if (check_synth_field(event
, hist_field
, field_pos
) == 0) {
3577 var_ref
= create_var_ref(hist_data
, hist_field
,
3578 system
, event_name
);
3585 var_ref_idx
= find_var_ref_idx(hist_data
, var_ref
);
3586 if (WARN_ON(var_ref_idx
< 0)) {
3592 data
->var_ref_idx
[i
] = var_ref_idx
;
3599 hist_err(tr
, HIST_ERR_SYNTH_TYPE_MISMATCH
, errpos(param
));
3605 if (field_pos
!= event
->n_fields
) {
3606 hist_err(tr
, HIST_ERR_SYNTH_COUNT_MISMATCH
, errpos(event
->name
));
3611 data
->synth_event
= event
;
3620 static int action_create(struct hist_trigger_data
*hist_data
,
3621 struct action_data
*data
)
3623 struct trace_event_file
*file
= hist_data
->event_file
;
3624 struct trace_array
*tr
= file
->tr
;
3625 struct track_data
*track_data
;
3626 struct field_var
*field_var
;
3631 if (data
->action
== ACTION_TRACE
)
3632 return trace_action_create(hist_data
, data
);
3634 if (data
->action
== ACTION_SNAPSHOT
) {
3635 track_data
= track_data_alloc(hist_data
->key_size
, data
, hist_data
);
3636 if (IS_ERR(track_data
)) {
3637 ret
= PTR_ERR(track_data
);
3641 ret
= tracing_snapshot_cond_enable(file
->tr
, track_data
,
3642 cond_snapshot_update
);
3644 track_data_free(track_data
);
3649 if (data
->action
== ACTION_SAVE
) {
3650 if (hist_data
->n_save_vars
) {
3652 hist_err(tr
, HIST_ERR_TOO_MANY_SAVE_ACTIONS
, 0);
3656 for (i
= 0; i
< data
->n_params
; i
++) {
3657 param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
3663 field_var
= create_target_field_var(hist_data
, NULL
, NULL
, param
);
3664 if (IS_ERR(field_var
)) {
3665 hist_err(tr
, HIST_ERR_FIELD_VAR_CREATE_FAIL
,
3667 ret
= PTR_ERR(field_var
);
3672 hist_data
->save_vars
[hist_data
->n_save_vars
++] = field_var
;
3673 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
3674 hist_data
->n_save_var_str
++;
/* onmatch() has no handler-specific setup; defer to the common path. */
static int onmatch_create(struct hist_trigger_data *hist_data,
			  struct action_data *data)
{
	return action_create(hist_data, data);
}
3688 static struct action_data
*onmatch_parse(struct trace_array
*tr
, char *str
)
3690 char *match_event
, *match_event_system
;
3691 struct action_data
*data
;
3694 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
3696 return ERR_PTR(-ENOMEM
);
3698 match_event
= strsep(&str
, ")");
3699 if (!match_event
|| !str
) {
3700 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(match_event
));
3704 match_event_system
= strsep(&match_event
, ".");
3706 hist_err(tr
, HIST_ERR_SUBSYS_NOT_FOUND
, errpos(match_event_system
));
3710 if (IS_ERR(event_file(tr
, match_event_system
, match_event
))) {
3711 hist_err(tr
, HIST_ERR_INVALID_SUBSYS_EVENT
, errpos(match_event
));
3715 data
->match_data
.event
= kstrdup(match_event
, GFP_KERNEL
);
3716 if (!data
->match_data
.event
) {
3721 data
->match_data
.event_system
= kstrdup(match_event_system
, GFP_KERNEL
);
3722 if (!data
->match_data
.event_system
) {
3727 ret
= action_parse(tr
, str
, data
, HANDLER_ONMATCH
);
3733 onmatch_destroy(data
);
3734 data
= ERR_PTR(ret
);
3738 static int create_hitcount_val(struct hist_trigger_data
*hist_data
)
3740 hist_data
->fields
[HITCOUNT_IDX
] =
3741 create_hist_field(hist_data
, NULL
, HIST_FIELD_FL_HITCOUNT
, NULL
);
3742 if (!hist_data
->fields
[HITCOUNT_IDX
])
3745 hist_data
->n_vals
++;
3746 hist_data
->n_fields
++;
3748 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
))
3754 static int __create_val_field(struct hist_trigger_data
*hist_data
,
3755 unsigned int val_idx
,
3756 struct trace_event_file
*file
,
3757 char *var_name
, char *field_str
,
3758 unsigned long flags
)
3760 struct hist_field
*hist_field
;
3763 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
, var_name
, 0);
3764 if (IS_ERR(hist_field
)) {
3765 ret
= PTR_ERR(hist_field
);
3769 hist_data
->fields
[val_idx
] = hist_field
;
3771 ++hist_data
->n_vals
;
3772 ++hist_data
->n_fields
;
3774 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
3780 static int create_val_field(struct hist_trigger_data
*hist_data
,
3781 unsigned int val_idx
,
3782 struct trace_event_file
*file
,
3785 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
))
3788 return __create_val_field(hist_data
, val_idx
, file
, NULL
, field_str
, 0);
3791 static const char *no_comm
= "(no comm)";
3793 static u64
hist_field_execname(struct hist_field
*hist_field
,
3794 struct tracing_map_elt
*elt
,
3795 struct trace_buffer
*buffer
,
3796 struct ring_buffer_event
*rbe
,
3799 struct hist_elt_data
*elt_data
;
3801 if (WARN_ON_ONCE(!elt
))
3802 return (u64
)(unsigned long)no_comm
;
3804 elt_data
= elt
->private_data
;
3806 if (WARN_ON_ONCE(!elt_data
->comm
))
3807 return (u64
)(unsigned long)no_comm
;
3809 return (u64
)(unsigned long)(elt_data
->comm
);
3812 /* Convert a var that points to common_pid.execname to a string */
3813 static void update_var_execname(struct hist_field
*hist_field
)
3815 hist_field
->flags
= HIST_FIELD_FL_STRING
| HIST_FIELD_FL_VAR
|
3816 HIST_FIELD_FL_EXECNAME
;
3817 hist_field
->size
= MAX_FILTER_STR_VAL
;
3818 hist_field
->is_signed
= 0;
3820 kfree_const(hist_field
->type
);
3821 hist_field
->type
= "char[]";
3823 hist_field
->fn
= hist_field_execname
;
3826 static int create_var_field(struct hist_trigger_data
*hist_data
,
3827 unsigned int val_idx
,
3828 struct trace_event_file
*file
,
3829 char *var_name
, char *expr_str
)
3831 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3832 unsigned long flags
= 0;
3835 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
3838 if (find_var(hist_data
, file
, var_name
) && !hist_data
->remove
) {
3839 hist_err(tr
, HIST_ERR_DUPLICATE_VAR
, errpos(var_name
));
3843 flags
|= HIST_FIELD_FL_VAR
;
3844 hist_data
->n_vars
++;
3845 if (WARN_ON(hist_data
->n_vars
> TRACING_MAP_VARS_MAX
))
3848 ret
= __create_val_field(hist_data
, val_idx
, file
, var_name
, expr_str
, flags
);
3850 if (!ret
&& hist_data
->fields
[val_idx
]->flags
& HIST_FIELD_FL_EXECNAME
)
3851 update_var_execname(hist_data
->fields
[val_idx
]);
3853 if (!ret
&& hist_data
->fields
[val_idx
]->flags
& HIST_FIELD_FL_STRING
)
3854 hist_data
->fields
[val_idx
]->var_str_idx
= hist_data
->n_var_str
++;
3859 static int create_val_fields(struct hist_trigger_data
*hist_data
,
3860 struct trace_event_file
*file
)
3862 char *fields_str
, *field_str
;
3863 unsigned int i
, j
= 1;
3866 ret
= create_hitcount_val(hist_data
);
3870 fields_str
= hist_data
->attrs
->vals_str
;
3874 for (i
= 0, j
= 1; i
< TRACING_MAP_VALS_MAX
&&
3875 j
< TRACING_MAP_VALS_MAX
; i
++) {
3876 field_str
= strsep(&fields_str
, ",");
3880 if (strcmp(field_str
, "hitcount") == 0)
3883 ret
= create_val_field(hist_data
, j
++, file
, field_str
);
3888 if (fields_str
&& (strcmp(fields_str
, "hitcount") != 0))
3894 static int create_key_field(struct hist_trigger_data
*hist_data
,
3895 unsigned int key_idx
,
3896 unsigned int key_offset
,
3897 struct trace_event_file
*file
,
3900 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3901 struct hist_field
*hist_field
= NULL
;
3902 unsigned long flags
= 0;
3903 unsigned int key_size
;
3906 if (WARN_ON(key_idx
>= HIST_FIELDS_MAX
))
3909 flags
|= HIST_FIELD_FL_KEY
;
3911 if (strcmp(field_str
, "stacktrace") == 0) {
3912 flags
|= HIST_FIELD_FL_STACKTRACE
;
3913 key_size
= sizeof(unsigned long) * HIST_STACKTRACE_DEPTH
;
3914 hist_field
= create_hist_field(hist_data
, NULL
, flags
, NULL
);
3916 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
,
3918 if (IS_ERR(hist_field
)) {
3919 ret
= PTR_ERR(hist_field
);
3923 if (field_has_hist_vars(hist_field
, 0)) {
3924 hist_err(tr
, HIST_ERR_INVALID_REF_KEY
, errpos(field_str
));
3925 destroy_hist_field(hist_field
, 0);
3930 key_size
= hist_field
->size
;
3933 hist_data
->fields
[key_idx
] = hist_field
;
3935 key_size
= ALIGN(key_size
, sizeof(u64
));
3936 hist_data
->fields
[key_idx
]->size
= key_size
;
3937 hist_data
->fields
[key_idx
]->offset
= key_offset
;
3939 hist_data
->key_size
+= key_size
;
3941 if (hist_data
->key_size
> HIST_KEY_SIZE_MAX
) {
3946 hist_data
->n_keys
++;
3947 hist_data
->n_fields
++;
3949 if (WARN_ON(hist_data
->n_keys
> TRACING_MAP_KEYS_MAX
))
3957 static int create_key_fields(struct hist_trigger_data
*hist_data
,
3958 struct trace_event_file
*file
)
3960 unsigned int i
, key_offset
= 0, n_vals
= hist_data
->n_vals
;
3961 char *fields_str
, *field_str
;
3964 fields_str
= hist_data
->attrs
->keys_str
;
3968 for (i
= n_vals
; i
< n_vals
+ TRACING_MAP_KEYS_MAX
; i
++) {
3969 field_str
= strsep(&fields_str
, ",");
3972 ret
= create_key_field(hist_data
, i
, key_offset
,
3987 static int create_var_fields(struct hist_trigger_data
*hist_data
,
3988 struct trace_event_file
*file
)
3990 unsigned int i
, j
= hist_data
->n_vals
;
3993 unsigned int n_vars
= hist_data
->attrs
->var_defs
.n_vars
;
3995 for (i
= 0; i
< n_vars
; i
++) {
3996 char *var_name
= hist_data
->attrs
->var_defs
.name
[i
];
3997 char *expr
= hist_data
->attrs
->var_defs
.expr
[i
];
3999 ret
= create_var_field(hist_data
, j
++, file
, var_name
, expr
);
4007 static void free_var_defs(struct hist_trigger_data
*hist_data
)
4011 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
4012 kfree(hist_data
->attrs
->var_defs
.name
[i
]);
4013 kfree(hist_data
->attrs
->var_defs
.expr
[i
]);
4016 hist_data
->attrs
->var_defs
.n_vars
= 0;
4019 static int parse_var_defs(struct hist_trigger_data
*hist_data
)
4021 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4022 char *s
, *str
, *var_name
, *field_str
;
4023 unsigned int i
, j
, n_vars
= 0;
4026 for (i
= 0; i
< hist_data
->attrs
->n_assignments
; i
++) {
4027 str
= hist_data
->attrs
->assignment_str
[i
];
4028 for (j
= 0; j
< TRACING_MAP_VARS_MAX
; j
++) {
4029 field_str
= strsep(&str
, ",");
4033 var_name
= strsep(&field_str
, "=");
4034 if (!var_name
|| !field_str
) {
4035 hist_err(tr
, HIST_ERR_MALFORMED_ASSIGNMENT
,
4041 if (n_vars
== TRACING_MAP_VARS_MAX
) {
4042 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(var_name
));
4047 s
= kstrdup(var_name
, GFP_KERNEL
);
4052 hist_data
->attrs
->var_defs
.name
[n_vars
] = s
;
4054 s
= kstrdup(field_str
, GFP_KERNEL
);
4059 hist_data
->attrs
->var_defs
.expr
[n_vars
++] = s
;
4061 hist_data
->attrs
->var_defs
.n_vars
= n_vars
;
4067 free_var_defs(hist_data
);
/*
 * Build the histogram's field tables in order: parse variable
 * definitions, then create value, variable and key fields.  The parsed
 * definition strings are only needed during creation and are freed on
 * exit regardless of outcome (parse_var_defs cleans up after itself on
 * failure).
 */
static int create_hist_fields(struct hist_trigger_data *hist_data,
			      struct trace_event_file *file)
{
	int ret;

	ret = parse_var_defs(hist_data);
	if (ret)
		return ret;

	ret = create_val_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_var_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_key_fields(hist_data, file);
 out:
	free_var_defs(hist_data);

	return ret;
}
4098 static int is_descending(struct trace_array
*tr
, const char *str
)
4103 if (strcmp(str
, "descending") == 0)
4106 if (strcmp(str
, "ascending") == 0)
4109 hist_err(tr
, HIST_ERR_INVALID_SORT_MODIFIER
, errpos((char *)str
));
4114 static int create_sort_keys(struct hist_trigger_data
*hist_data
)
4116 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4117 char *fields_str
= hist_data
->attrs
->sort_key_str
;
4118 struct tracing_map_sort_key
*sort_key
;
4119 int descending
, ret
= 0;
4120 unsigned int i
, j
, k
;
4122 hist_data
->n_sort_keys
= 1; /* we always have at least one, hitcount */
4127 for (i
= 0; i
< TRACING_MAP_SORT_KEYS_MAX
; i
++) {
4128 struct hist_field
*hist_field
;
4129 char *field_str
, *field_name
;
4130 const char *test_name
;
4132 sort_key
= &hist_data
->sort_keys
[i
];
4134 field_str
= strsep(&fields_str
, ",");
4140 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
4144 if ((i
== TRACING_MAP_SORT_KEYS_MAX
- 1) && fields_str
) {
4145 hist_err(tr
, HIST_ERR_TOO_MANY_SORT_FIELDS
, errpos("sort="));
4150 field_name
= strsep(&field_str
, ".");
4151 if (!field_name
|| !*field_name
) {
4153 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
4157 if (strcmp(field_name
, "hitcount") == 0) {
4158 descending
= is_descending(tr
, field_str
);
4159 if (descending
< 0) {
4163 sort_key
->descending
= descending
;
4167 for (j
= 1, k
= 1; j
< hist_data
->n_fields
; j
++) {
4170 hist_field
= hist_data
->fields
[j
];
4171 if (hist_field
->flags
& HIST_FIELD_FL_VAR
)
4176 test_name
= hist_field_name(hist_field
, 0);
4178 if (strcmp(field_name
, test_name
) == 0) {
4179 sort_key
->field_idx
= idx
;
4180 descending
= is_descending(tr
, field_str
);
4181 if (descending
< 0) {
4185 sort_key
->descending
= descending
;
4189 if (j
== hist_data
->n_fields
) {
4191 hist_err(tr
, HIST_ERR_INVALID_SORT_FIELD
, errpos(field_name
));
4196 hist_data
->n_sort_keys
= i
;
4201 static void destroy_actions(struct hist_trigger_data
*hist_data
)
4205 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4206 struct action_data
*data
= hist_data
->actions
[i
];
4208 if (data
->handler
== HANDLER_ONMATCH
)
4209 onmatch_destroy(data
);
4210 else if (data
->handler
== HANDLER_ONMAX
||
4211 data
->handler
== HANDLER_ONCHANGE
)
4212 track_data_destroy(hist_data
, data
);
4218 static int parse_actions(struct hist_trigger_data
*hist_data
)
4220 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4221 struct action_data
*data
;
4227 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4228 str
= hist_data
->attrs
->action_str
[i
];
4230 if ((len
= str_has_prefix(str
, "onmatch("))) {
4231 char *action_str
= str
+ len
;
4233 data
= onmatch_parse(tr
, action_str
);
4235 ret
= PTR_ERR(data
);
4238 } else if ((len
= str_has_prefix(str
, "onmax("))) {
4239 char *action_str
= str
+ len
;
4241 data
= track_data_parse(hist_data
, action_str
,
4244 ret
= PTR_ERR(data
);
4247 } else if ((len
= str_has_prefix(str
, "onchange("))) {
4248 char *action_str
= str
+ len
;
4250 data
= track_data_parse(hist_data
, action_str
,
4253 ret
= PTR_ERR(data
);
4261 hist_data
->actions
[hist_data
->n_actions
++] = data
;
4267 static int create_actions(struct hist_trigger_data
*hist_data
)
4269 struct action_data
*data
;
4273 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
4274 data
= hist_data
->actions
[i
];
4276 if (data
->handler
== HANDLER_ONMATCH
) {
4277 ret
= onmatch_create(hist_data
, data
);
4280 } else if (data
->handler
== HANDLER_ONMAX
||
4281 data
->handler
== HANDLER_ONCHANGE
) {
4282 ret
= track_data_create(hist_data
, data
);
4294 static void print_actions(struct seq_file
*m
,
4295 struct hist_trigger_data
*hist_data
,
4296 struct tracing_map_elt
*elt
)
4300 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4301 struct action_data
*data
= hist_data
->actions
[i
];
4303 if (data
->action
== ACTION_SNAPSHOT
)
4306 if (data
->handler
== HANDLER_ONMAX
||
4307 data
->handler
== HANDLER_ONCHANGE
)
4308 track_data_print(m
, hist_data
, elt
, data
);
4312 static void print_action_spec(struct seq_file
*m
,
4313 struct hist_trigger_data
*hist_data
,
4314 struct action_data
*data
)
4318 if (data
->action
== ACTION_SAVE
) {
4319 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
4320 seq_printf(m
, "%s", hist_data
->save_vars
[i
]->var
->var
.name
);
4321 if (i
< hist_data
->n_save_vars
- 1)
4324 } else if (data
->action
== ACTION_TRACE
) {
4325 if (data
->use_trace_keyword
)
4326 seq_printf(m
, "%s", data
->synth_event_name
);
4327 for (i
= 0; i
< data
->n_params
; i
++) {
4328 if (i
|| data
->use_trace_keyword
)
4330 seq_printf(m
, "%s", data
->params
[i
]);
4335 static void print_track_data_spec(struct seq_file
*m
,
4336 struct hist_trigger_data
*hist_data
,
4337 struct action_data
*data
)
4339 if (data
->handler
== HANDLER_ONMAX
)
4340 seq_puts(m
, ":onmax(");
4341 else if (data
->handler
== HANDLER_ONCHANGE
)
4342 seq_puts(m
, ":onchange(");
4343 seq_printf(m
, "%s", data
->track_data
.var_str
);
4344 seq_printf(m
, ").%s(", data
->action_name
);
4346 print_action_spec(m
, hist_data
, data
);
4351 static void print_onmatch_spec(struct seq_file
*m
,
4352 struct hist_trigger_data
*hist_data
,
4353 struct action_data
*data
)
4355 seq_printf(m
, ":onmatch(%s.%s).", data
->match_data
.event_system
,
4356 data
->match_data
.event
);
4358 seq_printf(m
, "%s(", data
->action_name
);
4360 print_action_spec(m
, hist_data
, data
);
4365 static bool actions_match(struct hist_trigger_data
*hist_data
,
4366 struct hist_trigger_data
*hist_data_test
)
4370 if (hist_data
->n_actions
!= hist_data_test
->n_actions
)
4373 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4374 struct action_data
*data
= hist_data
->actions
[i
];
4375 struct action_data
*data_test
= hist_data_test
->actions
[i
];
4376 char *action_name
, *action_name_test
;
4378 if (data
->handler
!= data_test
->handler
)
4380 if (data
->action
!= data_test
->action
)
4383 if (data
->n_params
!= data_test
->n_params
)
4386 for (j
= 0; j
< data
->n_params
; j
++) {
4387 if (strcmp(data
->params
[j
], data_test
->params
[j
]) != 0)
4391 if (data
->use_trace_keyword
)
4392 action_name
= data
->synth_event_name
;
4394 action_name
= data
->action_name
;
4396 if (data_test
->use_trace_keyword
)
4397 action_name_test
= data_test
->synth_event_name
;
4399 action_name_test
= data_test
->action_name
;
4401 if (strcmp(action_name
, action_name_test
) != 0)
4404 if (data
->handler
== HANDLER_ONMATCH
) {
4405 if (strcmp(data
->match_data
.event_system
,
4406 data_test
->match_data
.event_system
) != 0)
4408 if (strcmp(data
->match_data
.event
,
4409 data_test
->match_data
.event
) != 0)
4411 } else if (data
->handler
== HANDLER_ONMAX
||
4412 data
->handler
== HANDLER_ONCHANGE
) {
4413 if (strcmp(data
->track_data
.var_str
,
4414 data_test
->track_data
.var_str
) != 0)
4423 static void print_actions_spec(struct seq_file
*m
,
4424 struct hist_trigger_data
*hist_data
)
4428 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4429 struct action_data
*data
= hist_data
->actions
[i
];
4431 if (data
->handler
== HANDLER_ONMATCH
)
4432 print_onmatch_spec(m
, hist_data
, data
);
4433 else if (data
->handler
== HANDLER_ONMAX
||
4434 data
->handler
== HANDLER_ONCHANGE
)
4435 print_track_data_spec(m
, hist_data
, data
);
4439 static void destroy_field_var_hists(struct hist_trigger_data
*hist_data
)
4443 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
4444 kfree(hist_data
->field_var_hists
[i
]->cmd
);
4445 kfree(hist_data
->field_var_hists
[i
]);
4449 static void destroy_hist_data(struct hist_trigger_data
*hist_data
)
4454 destroy_hist_trigger_attrs(hist_data
->attrs
);
4455 destroy_hist_fields(hist_data
);
4456 tracing_map_destroy(hist_data
->map
);
4458 destroy_actions(hist_data
);
4459 destroy_field_vars(hist_data
);
4460 destroy_field_var_hists(hist_data
);
4465 static int create_tracing_map_fields(struct hist_trigger_data
*hist_data
)
4467 struct tracing_map
*map
= hist_data
->map
;
4468 struct ftrace_event_field
*field
;
4469 struct hist_field
*hist_field
;
4472 for_each_hist_field(i
, hist_data
) {
4473 hist_field
= hist_data
->fields
[i
];
4474 if (hist_field
->flags
& HIST_FIELD_FL_KEY
) {
4475 tracing_map_cmp_fn_t cmp_fn
;
4477 field
= hist_field
->field
;
4479 if (hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
4480 cmp_fn
= tracing_map_cmp_none
;
4481 else if (!field
|| hist_field
->flags
& HIST_FIELD_FL_CPU
)
4482 cmp_fn
= tracing_map_cmp_num(hist_field
->size
,
4483 hist_field
->is_signed
);
4484 else if (is_string_field(field
))
4485 cmp_fn
= tracing_map_cmp_string
;
4487 cmp_fn
= tracing_map_cmp_num(field
->size
,
4489 idx
= tracing_map_add_key_field(map
,
4492 } else if (!(hist_field
->flags
& HIST_FIELD_FL_VAR
))
4493 idx
= tracing_map_add_sum_field(map
);
4498 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4499 idx
= tracing_map_add_var(map
);
4502 hist_field
->var
.idx
= idx
;
4503 hist_field
->var
.hist_data
= hist_data
;
4510 static struct hist_trigger_data
*
4511 create_hist_data(unsigned int map_bits
,
4512 struct hist_trigger_attrs
*attrs
,
4513 struct trace_event_file
*file
,
4516 const struct tracing_map_ops
*map_ops
= NULL
;
4517 struct hist_trigger_data
*hist_data
;
4520 hist_data
= kzalloc(sizeof(*hist_data
), GFP_KERNEL
);
4522 return ERR_PTR(-ENOMEM
);
4524 hist_data
->attrs
= attrs
;
4525 hist_data
->remove
= remove
;
4526 hist_data
->event_file
= file
;
4528 ret
= parse_actions(hist_data
);
4532 ret
= create_hist_fields(hist_data
, file
);
4536 ret
= create_sort_keys(hist_data
);
4540 map_ops
= &hist_trigger_elt_data_ops
;
4542 hist_data
->map
= tracing_map_create(map_bits
, hist_data
->key_size
,
4543 map_ops
, hist_data
);
4544 if (IS_ERR(hist_data
->map
)) {
4545 ret
= PTR_ERR(hist_data
->map
);
4546 hist_data
->map
= NULL
;
4550 ret
= create_tracing_map_fields(hist_data
);
4556 hist_data
->attrs
= NULL
;
4558 destroy_hist_data(hist_data
);
4560 hist_data
= ERR_PTR(ret
);
4565 static void hist_trigger_elt_update(struct hist_trigger_data
*hist_data
,
4566 struct tracing_map_elt
*elt
,
4567 struct trace_buffer
*buffer
, void *rec
,
4568 struct ring_buffer_event
*rbe
,
4571 struct hist_elt_data
*elt_data
;
4572 struct hist_field
*hist_field
;
4573 unsigned int i
, var_idx
;
4576 elt_data
= elt
->private_data
;
4577 elt_data
->var_ref_vals
= var_ref_vals
;
4579 for_each_hist_val_field(i
, hist_data
) {
4580 hist_field
= hist_data
->fields
[i
];
4581 hist_val
= hist_field
->fn(hist_field
, elt
, buffer
, rbe
, rec
);
4582 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4583 var_idx
= hist_field
->var
.idx
;
4585 if (hist_field
->flags
& HIST_FIELD_FL_STRING
) {
4586 unsigned int str_start
, var_str_idx
, idx
;
4587 char *str
, *val_str
;
4590 str_start
= hist_data
->n_field_var_str
+
4591 hist_data
->n_save_var_str
;
4592 var_str_idx
= hist_field
->var_str_idx
;
4593 idx
= str_start
+ var_str_idx
;
4595 str
= elt_data
->field_var_str
[idx
];
4596 val_str
= (char *)(uintptr_t)hist_val
;
4598 size
= min(hist_field
->size
, STR_VAR_LEN_MAX
);
4599 strscpy(str
, val_str
, size
);
4601 hist_val
= (u64
)(uintptr_t)str
;
4603 tracing_map_set_var(elt
, var_idx
, hist_val
);
4606 tracing_map_update_sum(elt
, i
, hist_val
);
4609 for_each_hist_key_field(i
, hist_data
) {
4610 hist_field
= hist_data
->fields
[i
];
4611 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
4612 hist_val
= hist_field
->fn(hist_field
, elt
, buffer
, rbe
, rec
);
4613 var_idx
= hist_field
->var
.idx
;
4614 tracing_map_set_var(elt
, var_idx
, hist_val
);
4618 update_field_vars(hist_data
, elt
, buffer
, rbe
, rec
);
/*
 * add_to_key() - copy one key field's value into the compound key buffer.
 *
 * NOTE(review): this chunk is a lossy extraction -- the embedded original
 * line numbers (4621, 4624, ...) skip values, so braces and the bodies of
 * some branches sit on lines missing from this view.  Code lines below are
 * byte-identical to the fragment; only comments were added.
 */
4621 static inline void add_to_key(char *compound_key
, void *key
,
4622 struct hist_field
*key_field
, void *rec
)
/* Default copy length: the field's declared size. */
4624 size_t size
= key_field
->size
;
/* String keys need their actual length, not the declared slot size. */
4626 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
4627 struct ftrace_event_field
*field
;
4629 field
= key_field
->field
;
/*
 * Dynamic strings pack <offset,len> into a u32 inside the record;
 * the high 16 bits carry the length.
 */
4630 if (field
->filter_type
== FILTER_DYN_STRING
)
4631 size
= *(u32
*)(rec
+ field
->offset
) >> 16;
/* Static-string handling is on the next (missing) line -- not visible. */
4632 else if (field
->filter_type
== FILTER_STATIC_STRING
)
/* Clamp so a terminating NUL always fits in the key slot. */
4635 /* ensure NULL-termination */
4636 if (size
> key_field
->size
- 1)
4637 size
= key_field
->size
- 1;
/*
 * NOTE(review): strncpy() does not NUL-terminate by itself; termination
 * appears to rely on the clamp above plus the caller zeroing compound_key
 * (see the memset in event_hist_trigger) -- verify before changing.
 */
4639 strncpy(compound_key
+ key_field
->offset
, (char *)key
, size
);
/* Non-string keys (else arm on a missing line): raw byte copy. */
4641 memcpy(compound_key
+ key_field
->offset
, key
, size
);
4645 hist_trigger_actions(struct hist_trigger_data
*hist_data
,
4646 struct tracing_map_elt
*elt
,
4647 struct trace_buffer
*buffer
, void *rec
,
4648 struct ring_buffer_event
*rbe
, void *key
,
4651 struct action_data
*data
;
4654 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4655 data
= hist_data
->actions
[i
];
4656 data
->fn(hist_data
, elt
, buffer
, rec
, rbe
, key
, data
, var_ref_vals
);
4660 static void event_hist_trigger(struct event_trigger_data
*data
,
4661 struct trace_buffer
*buffer
, void *rec
,
4662 struct ring_buffer_event
*rbe
)
4664 struct hist_trigger_data
*hist_data
= data
->private_data
;
4665 bool use_compound_key
= (hist_data
->n_keys
> 1);
4666 unsigned long entries
[HIST_STACKTRACE_DEPTH
];
4667 u64 var_ref_vals
[TRACING_MAP_VARS_MAX
];
4668 char compound_key
[HIST_KEY_SIZE_MAX
];
4669 struct tracing_map_elt
*elt
= NULL
;
4670 struct hist_field
*key_field
;
4675 memset(compound_key
, 0, hist_data
->key_size
);
4677 for_each_hist_key_field(i
, hist_data
) {
4678 key_field
= hist_data
->fields
[i
];
4680 if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
4681 memset(entries
, 0, HIST_STACKTRACE_SIZE
);
4682 stack_trace_save(entries
, HIST_STACKTRACE_DEPTH
,
4683 HIST_STACKTRACE_SKIP
);
4686 field_contents
= key_field
->fn(key_field
, elt
, buffer
, rbe
, rec
);
4687 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
4688 key
= (void *)(unsigned long)field_contents
;
4689 use_compound_key
= true;
4691 key
= (void *)&field_contents
;
4694 if (use_compound_key
)
4695 add_to_key(compound_key
, key
, key_field
, rec
);
4698 if (use_compound_key
)
4701 if (hist_data
->n_var_refs
&&
4702 !resolve_var_refs(hist_data
, key
, var_ref_vals
, false))
4705 elt
= tracing_map_insert(hist_data
->map
, key
);
4709 hist_trigger_elt_update(hist_data
, elt
, buffer
, rec
, rbe
, var_ref_vals
);
4711 if (resolve_var_refs(hist_data
, key
, var_ref_vals
, true))
4712 hist_trigger_actions(hist_data
, elt
, buffer
, rec
, rbe
, key
, var_ref_vals
);
/*
 * hist_trigger_stacktrace_print() - pretty-print a saved stacktrace key.
 *
 * NOTE(review): lossy fragment -- embedded line numbers skip values, so
 * the opening brace, the early-exit on an empty entry, and the closing
 * braces are outside this view.  Code lines are byte-identical; only
 * comments were added.
 */
4715 static void hist_trigger_stacktrace_print(struct seq_file
*m
,
4716 unsigned long *stacktrace_entries
,
4717 unsigned int max_entries
)
/* Scratch buffer for one symbolized frame. */
4719 char str
[KSYM_SYMBOL_LEN
];
/* Fixed indent printed in front of every frame. */
4720 unsigned int spaces
= 8;
4723 for (i
= 0; i
< max_entries
; i
++) {
/* A zero entry marks the end of the captured trace. */
4724 if (!stacktrace_entries
[i
])
/* Indent, then print the symbolized address for this frame. */
4727 seq_printf(m
, "%*c", 1 + spaces
, ' ');
4728 sprint_symbol(str
, stacktrace_entries
[i
]);
4729 seq_printf(m
, "%s\n", str
);
4733 static void hist_trigger_print_key(struct seq_file
*m
,
4734 struct hist_trigger_data
*hist_data
,
4736 struct tracing_map_elt
*elt
)
4738 struct hist_field
*key_field
;
4739 char str
[KSYM_SYMBOL_LEN
];
4740 bool multiline
= false;
4741 const char *field_name
;
4747 for_each_hist_key_field(i
, hist_data
) {
4748 key_field
= hist_data
->fields
[i
];
4750 if (i
> hist_data
->n_vals
)
4753 field_name
= hist_field_name(key_field
, 0);
4755 if (key_field
->flags
& HIST_FIELD_FL_HEX
) {
4756 uval
= *(u64
*)(key
+ key_field
->offset
);
4757 seq_printf(m
, "%s: %llx", field_name
, uval
);
4758 } else if (key_field
->flags
& HIST_FIELD_FL_SYM
) {
4759 uval
= *(u64
*)(key
+ key_field
->offset
);
4760 sprint_symbol_no_offset(str
, uval
);
4761 seq_printf(m
, "%s: [%llx] %-45s", field_name
,
4763 } else if (key_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
) {
4764 uval
= *(u64
*)(key
+ key_field
->offset
);
4765 sprint_symbol(str
, uval
);
4766 seq_printf(m
, "%s: [%llx] %-55s", field_name
,
4768 } else if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
4769 struct hist_elt_data
*elt_data
= elt
->private_data
;
4772 if (WARN_ON_ONCE(!elt_data
))
4775 comm
= elt_data
->comm
;
4777 uval
= *(u64
*)(key
+ key_field
->offset
);
4778 seq_printf(m
, "%s: %-16s[%10llu]", field_name
,
4780 } else if (key_field
->flags
& HIST_FIELD_FL_SYSCALL
) {
4781 const char *syscall_name
;
4783 uval
= *(u64
*)(key
+ key_field
->offset
);
4784 syscall_name
= get_syscall_name(uval
);
4786 syscall_name
= "unknown_syscall";
4788 seq_printf(m
, "%s: %-30s[%3llu]", field_name
,
4789 syscall_name
, uval
);
4790 } else if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
4791 seq_puts(m
, "stacktrace:\n");
4792 hist_trigger_stacktrace_print(m
,
4793 key
+ key_field
->offset
,
4794 HIST_STACKTRACE_DEPTH
);
4796 } else if (key_field
->flags
& HIST_FIELD_FL_LOG2
) {
4797 seq_printf(m
, "%s: ~ 2^%-2llu", field_name
,
4798 *(u64
*)(key
+ key_field
->offset
));
4799 } else if (key_field
->flags
& HIST_FIELD_FL_BUCKET
) {
4800 unsigned long buckets
= key_field
->buckets
;
4801 uval
= *(u64
*)(key
+ key_field
->offset
);
4802 seq_printf(m
, "%s: ~ %llu-%llu", field_name
,
4803 uval
, uval
+ buckets
-1);
4804 } else if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
4805 seq_printf(m
, "%s: %-50s", field_name
,
4806 (char *)(key
+ key_field
->offset
));
4808 uval
= *(u64
*)(key
+ key_field
->offset
);
4809 seq_printf(m
, "%s: %10llu", field_name
, uval
);
4819 static void hist_trigger_entry_print(struct seq_file
*m
,
4820 struct hist_trigger_data
*hist_data
,
4822 struct tracing_map_elt
*elt
)
4824 const char *field_name
;
4827 hist_trigger_print_key(m
, hist_data
, key
, elt
);
4829 seq_printf(m
, " hitcount: %10llu",
4830 tracing_map_read_sum(elt
, HITCOUNT_IDX
));
4832 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
4833 field_name
= hist_field_name(hist_data
->fields
[i
], 0);
4835 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_VAR
||
4836 hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_EXPR
)
4839 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_HEX
) {
4840 seq_printf(m
, " %s: %10llx", field_name
,
4841 tracing_map_read_sum(elt
, i
));
4843 seq_printf(m
, " %s: %10llu", field_name
,
4844 tracing_map_read_sum(elt
, i
));
4848 print_actions(m
, hist_data
, elt
);
4853 static int print_entries(struct seq_file
*m
,
4854 struct hist_trigger_data
*hist_data
)
4856 struct tracing_map_sort_entry
**sort_entries
= NULL
;
4857 struct tracing_map
*map
= hist_data
->map
;
4860 n_entries
= tracing_map_sort_entries(map
, hist_data
->sort_keys
,
4861 hist_data
->n_sort_keys
,
4866 for (i
= 0; i
< n_entries
; i
++)
4867 hist_trigger_entry_print(m
, hist_data
,
4868 sort_entries
[i
]->key
,
4869 sort_entries
[i
]->elt
);
4871 tracing_map_destroy_sort_entries(sort_entries
, n_entries
);
4876 static void hist_trigger_show(struct seq_file
*m
,
4877 struct event_trigger_data
*data
, int n
)
4879 struct hist_trigger_data
*hist_data
;
4883 seq_puts(m
, "\n\n");
4885 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
4886 data
->ops
->print(m
, data
->ops
, data
);
4887 seq_puts(m
, "#\n\n");
4889 hist_data
= data
->private_data
;
4890 n_entries
= print_entries(m
, hist_data
);
4894 track_data_snapshot_print(m
, hist_data
);
4896 seq_printf(m
, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
4897 (u64
)atomic64_read(&hist_data
->map
->hits
),
4898 n_entries
, (u64
)atomic64_read(&hist_data
->map
->drops
));
4901 static int hist_show(struct seq_file
*m
, void *v
)
4903 struct event_trigger_data
*data
;
4904 struct trace_event_file
*event_file
;
4907 mutex_lock(&event_mutex
);
4909 event_file
= event_file_data(m
->private);
4910 if (unlikely(!event_file
)) {
4915 list_for_each_entry(data
, &event_file
->triggers
, list
) {
4916 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
4917 hist_trigger_show(m
, data
, n
++);
4921 mutex_unlock(&event_mutex
);
4926 static int event_hist_open(struct inode
*inode
, struct file
*file
)
4930 ret
= security_locked_down(LOCKDOWN_TRACEFS
);
4934 return single_open(file
, hist_show
, file
);
4937 const struct file_operations event_hist_fops
= {
4938 .open
= event_hist_open
,
4940 .llseek
= seq_lseek
,
4941 .release
= single_release
,
4944 #ifdef CONFIG_HIST_TRIGGERS_DEBUG
/*
 * hist_field_debug_show_flags() - dump a hist_field's role flags
 * (CONFIG_HIST_TRIGGERS_DEBUG only).
 *
 * NOTE(review): lossy fragment -- the "else" line preceding the
 * "normal u64 value" print and the surrounding braces sit on missing
 * lines.  Code lines are byte-identical; only comments were added.
 */
4945 static void hist_field_debug_show_flags(struct seq_file
*m
,
4946 unsigned long flags
)
4948 seq_puts(m
, " flags:\n");
/* Role flags are tested as mutually exclusive: key, hitcount, var, ref. */
4950 if (flags
& HIST_FIELD_FL_KEY
)
4951 seq_puts(m
, " HIST_FIELD_FL_KEY\n");
4952 else if (flags
& HIST_FIELD_FL_HITCOUNT
)
4953 seq_puts(m
, " VAL: HIST_FIELD_FL_HITCOUNT\n");
4954 else if (flags
& HIST_FIELD_FL_VAR
)
4955 seq_puts(m
, " HIST_FIELD_FL_VAR\n");
4956 else if (flags
& HIST_FIELD_FL_VAR_REF
)
4957 seq_puts(m
, " HIST_FIELD_FL_VAR_REF\n");
/* Fallthrough branch (its "else" is on a missing line): plain u64 value. */
4959 seq_puts(m
, " VAL: normal u64 value\n");
/* ALIAS is orthogonal to the roles above, so it is tested separately. */
4961 if (flags
& HIST_FIELD_FL_ALIAS
)
4962 seq_puts(m
, " HIST_FIELD_FL_ALIAS\n");
4965 static int hist_field_debug_show(struct seq_file
*m
,
4966 struct hist_field
*field
, unsigned long flags
)
4968 if ((field
->flags
& flags
) != flags
) {
4969 seq_printf(m
, "ERROR: bad flags - %lx\n", flags
);
4973 hist_field_debug_show_flags(m
, field
->flags
);
4975 seq_printf(m
, " ftrace_event_field name: %s\n",
4976 field
->field
->name
);
4978 if (field
->flags
& HIST_FIELD_FL_VAR
) {
4979 seq_printf(m
, " var.name: %s\n", field
->var
.name
);
4980 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
4984 if (field
->flags
& HIST_FIELD_FL_ALIAS
)
4985 seq_printf(m
, " var_ref_idx (into hist_data->var_refs[]): %u\n",
4986 field
->var_ref_idx
);
4988 if (field
->flags
& HIST_FIELD_FL_VAR_REF
) {
4989 seq_printf(m
, " name: %s\n", field
->name
);
4990 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
4992 seq_printf(m
, " var.hist_data: %p\n", field
->var
.hist_data
);
4993 seq_printf(m
, " var_ref_idx (into hist_data->var_refs[]): %u\n",
4994 field
->var_ref_idx
);
4996 seq_printf(m
, " system: %s\n", field
->system
);
4997 if (field
->event_name
)
4998 seq_printf(m
, " event_name: %s\n", field
->event_name
);
5001 seq_printf(m
, " type: %s\n", field
->type
);
5002 seq_printf(m
, " size: %u\n", field
->size
);
5003 seq_printf(m
, " is_signed: %u\n", field
->is_signed
);
5008 static int field_var_debug_show(struct seq_file
*m
,
5009 struct field_var
*field_var
, unsigned int i
,
5012 const char *vars_name
= save_vars
? "save_vars" : "field_vars";
5013 struct hist_field
*field
;
5016 seq_printf(m
, "\n hist_data->%s[%d]:\n", vars_name
, i
);
5018 field
= field_var
->var
;
5020 seq_printf(m
, "\n %s[%d].var:\n", vars_name
, i
);
5022 hist_field_debug_show_flags(m
, field
->flags
);
5023 seq_printf(m
, " var.name: %s\n", field
->var
.name
);
5024 seq_printf(m
, " var.idx (into tracing_map_elt.vars[]): %u\n",
5027 field
= field_var
->val
;
5029 seq_printf(m
, "\n %s[%d].val:\n", vars_name
, i
);
5031 seq_printf(m
, " ftrace_event_field name: %s\n",
5032 field
->field
->name
);
5038 seq_printf(m
, " type: %s\n", field
->type
);
5039 seq_printf(m
, " size: %u\n", field
->size
);
5040 seq_printf(m
, " is_signed: %u\n", field
->is_signed
);
5045 static int hist_action_debug_show(struct seq_file
*m
,
5046 struct action_data
*data
, int i
)
5050 if (data
->handler
== HANDLER_ONMAX
||
5051 data
->handler
== HANDLER_ONCHANGE
) {
5052 seq_printf(m
, "\n hist_data->actions[%d].track_data.var_ref:\n", i
);
5053 ret
= hist_field_debug_show(m
, data
->track_data
.var_ref
,
5054 HIST_FIELD_FL_VAR_REF
);
5058 seq_printf(m
, "\n hist_data->actions[%d].track_data.track_var:\n", i
);
5059 ret
= hist_field_debug_show(m
, data
->track_data
.track_var
,
5065 if (data
->handler
== HANDLER_ONMATCH
) {
5066 seq_printf(m
, "\n hist_data->actions[%d].match_data.event_system: %s\n",
5067 i
, data
->match_data
.event_system
);
5068 seq_printf(m
, " hist_data->actions[%d].match_data.event: %s\n",
5069 i
, data
->match_data
.event
);
5075 static int hist_actions_debug_show(struct seq_file
*m
,
5076 struct hist_trigger_data
*hist_data
)
5080 if (hist_data
->n_actions
)
5081 seq_puts(m
, "\n action tracking variables (for onmax()/onchange()/onmatch()):\n");
5083 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5084 struct action_data
*action
= hist_data
->actions
[i
];
5086 ret
= hist_action_debug_show(m
, action
, i
);
5091 if (hist_data
->n_save_vars
)
5092 seq_puts(m
, "\n save action variables (save() params):\n");
5094 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
5095 ret
= field_var_debug_show(m
, hist_data
->save_vars
[i
], i
, true);
5103 static void hist_trigger_debug_show(struct seq_file
*m
,
5104 struct event_trigger_data
*data
, int n
)
5106 struct hist_trigger_data
*hist_data
;
5110 seq_puts(m
, "\n\n");
5112 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
5113 data
->ops
->print(m
, data
->ops
, data
);
5114 seq_puts(m
, "#\n\n");
5116 hist_data
= data
->private_data
;
5118 seq_printf(m
, "hist_data: %p\n\n", hist_data
);
5119 seq_printf(m
, " n_vals: %u\n", hist_data
->n_vals
);
5120 seq_printf(m
, " n_keys: %u\n", hist_data
->n_keys
);
5121 seq_printf(m
, " n_fields: %u\n", hist_data
->n_fields
);
5123 seq_puts(m
, "\n val fields:\n\n");
5125 seq_puts(m
, " hist_data->fields[0]:\n");
5126 ret
= hist_field_debug_show(m
, hist_data
->fields
[0],
5127 HIST_FIELD_FL_HITCOUNT
);
5131 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
5132 seq_printf(m
, "\n hist_data->fields[%d]:\n", i
);
5133 ret
= hist_field_debug_show(m
, hist_data
->fields
[i
], 0);
5138 seq_puts(m
, "\n key fields:\n");
5140 for (i
= hist_data
->n_vals
; i
< hist_data
->n_fields
; i
++) {
5141 seq_printf(m
, "\n hist_data->fields[%d]:\n", i
);
5142 ret
= hist_field_debug_show(m
, hist_data
->fields
[i
],
5148 if (hist_data
->n_var_refs
)
5149 seq_puts(m
, "\n variable reference fields:\n");
5151 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
5152 seq_printf(m
, "\n hist_data->var_refs[%d]:\n", i
);
5153 ret
= hist_field_debug_show(m
, hist_data
->var_refs
[i
],
5154 HIST_FIELD_FL_VAR_REF
);
5159 if (hist_data
->n_field_vars
)
5160 seq_puts(m
, "\n field variables:\n");
5162 for (i
= 0; i
< hist_data
->n_field_vars
; i
++) {
5163 ret
= field_var_debug_show(m
, hist_data
->field_vars
[i
], i
, false);
5168 ret
= hist_actions_debug_show(m
, hist_data
);
5173 static int hist_debug_show(struct seq_file
*m
, void *v
)
5175 struct event_trigger_data
*data
;
5176 struct trace_event_file
*event_file
;
5179 mutex_lock(&event_mutex
);
5181 event_file
= event_file_data(m
->private);
5182 if (unlikely(!event_file
)) {
5187 list_for_each_entry(data
, &event_file
->triggers
, list
) {
5188 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
5189 hist_trigger_debug_show(m
, data
, n
++);
5193 mutex_unlock(&event_mutex
);
5198 static int event_hist_debug_open(struct inode
*inode
, struct file
*file
)
5202 ret
= security_locked_down(LOCKDOWN_TRACEFS
);
5206 return single_open(file
, hist_debug_show
, file
);
5209 const struct file_operations event_hist_debug_fops
= {
5210 .open
= event_hist_debug_open
,
5212 .llseek
= seq_lseek
,
5213 .release
= single_release
,
5217 static void hist_field_print(struct seq_file
*m
, struct hist_field
*hist_field
)
5219 const char *field_name
= hist_field_name(hist_field
, 0);
5221 if (hist_field
->var
.name
)
5222 seq_printf(m
, "%s=", hist_field
->var
.name
);
5224 if (hist_field
->flags
& HIST_FIELD_FL_CPU
)
5225 seq_puts(m
, "common_cpu");
5226 else if (field_name
) {
5227 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
||
5228 hist_field
->flags
& HIST_FIELD_FL_ALIAS
)
5230 seq_printf(m
, "%s", field_name
);
5231 } else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
5232 seq_puts(m
, "common_timestamp");
5234 if (hist_field
->flags
) {
5235 if (!(hist_field
->flags
& HIST_FIELD_FL_VAR_REF
) &&
5236 !(hist_field
->flags
& HIST_FIELD_FL_EXPR
)) {
5237 const char *flags
= get_hist_field_flags(hist_field
);
5240 seq_printf(m
, ".%s", flags
);
5243 if (hist_field
->buckets
)
5244 seq_printf(m
, "=%ld", hist_field
->buckets
);
5247 static int event_hist_trigger_print(struct seq_file
*m
,
5248 struct event_trigger_ops
*ops
,
5249 struct event_trigger_data
*data
)
5251 struct hist_trigger_data
*hist_data
= data
->private_data
;
5252 struct hist_field
*field
;
5253 bool have_var
= false;
5256 seq_puts(m
, "hist:");
5259 seq_printf(m
, "%s:", data
->name
);
5261 seq_puts(m
, "keys=");
5263 for_each_hist_key_field(i
, hist_data
) {
5264 field
= hist_data
->fields
[i
];
5266 if (i
> hist_data
->n_vals
)
5269 if (field
->flags
& HIST_FIELD_FL_STACKTRACE
)
5270 seq_puts(m
, "stacktrace");
5272 hist_field_print(m
, field
);
5275 seq_puts(m
, ":vals=");
5277 for_each_hist_val_field(i
, hist_data
) {
5278 field
= hist_data
->fields
[i
];
5279 if (field
->flags
& HIST_FIELD_FL_VAR
) {
5284 if (i
== HITCOUNT_IDX
)
5285 seq_puts(m
, "hitcount");
5288 hist_field_print(m
, field
);
5297 for_each_hist_val_field(i
, hist_data
) {
5298 field
= hist_data
->fields
[i
];
5300 if (field
->flags
& HIST_FIELD_FL_VAR
) {
5303 hist_field_print(m
, field
);
5308 seq_puts(m
, ":sort=");
5310 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
5311 struct tracing_map_sort_key
*sort_key
;
5312 unsigned int idx
, first_key_idx
;
5315 first_key_idx
= hist_data
->n_vals
- hist_data
->n_vars
;
5317 sort_key
= &hist_data
->sort_keys
[i
];
5318 idx
= sort_key
->field_idx
;
5320 if (WARN_ON(idx
>= HIST_FIELDS_MAX
))
5326 if (idx
== HITCOUNT_IDX
)
5327 seq_puts(m
, "hitcount");
5329 if (idx
>= first_key_idx
)
5330 idx
+= hist_data
->n_vars
;
5331 hist_field_print(m
, hist_data
->fields
[idx
]);
5334 if (sort_key
->descending
)
5335 seq_puts(m
, ".descending");
5337 seq_printf(m
, ":size=%u", (1 << hist_data
->map
->map_bits
));
5338 if (hist_data
->enable_timestamps
)
5339 seq_printf(m
, ":clock=%s", hist_data
->attrs
->clock
);
5341 print_actions_spec(m
, hist_data
);
5343 if (data
->filter_str
)
5344 seq_printf(m
, " if %s", data
->filter_str
);
5347 seq_puts(m
, " [paused]");
5349 seq_puts(m
, " [active]");
5356 static int event_hist_trigger_init(struct event_trigger_ops
*ops
,
5357 struct event_trigger_data
*data
)
5359 struct hist_trigger_data
*hist_data
= data
->private_data
;
5361 if (!data
->ref
&& hist_data
->attrs
->name
)
5362 save_named_trigger(hist_data
->attrs
->name
, data
);
5369 static void unregister_field_var_hists(struct hist_trigger_data
*hist_data
)
5371 struct trace_event_file
*file
;
5376 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
5377 file
= hist_data
->field_var_hists
[i
]->hist_data
->event_file
;
5378 cmd
= hist_data
->field_var_hists
[i
]->cmd
;
5379 ret
= event_hist_trigger_func(&trigger_hist_cmd
, file
,
5380 "!hist", "hist", cmd
);
5381 WARN_ON_ONCE(ret
< 0);
5385 static void event_hist_trigger_free(struct event_trigger_ops
*ops
,
5386 struct event_trigger_data
*data
)
5388 struct hist_trigger_data
*hist_data
= data
->private_data
;
5390 if (WARN_ON_ONCE(data
->ref
<= 0))
5396 del_named_trigger(data
);
5398 trigger_data_free(data
);
5400 remove_hist_vars(hist_data
);
5402 unregister_field_var_hists(hist_data
);
5404 destroy_hist_data(hist_data
);
5408 static struct event_trigger_ops event_hist_trigger_ops
= {
5409 .func
= event_hist_trigger
,
5410 .print
= event_hist_trigger_print
,
5411 .init
= event_hist_trigger_init
,
5412 .free
= event_hist_trigger_free
,
5415 static int event_hist_trigger_named_init(struct event_trigger_ops
*ops
,
5416 struct event_trigger_data
*data
)
5420 save_named_trigger(data
->named_data
->name
, data
);
5422 event_hist_trigger_init(ops
, data
->named_data
);
5427 static void event_hist_trigger_named_free(struct event_trigger_ops
*ops
,
5428 struct event_trigger_data
*data
)
5430 if (WARN_ON_ONCE(data
->ref
<= 0))
5433 event_hist_trigger_free(ops
, data
->named_data
);
5437 del_named_trigger(data
);
5438 trigger_data_free(data
);
5442 static struct event_trigger_ops event_hist_trigger_named_ops
= {
5443 .func
= event_hist_trigger
,
5444 .print
= event_hist_trigger_print
,
5445 .init
= event_hist_trigger_named_init
,
5446 .free
= event_hist_trigger_named_free
,
5449 static struct event_trigger_ops
*event_hist_get_trigger_ops(char *cmd
,
5452 return &event_hist_trigger_ops
;
/*
 * hist_clear() - wipe all accumulated entries from a hist trigger's map.
 *
 * NOTE(review): lossy fragment -- braces and the conditional guards
 * around the pause/unpause calls (line numbers skip 5459/5466) are on
 * missing lines.  Code lines are byte-identical; only comments added.
 */
5455 static void hist_clear(struct event_trigger_data
*data
)
5457 struct hist_trigger_data
*hist_data
= data
->private_data
;
/* Quiesce the trigger so no writer races the clear below. */
5460 pause_named_trigger(data
);
/* Wait for in-flight tracepoint callbacks to drain before clearing. */
5462 tracepoint_synchronize_unregister();
5464 tracing_map_clear(hist_data
->map
);
5467 unpause_named_trigger(data
);
/*
 * compatible_field() - decide whether two ftrace event fields can be
 * treated as the same field for hist-trigger matching.
 *
 * NOTE(review): lossy fragment -- the return statements that followed
 * each comparison (line numbers skip 5474, 5476, 5478, ...) are missing
 * from this view.  Code lines are byte-identical; only comments added.
 */
5470 static bool compatible_field(struct ftrace_event_field
*field
,
5471 struct ftrace_event_field
*test_field
)
/* Identical pointers are trivially the same field. */
5473 if (field
== test_field
)
/* Either side NULL: result is on the missing line -- presumably false. */
5475 if (field
== NULL
|| test_field
== NULL
)
/* Otherwise compare name, type, size and signedness in turn. */
5477 if (strcmp(field
->name
, test_field
->name
) != 0)
5479 if (strcmp(field
->type
, test_field
->type
) != 0)
5481 if (field
->size
!= test_field
->size
)
5483 if (field
->is_signed
!= test_field
->is_signed
)
5489 static bool hist_trigger_match(struct event_trigger_data
*data
,
5490 struct event_trigger_data
*data_test
,
5491 struct event_trigger_data
*named_data
,
5494 struct tracing_map_sort_key
*sort_key
, *sort_key_test
;
5495 struct hist_trigger_data
*hist_data
, *hist_data_test
;
5496 struct hist_field
*key_field
, *key_field_test
;
5499 if (named_data
&& (named_data
!= data_test
) &&
5500 (named_data
!= data_test
->named_data
))
5503 if (!named_data
&& is_named_trigger(data_test
))
5506 hist_data
= data
->private_data
;
5507 hist_data_test
= data_test
->private_data
;
5509 if (hist_data
->n_vals
!= hist_data_test
->n_vals
||
5510 hist_data
->n_fields
!= hist_data_test
->n_fields
||
5511 hist_data
->n_sort_keys
!= hist_data_test
->n_sort_keys
)
5514 if (!ignore_filter
) {
5515 if ((data
->filter_str
&& !data_test
->filter_str
) ||
5516 (!data
->filter_str
&& data_test
->filter_str
))
5520 for_each_hist_field(i
, hist_data
) {
5521 key_field
= hist_data
->fields
[i
];
5522 key_field_test
= hist_data_test
->fields
[i
];
5524 if (key_field
->flags
!= key_field_test
->flags
)
5526 if (!compatible_field(key_field
->field
, key_field_test
->field
))
5528 if (key_field
->offset
!= key_field_test
->offset
)
5530 if (key_field
->size
!= key_field_test
->size
)
5532 if (key_field
->is_signed
!= key_field_test
->is_signed
)
5534 if (!!key_field
->var
.name
!= !!key_field_test
->var
.name
)
5536 if (key_field
->var
.name
&&
5537 strcmp(key_field
->var
.name
, key_field_test
->var
.name
) != 0)
5541 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
5542 sort_key
= &hist_data
->sort_keys
[i
];
5543 sort_key_test
= &hist_data_test
->sort_keys
[i
];
5545 if (sort_key
->field_idx
!= sort_key_test
->field_idx
||
5546 sort_key
->descending
!= sort_key_test
->descending
)
5550 if (!ignore_filter
&& data
->filter_str
&&
5551 (strcmp(data
->filter_str
, data_test
->filter_str
) != 0))
5554 if (!actions_match(hist_data
, hist_data_test
))
5560 static int hist_register_trigger(char *glob
, struct event_trigger_ops
*ops
,
5561 struct event_trigger_data
*data
,
5562 struct trace_event_file
*file
)
5564 struct hist_trigger_data
*hist_data
= data
->private_data
;
5565 struct event_trigger_data
*test
, *named_data
= NULL
;
5566 struct trace_array
*tr
= file
->tr
;
5569 if (hist_data
->attrs
->name
) {
5570 named_data
= find_named_trigger(hist_data
->attrs
->name
);
5572 if (!hist_trigger_match(data
, named_data
, named_data
,
5574 hist_err(tr
, HIST_ERR_NAMED_MISMATCH
, errpos(hist_data
->attrs
->name
));
5581 if (hist_data
->attrs
->name
&& !named_data
)
5584 lockdep_assert_held(&event_mutex
);
5586 list_for_each_entry(test
, &file
->triggers
, list
) {
5587 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5588 if (!hist_trigger_match(data
, test
, named_data
, false))
5590 if (hist_data
->attrs
->pause
)
5591 test
->paused
= true;
5592 else if (hist_data
->attrs
->cont
)
5593 test
->paused
= false;
5594 else if (hist_data
->attrs
->clear
)
5597 hist_err(tr
, HIST_ERR_TRIGGER_EEXIST
, 0);
5604 if (hist_data
->attrs
->cont
|| hist_data
->attrs
->clear
) {
5605 hist_err(tr
, HIST_ERR_TRIGGER_ENOENT_CLEAR
, 0);
5610 if (hist_data
->attrs
->pause
)
5611 data
->paused
= true;
5614 data
->private_data
= named_data
->private_data
;
5615 set_named_trigger_data(data
, named_data
);
5616 data
->ops
= &event_hist_trigger_named_ops
;
5619 if (data
->ops
->init
) {
5620 ret
= data
->ops
->init(data
->ops
, data
);
5625 if (hist_data
->enable_timestamps
) {
5626 char *clock
= hist_data
->attrs
->clock
;
5628 ret
= tracing_set_clock(file
->tr
, hist_data
->attrs
->clock
);
5630 hist_err(tr
, HIST_ERR_SET_CLOCK_FAIL
, errpos(clock
));
5634 tracing_set_filter_buffering(file
->tr
, true);
5638 destroy_hist_data(hist_data
);
5645 static int hist_trigger_enable(struct event_trigger_data
*data
,
5646 struct trace_event_file
*file
)
5650 list_add_tail_rcu(&data
->list
, &file
->triggers
);
5652 update_cond_flag(file
);
5654 if (trace_event_trigger_enable_disable(file
, 1) < 0) {
5655 list_del_rcu(&data
->list
);
5656 update_cond_flag(file
);
5663 static bool have_hist_trigger_match(struct event_trigger_data
*data
,
5664 struct trace_event_file
*file
)
5666 struct hist_trigger_data
*hist_data
= data
->private_data
;
5667 struct event_trigger_data
*test
, *named_data
= NULL
;
5670 lockdep_assert_held(&event_mutex
);
5672 if (hist_data
->attrs
->name
)
5673 named_data
= find_named_trigger(hist_data
->attrs
->name
);
5675 list_for_each_entry(test
, &file
->triggers
, list
) {
5676 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5677 if (hist_trigger_match(data
, test
, named_data
, false)) {
5687 static bool hist_trigger_check_refs(struct event_trigger_data
*data
,
5688 struct trace_event_file
*file
)
5690 struct hist_trigger_data
*hist_data
= data
->private_data
;
5691 struct event_trigger_data
*test
, *named_data
= NULL
;
5693 lockdep_assert_held(&event_mutex
);
5695 if (hist_data
->attrs
->name
)
5696 named_data
= find_named_trigger(hist_data
->attrs
->name
);
5698 list_for_each_entry(test
, &file
->triggers
, list
) {
5699 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5700 if (!hist_trigger_match(data
, test
, named_data
, false))
5702 hist_data
= test
->private_data
;
5703 if (check_var_refs(hist_data
))
5712 static void hist_unregister_trigger(char *glob
, struct event_trigger_ops
*ops
,
5713 struct event_trigger_data
*data
,
5714 struct trace_event_file
*file
)
5716 struct hist_trigger_data
*hist_data
= data
->private_data
;
5717 struct event_trigger_data
*test
, *named_data
= NULL
;
5718 bool unregistered
= false;
5720 lockdep_assert_held(&event_mutex
);
5722 if (hist_data
->attrs
->name
)
5723 named_data
= find_named_trigger(hist_data
->attrs
->name
);
5725 list_for_each_entry(test
, &file
->triggers
, list
) {
5726 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5727 if (!hist_trigger_match(data
, test
, named_data
, false))
5729 unregistered
= true;
5730 list_del_rcu(&test
->list
);
5731 trace_event_trigger_enable_disable(file
, 0);
5732 update_cond_flag(file
);
5737 if (unregistered
&& test
->ops
->free
)
5738 test
->ops
->free(test
->ops
, test
);
5740 if (hist_data
->enable_timestamps
) {
5741 if (!hist_data
->remove
|| unregistered
)
5742 tracing_set_filter_buffering(file
->tr
, false);
5746 static bool hist_file_check_refs(struct trace_event_file
*file
)
5748 struct hist_trigger_data
*hist_data
;
5749 struct event_trigger_data
*test
;
5751 lockdep_assert_held(&event_mutex
);
5753 list_for_each_entry(test
, &file
->triggers
, list
) {
5754 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5755 hist_data
= test
->private_data
;
5756 if (check_var_refs(hist_data
))
5764 static void hist_unreg_all(struct trace_event_file
*file
)
5766 struct event_trigger_data
*test
, *n
;
5767 struct hist_trigger_data
*hist_data
;
5768 struct synth_event
*se
;
5769 const char *se_name
;
5771 lockdep_assert_held(&event_mutex
);
5773 if (hist_file_check_refs(file
))
5776 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
5777 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
5778 hist_data
= test
->private_data
;
5779 list_del_rcu(&test
->list
);
5780 trace_event_trigger_enable_disable(file
, 0);
5782 se_name
= trace_event_name(file
->event_call
);
5783 se
= find_synth_event(se_name
);
5787 update_cond_flag(file
);
5788 if (hist_data
->enable_timestamps
)
5789 tracing_set_filter_buffering(file
->tr
, false);
5790 if (test
->ops
->free
)
5791 test
->ops
->free(test
->ops
, test
);
5796 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
5797 struct trace_event_file
*file
,
5798 char *glob
, char *cmd
, char *param
)
5800 unsigned int hist_trigger_bits
= TRACING_MAP_BITS_DEFAULT
;
5801 struct event_trigger_data
*trigger_data
;
5802 struct hist_trigger_attrs
*attrs
;
5803 struct event_trigger_ops
*trigger_ops
;
5804 struct hist_trigger_data
*hist_data
;
5805 struct synth_event
*se
;
5806 const char *se_name
;
5807 bool remove
= false;
5811 lockdep_assert_held(&event_mutex
);
5813 if (glob
&& strlen(glob
)) {
5815 last_cmd_set(file
, param
);
5825 * separate the trigger from the filter (k:v [if filter])
5826 * allowing for whitespace in the trigger
5828 p
= trigger
= param
;
5830 p
= strstr(p
, "if");
5835 if (*(p
- 1) != ' ' && *(p
- 1) != '\t') {
5839 if (p
>= param
+ strlen(param
) - (sizeof("if") - 1) - 1)
5841 if (*(p
+ sizeof("if") - 1) != ' ' && *(p
+ sizeof("if") - 1) != '\t') {
5852 param
= strstrip(p
);
5853 trigger
= strstrip(trigger
);
5856 attrs
= parse_hist_trigger_attrs(file
->tr
, trigger
);
5858 return PTR_ERR(attrs
);
5860 if (attrs
->map_bits
)
5861 hist_trigger_bits
= attrs
->map_bits
;
5863 hist_data
= create_hist_data(hist_trigger_bits
, attrs
, file
, remove
);
5864 if (IS_ERR(hist_data
)) {
5865 destroy_hist_trigger_attrs(attrs
);
5866 return PTR_ERR(hist_data
);
5869 trigger_ops
= cmd_ops
->get_trigger_ops(cmd
, trigger
);
5871 trigger_data
= kzalloc(sizeof(*trigger_data
), GFP_KERNEL
);
5872 if (!trigger_data
) {
5877 trigger_data
->count
= -1;
5878 trigger_data
->ops
= trigger_ops
;
5879 trigger_data
->cmd_ops
= cmd_ops
;
5881 INIT_LIST_HEAD(&trigger_data
->list
);
5882 RCU_INIT_POINTER(trigger_data
->filter
, NULL
);
5884 trigger_data
->private_data
= hist_data
;
5886 /* if param is non-empty, it's supposed to be a filter */
5887 if (param
&& cmd_ops
->set_filter
) {
5888 ret
= cmd_ops
->set_filter(param
, trigger_data
, file
);
5894 if (!have_hist_trigger_match(trigger_data
, file
))
5897 if (hist_trigger_check_refs(trigger_data
, file
)) {
5902 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
5903 se_name
= trace_event_name(file
->event_call
);
5904 se
= find_synth_event(se_name
);
5911 ret
= cmd_ops
->reg(glob
, trigger_ops
, trigger_data
, file
);
5913 * The above returns on success the # of triggers registered,
5914 * but if it didn't register any it returns zero. Consider no
5915 * triggers registered a failure too.
5918 if (!(attrs
->pause
|| attrs
->cont
|| attrs
->clear
))
5924 if (get_named_trigger_data(trigger_data
))
5927 if (has_hist_vars(hist_data
))
5928 save_hist_vars(hist_data
);
5930 ret
= create_actions(hist_data
);
5934 ret
= tracing_map_init(hist_data
->map
);
5938 ret
= hist_trigger_enable(trigger_data
, file
);
5942 se_name
= trace_event_name(file
->event_call
);
5943 se
= find_synth_event(se_name
);
5946 /* Just return zero, not the number of registered triggers */
5954 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
5956 if (cmd_ops
->set_filter
)
5957 cmd_ops
->set_filter(NULL
, trigger_data
, NULL
);
5959 remove_hist_vars(hist_data
);
5961 kfree(trigger_data
);
5963 destroy_hist_data(hist_data
);
5967 static struct event_command trigger_hist_cmd
= {
5969 .trigger_type
= ETT_EVENT_HIST
,
5970 .flags
= EVENT_CMD_FL_NEEDS_REC
,
5971 .func
= event_hist_trigger_func
,
5972 .reg
= hist_register_trigger
,
5973 .unreg
= hist_unregister_trigger
,
5974 .unreg_all
= hist_unreg_all
,
5975 .get_trigger_ops
= event_hist_get_trigger_ops
,
5976 .set_filter
= set_trigger_filter
,
5979 __init
int register_trigger_hist_cmd(void)
5983 ret
= register_event_command(&trigger_hist_cmd
);
5990 hist_enable_trigger(struct event_trigger_data
*data
,
5991 struct trace_buffer
*buffer
, void *rec
,
5992 struct ring_buffer_event
*event
)
5994 struct enable_trigger_data
*enable_data
= data
->private_data
;
5995 struct event_trigger_data
*test
;
5997 list_for_each_entry_rcu(test
, &enable_data
->file
->triggers
, list
,
5998 lockdep_is_held(&event_mutex
)) {
5999 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6000 if (enable_data
->enable
)
6001 test
->paused
= false;
6003 test
->paused
= true;
6009 hist_enable_count_trigger(struct event_trigger_data
*data
,
6010 struct trace_buffer
*buffer
, void *rec
,
6011 struct ring_buffer_event
*event
)
6016 if (data
->count
!= -1)
6019 hist_enable_trigger(data
, buffer
, rec
, event
);
6022 static struct event_trigger_ops hist_enable_trigger_ops
= {
6023 .func
= hist_enable_trigger
,
6024 .print
= event_enable_trigger_print
,
6025 .init
= event_trigger_init
,
6026 .free
= event_enable_trigger_free
,
6029 static struct event_trigger_ops hist_enable_count_trigger_ops
= {
6030 .func
= hist_enable_count_trigger
,
6031 .print
= event_enable_trigger_print
,
6032 .init
= event_trigger_init
,
6033 .free
= event_enable_trigger_free
,
6036 static struct event_trigger_ops hist_disable_trigger_ops
= {
6037 .func
= hist_enable_trigger
,
6038 .print
= event_enable_trigger_print
,
6039 .init
= event_trigger_init
,
6040 .free
= event_enable_trigger_free
,
6043 static struct event_trigger_ops hist_disable_count_trigger_ops
= {
6044 .func
= hist_enable_count_trigger
,
6045 .print
= event_enable_trigger_print
,
6046 .init
= event_trigger_init
,
6047 .free
= event_enable_trigger_free
,
6050 static struct event_trigger_ops
*
6051 hist_enable_get_trigger_ops(char *cmd
, char *param
)
6053 struct event_trigger_ops
*ops
;
6056 enable
= (strcmp(cmd
, ENABLE_HIST_STR
) == 0);
6059 ops
= param
? &hist_enable_count_trigger_ops
:
6060 &hist_enable_trigger_ops
;
6062 ops
= param
? &hist_disable_count_trigger_ops
:
6063 &hist_disable_trigger_ops
;
6068 static void hist_enable_unreg_all(struct trace_event_file
*file
)
6070 struct event_trigger_data
*test
, *n
;
6072 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
6073 if (test
->cmd_ops
->trigger_type
== ETT_HIST_ENABLE
) {
6074 list_del_rcu(&test
->list
);
6075 update_cond_flag(file
);
6076 trace_event_trigger_enable_disable(file
, 0);
6077 if (test
->ops
->free
)
6078 test
->ops
->free(test
->ops
, test
);
6083 static struct event_command trigger_hist_enable_cmd
= {
6084 .name
= ENABLE_HIST_STR
,
6085 .trigger_type
= ETT_HIST_ENABLE
,
6086 .func
= event_enable_trigger_func
,
6087 .reg
= event_enable_register_trigger
,
6088 .unreg
= event_enable_unregister_trigger
,
6089 .unreg_all
= hist_enable_unreg_all
,
6090 .get_trigger_ops
= hist_enable_get_trigger_ops
,
6091 .set_filter
= set_trigger_filter
,
6094 static struct event_command trigger_hist_disable_cmd
= {
6095 .name
= DISABLE_HIST_STR
,
6096 .trigger_type
= ETT_HIST_ENABLE
,
6097 .func
= event_enable_trigger_func
,
6098 .reg
= event_enable_register_trigger
,
6099 .unreg
= event_enable_unregister_trigger
,
6100 .unreg_all
= hist_enable_unreg_all
,
6101 .get_trigger_ops
= hist_enable_get_trigger_ops
,
6102 .set_filter
= set_trigger_filter
,
6105 static __init
void unregister_trigger_hist_enable_disable_cmds(void)
6107 unregister_event_command(&trigger_hist_enable_cmd
);
6108 unregister_event_command(&trigger_hist_disable_cmd
);
6111 __init
int register_trigger_hist_enable_disable_cmds(void)
6115 ret
= register_event_command(&trigger_hist_enable_cmd
);
6116 if (WARN_ON(ret
< 0))
6118 ret
= register_event_command(&trigger_hist_disable_cmd
);
6119 if (WARN_ON(ret
< 0))
6120 unregister_trigger_hist_enable_disable_cmds();