1 // SPDX-License-Identifier: GPL-2.0
3 * trace_events_hist - trace event hist triggers
5 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
21 #include "tracing_map.h"
23 #include "trace_dynevent.h"
25 #define SYNTH_SYSTEM "synthetic"
26 #define SYNTH_FIELDS_MAX 32
28 #define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */
31 C(NONE, "No error"), \
32 C(DUPLICATE_VAR, "Variable already defined"), \
33 C(VAR_NOT_UNIQUE, "Variable name not unique, need to use fully qualified name (subsys.event.var) for variable"), \
34 C(TOO_MANY_VARS, "Too many variables defined"), \
35 C(MALFORMED_ASSIGNMENT, "Malformed assignment"), \
36 C(NAMED_MISMATCH, "Named hist trigger doesn't match existing named trigger (includes variables)"), \
37 C(TRIGGER_EEXIST, "Hist trigger already exists"), \
38 C(TRIGGER_ENOENT_CLEAR, "Can't clear or continue a nonexistent hist trigger"), \
39 C(SET_CLOCK_FAIL, "Couldn't set trace_clock"), \
40 C(BAD_FIELD_MODIFIER, "Invalid field modifier"), \
41 C(TOO_MANY_SUBEXPR, "Too many subexpressions (3 max)"), \
42 C(TIMESTAMP_MISMATCH, "Timestamp units in expression don't match"), \
43 C(TOO_MANY_FIELD_VARS, "Too many field variables defined"), \
44 C(EVENT_FILE_NOT_FOUND, "Event file not found"), \
45 C(HIST_NOT_FOUND, "Matching event histogram not found"), \
46 C(HIST_CREATE_FAIL, "Couldn't create histogram for field"), \
47 C(SYNTH_VAR_NOT_FOUND, "Couldn't find synthetic variable"), \
48 C(SYNTH_EVENT_NOT_FOUND,"Couldn't find synthetic event"), \
49 C(SYNTH_TYPE_MISMATCH, "Param type doesn't match synthetic event field type"), \
50 C(SYNTH_COUNT_MISMATCH, "Param count doesn't match synthetic event field count"), \
51 C(FIELD_VAR_PARSE_FAIL, "Couldn't parse field variable"), \
52 C(VAR_CREATE_FIND_FAIL, "Couldn't create or find variable"), \
53 C(ONX_NOT_VAR, "For onmax(x) or onchange(x), x must be a variable"), \
54 C(ONX_VAR_NOT_FOUND, "Couldn't find onmax or onchange variable"), \
55 C(ONX_VAR_CREATE_FAIL, "Couldn't create onmax or onchange variable"), \
56 C(FIELD_VAR_CREATE_FAIL,"Couldn't create field variable"), \
57 C(TOO_MANY_PARAMS, "Too many action params"), \
58 C(PARAM_NOT_FOUND, "Couldn't find param"), \
59 C(INVALID_PARAM, "Invalid action param"), \
60 C(ACTION_NOT_FOUND, "No action found"), \
61 C(NO_SAVE_PARAMS, "No params found for save()"), \
62 C(TOO_MANY_SAVE_ACTIONS,"Can't have more than one save() action per hist"), \
63 C(ACTION_MISMATCH, "Handler doesn't support action"), \
64 C(NO_CLOSING_PAREN, "No closing paren found"), \
65 C(SUBSYS_NOT_FOUND, "Missing subsystem"), \
66 C(INVALID_SUBSYS_EVENT, "Invalid subsystem or event name"), \
67 C(INVALID_REF_KEY, "Using variable references in keys not supported"), \
68 C(VAR_NOT_FOUND, "Couldn't find variable"), \
69 C(FIELD_NOT_FOUND, "Couldn't find field"), \
70 C(EMPTY_ASSIGNMENT, "Empty assignment"), \
71 C(INVALID_SORT_MODIFIER,"Invalid sort modifier"), \
72 C(EMPTY_SORT_FIELD, "Empty sort field"), \
73 C(TOO_MANY_SORT_FIELDS, "Too many sort fields (Max = 2)"), \
74 C(INVALID_SORT_FIELD, "Sort field must be a key or a val"),
77 #define C(a, b) HIST_ERR_##a
84 static const char *err_text
[] = { ERRORS
};
88 typedef u64 (*hist_field_fn_t
) (struct hist_field
*field
,
89 struct tracing_map_elt
*elt
,
90 struct ring_buffer_event
*rbe
,
93 #define HIST_FIELD_OPERANDS_MAX 2
94 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
95 #define HIST_ACTIONS_MAX 8
101 FIELD_OP_UNARY_MINUS
,
105 * A hist_var (histogram variable) contains variable information for
106 * hist_fields having the HIST_FIELD_FL_VAR or HIST_FIELD_FL_VAR_REF
107 * flag set. A hist_var has a variable name e.g. ts0, and is
108 * associated with a given histogram trigger, as specified by
109 * hist_data. The hist_var idx is the unique index assigned to the
110 * variable by the hist trigger's tracing_map. The idx is what is
111 * used to set a variable's value and, by a variable reference, to
116 struct hist_trigger_data
*hist_data
;
121 struct ftrace_event_field
*field
;
126 unsigned int is_signed
;
128 struct hist_field
*operands
[HIST_FIELD_OPERANDS_MAX
];
129 struct hist_trigger_data
*hist_data
;
132 * Variable fields contain variable-specific info in var.
135 enum field_op_id
operator;
140 * The name field is used for EXPR and VAR_REF fields. VAR
141 * fields contain the variable name in var.name.
146 * When a histogram trigger is hit, if it has any references
147 * to variables, the values of those variables are collected
148 * into a var_ref_vals array by resolve_var_refs(). The
149 * current value of each variable is read from the tracing_map
150 * using the hist field's hist_var.idx and entered into the
151 * var_ref_idx entry i.e. var_ref_vals[var_ref_idx].
153 unsigned int var_ref_idx
;
157 static u64
hist_field_none(struct hist_field
*field
,
158 struct tracing_map_elt
*elt
,
159 struct ring_buffer_event
*rbe
,
165 static u64
hist_field_counter(struct hist_field
*field
,
166 struct tracing_map_elt
*elt
,
167 struct ring_buffer_event
*rbe
,
173 static u64
hist_field_string(struct hist_field
*hist_field
,
174 struct tracing_map_elt
*elt
,
175 struct ring_buffer_event
*rbe
,
178 char *addr
= (char *)(event
+ hist_field
->field
->offset
);
180 return (u64
)(unsigned long)addr
;
183 static u64
hist_field_dynstring(struct hist_field
*hist_field
,
184 struct tracing_map_elt
*elt
,
185 struct ring_buffer_event
*rbe
,
188 u32 str_item
= *(u32
*)(event
+ hist_field
->field
->offset
);
189 int str_loc
= str_item
& 0xffff;
190 char *addr
= (char *)(event
+ str_loc
);
192 return (u64
)(unsigned long)addr
;
195 static u64
hist_field_pstring(struct hist_field
*hist_field
,
196 struct tracing_map_elt
*elt
,
197 struct ring_buffer_event
*rbe
,
200 char **addr
= (char **)(event
+ hist_field
->field
->offset
);
202 return (u64
)(unsigned long)*addr
;
205 static u64
hist_field_log2(struct hist_field
*hist_field
,
206 struct tracing_map_elt
*elt
,
207 struct ring_buffer_event
*rbe
,
210 struct hist_field
*operand
= hist_field
->operands
[0];
212 u64 val
= operand
->fn(operand
, elt
, rbe
, event
);
214 return (u64
) ilog2(roundup_pow_of_two(val
));
217 static u64
hist_field_plus(struct hist_field
*hist_field
,
218 struct tracing_map_elt
*elt
,
219 struct ring_buffer_event
*rbe
,
222 struct hist_field
*operand1
= hist_field
->operands
[0];
223 struct hist_field
*operand2
= hist_field
->operands
[1];
225 u64 val1
= operand1
->fn(operand1
, elt
, rbe
, event
);
226 u64 val2
= operand2
->fn(operand2
, elt
, rbe
, event
);
231 static u64
hist_field_minus(struct hist_field
*hist_field
,
232 struct tracing_map_elt
*elt
,
233 struct ring_buffer_event
*rbe
,
236 struct hist_field
*operand1
= hist_field
->operands
[0];
237 struct hist_field
*operand2
= hist_field
->operands
[1];
239 u64 val1
= operand1
->fn(operand1
, elt
, rbe
, event
);
240 u64 val2
= operand2
->fn(operand2
, elt
, rbe
, event
);
245 static u64
hist_field_unary_minus(struct hist_field
*hist_field
,
246 struct tracing_map_elt
*elt
,
247 struct ring_buffer_event
*rbe
,
250 struct hist_field
*operand
= hist_field
->operands
[0];
252 s64 sval
= (s64
)operand
->fn(operand
, elt
, rbe
, event
);
253 u64 val
= (u64
)-sval
;
258 #define DEFINE_HIST_FIELD_FN(type) \
259 static u64 hist_field_##type(struct hist_field *hist_field, \
260 struct tracing_map_elt *elt, \
261 struct ring_buffer_event *rbe, \
264 type *addr = (type *)(event + hist_field->field->offset); \
266 return (u64)(unsigned long)*addr; \
269 DEFINE_HIST_FIELD_FN(s64
);
270 DEFINE_HIST_FIELD_FN(u64
);
271 DEFINE_HIST_FIELD_FN(s32
);
272 DEFINE_HIST_FIELD_FN(u32
);
273 DEFINE_HIST_FIELD_FN(s16
);
274 DEFINE_HIST_FIELD_FN(u16
);
275 DEFINE_HIST_FIELD_FN(s8
);
276 DEFINE_HIST_FIELD_FN(u8
);
278 #define for_each_hist_field(i, hist_data) \
279 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
281 #define for_each_hist_val_field(i, hist_data) \
282 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
284 #define for_each_hist_key_field(i, hist_data) \
285 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
287 #define HIST_STACKTRACE_DEPTH 16
288 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
289 #define HIST_STACKTRACE_SKIP 5
291 #define HITCOUNT_IDX 0
292 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
294 enum hist_field_flags
{
295 HIST_FIELD_FL_HITCOUNT
= 1 << 0,
296 HIST_FIELD_FL_KEY
= 1 << 1,
297 HIST_FIELD_FL_STRING
= 1 << 2,
298 HIST_FIELD_FL_HEX
= 1 << 3,
299 HIST_FIELD_FL_SYM
= 1 << 4,
300 HIST_FIELD_FL_SYM_OFFSET
= 1 << 5,
301 HIST_FIELD_FL_EXECNAME
= 1 << 6,
302 HIST_FIELD_FL_SYSCALL
= 1 << 7,
303 HIST_FIELD_FL_STACKTRACE
= 1 << 8,
304 HIST_FIELD_FL_LOG2
= 1 << 9,
305 HIST_FIELD_FL_TIMESTAMP
= 1 << 10,
306 HIST_FIELD_FL_TIMESTAMP_USECS
= 1 << 11,
307 HIST_FIELD_FL_VAR
= 1 << 12,
308 HIST_FIELD_FL_EXPR
= 1 << 13,
309 HIST_FIELD_FL_VAR_REF
= 1 << 14,
310 HIST_FIELD_FL_CPU
= 1 << 15,
311 HIST_FIELD_FL_ALIAS
= 1 << 16,
316 char *name
[TRACING_MAP_VARS_MAX
];
317 char *expr
[TRACING_MAP_VARS_MAX
];
320 struct hist_trigger_attrs
{
330 unsigned int map_bits
;
332 char *assignment_str
[TRACING_MAP_VARS_MAX
];
333 unsigned int n_assignments
;
335 char *action_str
[HIST_ACTIONS_MAX
];
336 unsigned int n_actions
;
338 struct var_defs var_defs
;
342 struct hist_field
*var
;
343 struct hist_field
*val
;
346 struct field_var_hist
{
347 struct hist_trigger_data
*hist_data
;
351 struct hist_trigger_data
{
352 struct hist_field
*fields
[HIST_FIELDS_MAX
];
355 unsigned int n_fields
;
357 unsigned int key_size
;
358 struct tracing_map_sort_key sort_keys
[TRACING_MAP_SORT_KEYS_MAX
];
359 unsigned int n_sort_keys
;
360 struct trace_event_file
*event_file
;
361 struct hist_trigger_attrs
*attrs
;
362 struct tracing_map
*map
;
363 bool enable_timestamps
;
365 struct hist_field
*var_refs
[TRACING_MAP_VARS_MAX
];
366 unsigned int n_var_refs
;
368 struct action_data
*actions
[HIST_ACTIONS_MAX
];
369 unsigned int n_actions
;
371 struct field_var
*field_vars
[SYNTH_FIELDS_MAX
];
372 unsigned int n_field_vars
;
373 unsigned int n_field_var_str
;
374 struct field_var_hist
*field_var_hists
[SYNTH_FIELDS_MAX
];
375 unsigned int n_field_var_hists
;
377 struct field_var
*save_vars
[SYNTH_FIELDS_MAX
];
378 unsigned int n_save_vars
;
379 unsigned int n_save_var_str
;
382 static int create_synth_event(int argc
, const char **argv
);
383 static int synth_event_show(struct seq_file
*m
, struct dyn_event
*ev
);
384 static int synth_event_release(struct dyn_event
*ev
);
385 static bool synth_event_is_busy(struct dyn_event
*ev
);
386 static bool synth_event_match(const char *system
, const char *event
,
387 int argc
, const char **argv
, struct dyn_event
*ev
);
389 static struct dyn_event_operations synth_event_ops
= {
390 .create
= create_synth_event
,
391 .show
= synth_event_show
,
392 .is_busy
= synth_event_is_busy
,
393 .free
= synth_event_release
,
394 .match
= synth_event_match
,
407 struct dyn_event devent
;
410 struct synth_field
**fields
;
411 unsigned int n_fields
;
413 struct trace_event_class
class;
414 struct trace_event_call call
;
415 struct tracepoint
*tp
;
419 static bool is_synth_event(struct dyn_event
*ev
)
421 return ev
->ops
== &synth_event_ops
;
424 static struct synth_event
*to_synth_event(struct dyn_event
*ev
)
426 return container_of(ev
, struct synth_event
, devent
);
429 static bool synth_event_is_busy(struct dyn_event
*ev
)
431 struct synth_event
*event
= to_synth_event(ev
);
433 return event
->ref
!= 0;
436 static bool synth_event_match(const char *system
, const char *event
,
437 int argc
, const char **argv
, struct dyn_event
*ev
)
439 struct synth_event
*sev
= to_synth_event(ev
);
441 return strcmp(sev
->name
, event
) == 0 &&
442 (!system
|| strcmp(system
, SYNTH_SYSTEM
) == 0);
447 typedef void (*action_fn_t
) (struct hist_trigger_data
*hist_data
,
448 struct tracing_map_elt
*elt
, void *rec
,
449 struct ring_buffer_event
*rbe
, void *key
,
450 struct action_data
*data
, u64
*var_ref_vals
);
452 typedef bool (*check_track_val_fn_t
) (u64 track_val
, u64 var_val
);
467 enum handler_id handler
;
468 enum action_id action
;
472 unsigned int n_params
;
473 char *params
[SYNTH_FIELDS_MAX
];
476 * When a histogram trigger is hit, the values of any
477 * references to variables, including variables being passed
478 * as parameters to synthetic events, are collected into a
479 * var_ref_vals array. This var_ref_idx array is an array of
480 * indices into the var_ref_vals array, one for each synthetic
481 * event param, and is passed to the synthetic event
484 unsigned int var_ref_idx
[TRACING_MAP_VARS_MAX
];
485 struct synth_event
*synth_event
;
486 bool use_trace_keyword
;
487 char *synth_event_name
;
497 * var_str contains the $-unstripped variable
498 * name referenced by var_ref, and used when
499 * printing the action. Because var_ref
500 * creation is deferred to create_actions(),
501 * we need a per-action way to save it until
502 * then, thus var_str.
507 * var_ref refers to the variable being
508 * tracked e.g onmax($var).
510 struct hist_field
*var_ref
;
513 * track_var contains the 'invisible' tracking
514 * variable created to keep the current
517 struct hist_field
*track_var
;
519 check_track_val_fn_t check_val
;
520 action_fn_t save_data
;
529 unsigned int key_len
;
531 struct tracing_map_elt elt
;
533 struct action_data
*action_data
;
534 struct hist_trigger_data
*hist_data
;
537 struct hist_elt_data
{
540 char *field_var_str
[SYNTH_FIELDS_MAX
];
543 struct snapshot_context
{
544 struct tracing_map_elt
*elt
;
548 static void track_data_free(struct track_data
*track_data
)
550 struct hist_elt_data
*elt_data
;
555 kfree(track_data
->key
);
557 elt_data
= track_data
->elt
.private_data
;
559 kfree(elt_data
->comm
);
566 static struct track_data
*track_data_alloc(unsigned int key_len
,
567 struct action_data
*action_data
,
568 struct hist_trigger_data
*hist_data
)
570 struct track_data
*data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
571 struct hist_elt_data
*elt_data
;
574 return ERR_PTR(-ENOMEM
);
576 data
->key
= kzalloc(key_len
, GFP_KERNEL
);
578 track_data_free(data
);
579 return ERR_PTR(-ENOMEM
);
582 data
->key_len
= key_len
;
583 data
->action_data
= action_data
;
584 data
->hist_data
= hist_data
;
586 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
588 track_data_free(data
);
589 return ERR_PTR(-ENOMEM
);
591 data
->elt
.private_data
= elt_data
;
593 elt_data
->comm
= kzalloc(TASK_COMM_LEN
, GFP_KERNEL
);
594 if (!elt_data
->comm
) {
595 track_data_free(data
);
596 return ERR_PTR(-ENOMEM
);
602 static char last_cmd
[MAX_FILTER_STR_VAL
];
603 static char last_cmd_loc
[MAX_FILTER_STR_VAL
];
605 static int errpos(char *str
)
607 return err_pos(last_cmd
, str
);
610 static void last_cmd_set(struct trace_event_file
*file
, char *str
)
612 const char *system
= NULL
, *name
= NULL
;
613 struct trace_event_call
*call
;
618 strcpy(last_cmd
, "hist:");
619 strncat(last_cmd
, str
, MAX_FILTER_STR_VAL
- 1 - sizeof("hist:"));
622 call
= file
->event_call
;
624 system
= call
->class->system
;
626 name
= trace_event_name(call
);
633 snprintf(last_cmd_loc
, MAX_FILTER_STR_VAL
, "hist:%s:%s", system
, name
);
636 static void hist_err(struct trace_array
*tr
, u8 err_type
, u8 err_pos
)
638 tracing_log_err(tr
, last_cmd_loc
, last_cmd
, err_text
,
642 static void hist_err_clear(void)
645 last_cmd_loc
[0] = '\0';
648 struct synth_trace_event
{
649 struct trace_entry ent
;
653 static int synth_event_define_fields(struct trace_event_call
*call
)
655 struct synth_trace_event trace
;
656 int offset
= offsetof(typeof(trace
), fields
);
657 struct synth_event
*event
= call
->data
;
658 unsigned int i
, size
, n_u64
;
663 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
664 size
= event
->fields
[i
]->size
;
665 is_signed
= event
->fields
[i
]->is_signed
;
666 type
= event
->fields
[i
]->type
;
667 name
= event
->fields
[i
]->name
;
668 ret
= trace_define_field(call
, type
, name
, offset
, size
,
669 is_signed
, FILTER_OTHER
);
673 event
->fields
[i
]->offset
= n_u64
;
675 if (event
->fields
[i
]->is_string
) {
676 offset
+= STR_VAR_LEN_MAX
;
677 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
679 offset
+= sizeof(u64
);
684 event
->n_u64
= n_u64
;
689 static bool synth_field_signed(char *type
)
691 if (str_has_prefix(type
, "u"))
693 if (strcmp(type
, "gfp_t") == 0)
/*
 * A synthetic field is a string iff its type contains "char[",
 * i.e. it was declared as a char array.
 */
static int synth_field_is_string(char *type)
{
	if (strstr(type, "char[") != NULL)
		return true;

	return false;
}
707 static int synth_field_string_size(char *type
)
709 char buf
[4], *end
, *start
;
713 start
= strstr(type
, "char[");
716 start
+= sizeof("char[") - 1;
718 end
= strchr(type
, ']');
719 if (!end
|| end
< start
)
726 strncpy(buf
, start
, len
);
729 err
= kstrtouint(buf
, 0, &size
);
733 if (size
> STR_VAR_LEN_MAX
)
739 static int synth_field_size(char *type
)
743 if (strcmp(type
, "s64") == 0)
745 else if (strcmp(type
, "u64") == 0)
747 else if (strcmp(type
, "s32") == 0)
749 else if (strcmp(type
, "u32") == 0)
751 else if (strcmp(type
, "s16") == 0)
753 else if (strcmp(type
, "u16") == 0)
755 else if (strcmp(type
, "s8") == 0)
757 else if (strcmp(type
, "u8") == 0)
759 else if (strcmp(type
, "char") == 0)
761 else if (strcmp(type
, "unsigned char") == 0)
762 size
= sizeof(unsigned char);
763 else if (strcmp(type
, "int") == 0)
765 else if (strcmp(type
, "unsigned int") == 0)
766 size
= sizeof(unsigned int);
767 else if (strcmp(type
, "long") == 0)
769 else if (strcmp(type
, "unsigned long") == 0)
770 size
= sizeof(unsigned long);
771 else if (strcmp(type
, "pid_t") == 0)
772 size
= sizeof(pid_t
);
773 else if (strcmp(type
, "gfp_t") == 0)
774 size
= sizeof(gfp_t
);
775 else if (synth_field_is_string(type
))
776 size
= synth_field_string_size(type
);
781 static const char *synth_field_fmt(char *type
)
783 const char *fmt
= "%llu";
785 if (strcmp(type
, "s64") == 0)
787 else if (strcmp(type
, "u64") == 0)
789 else if (strcmp(type
, "s32") == 0)
791 else if (strcmp(type
, "u32") == 0)
793 else if (strcmp(type
, "s16") == 0)
795 else if (strcmp(type
, "u16") == 0)
797 else if (strcmp(type
, "s8") == 0)
799 else if (strcmp(type
, "u8") == 0)
801 else if (strcmp(type
, "char") == 0)
803 else if (strcmp(type
, "unsigned char") == 0)
805 else if (strcmp(type
, "int") == 0)
807 else if (strcmp(type
, "unsigned int") == 0)
809 else if (strcmp(type
, "long") == 0)
811 else if (strcmp(type
, "unsigned long") == 0)
813 else if (strcmp(type
, "pid_t") == 0)
815 else if (strcmp(type
, "gfp_t") == 0)
817 else if (synth_field_is_string(type
))
823 static enum print_line_t
print_synth_event(struct trace_iterator
*iter
,
825 struct trace_event
*event
)
827 struct trace_array
*tr
= iter
->tr
;
828 struct trace_seq
*s
= &iter
->seq
;
829 struct synth_trace_event
*entry
;
830 struct synth_event
*se
;
831 unsigned int i
, n_u64
;
835 entry
= (struct synth_trace_event
*)iter
->ent
;
836 se
= container_of(event
, struct synth_event
, call
.event
);
838 trace_seq_printf(s
, "%s: ", se
->name
);
840 for (i
= 0, n_u64
= 0; i
< se
->n_fields
; i
++) {
841 if (trace_seq_has_overflowed(s
))
844 fmt
= synth_field_fmt(se
->fields
[i
]->type
);
846 /* parameter types */
847 if (tr
&& tr
->trace_flags
& TRACE_ITER_VERBOSE
)
848 trace_seq_printf(s
, "%s ", fmt
);
850 snprintf(print_fmt
, sizeof(print_fmt
), "%%s=%s%%s", fmt
);
852 /* parameter values */
853 if (se
->fields
[i
]->is_string
) {
854 trace_seq_printf(s
, print_fmt
, se
->fields
[i
]->name
,
855 (char *)&entry
->fields
[n_u64
],
856 i
== se
->n_fields
- 1 ? "" : " ");
857 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
859 struct trace_print_flags __flags
[] = {
860 __def_gfpflag_names
, {-1, NULL
} };
862 trace_seq_printf(s
, print_fmt
, se
->fields
[i
]->name
,
863 entry
->fields
[n_u64
],
864 i
== se
->n_fields
- 1 ? "" : " ");
866 if (strcmp(se
->fields
[i
]->type
, "gfp_t") == 0) {
867 trace_seq_puts(s
, " (");
868 trace_print_flags_seq(s
, "|",
869 entry
->fields
[n_u64
],
871 trace_seq_putc(s
, ')');
877 trace_seq_putc(s
, '\n');
879 return trace_handle_return(s
);
882 static struct trace_event_functions synth_event_funcs
= {
883 .trace
= print_synth_event
886 static notrace
void trace_event_raw_event_synth(void *__data
,
888 unsigned int *var_ref_idx
)
890 struct trace_event_file
*trace_file
= __data
;
891 struct synth_trace_event
*entry
;
892 struct trace_event_buffer fbuffer
;
893 struct trace_buffer
*buffer
;
894 struct synth_event
*event
;
895 unsigned int i
, n_u64
, val_idx
;
898 event
= trace_file
->event_call
->data
;
900 if (trace_trigger_soft_disabled(trace_file
))
903 fields_size
= event
->n_u64
* sizeof(u64
);
906 * Avoid ring buffer recursion detection, as this event
907 * is being performed within another event.
909 buffer
= trace_file
->tr
->array_buffer
.buffer
;
910 ring_buffer_nest_start(buffer
);
912 entry
= trace_event_buffer_reserve(&fbuffer
, trace_file
,
913 sizeof(*entry
) + fields_size
);
917 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
918 val_idx
= var_ref_idx
[i
];
919 if (event
->fields
[i
]->is_string
) {
920 char *str_val
= (char *)(long)var_ref_vals
[val_idx
];
921 char *str_field
= (char *)&entry
->fields
[n_u64
];
923 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
924 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
926 struct synth_field
*field
= event
->fields
[i
];
927 u64 val
= var_ref_vals
[val_idx
];
929 switch (field
->size
) {
931 *(u8
*)&entry
->fields
[n_u64
] = (u8
)val
;
935 *(u16
*)&entry
->fields
[n_u64
] = (u16
)val
;
939 *(u32
*)&entry
->fields
[n_u64
] = (u32
)val
;
943 entry
->fields
[n_u64
] = val
;
950 trace_event_buffer_commit(&fbuffer
);
952 ring_buffer_nest_end(buffer
);
955 static void free_synth_event_print_fmt(struct trace_event_call
*call
)
958 kfree(call
->print_fmt
);
959 call
->print_fmt
= NULL
;
963 static int __set_synth_event_print_fmt(struct synth_event
*event
,
970 /* When len=0, we just calculate the needed length */
971 #define LEN_OR_ZERO (len ? len - pos : 0)
973 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "\"");
974 for (i
= 0; i
< event
->n_fields
; i
++) {
975 fmt
= synth_field_fmt(event
->fields
[i
]->type
);
976 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "%s=%s%s",
977 event
->fields
[i
]->name
, fmt
,
978 i
== event
->n_fields
- 1 ? "" : ", ");
980 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
, "\"");
982 for (i
= 0; i
< event
->n_fields
; i
++) {
983 pos
+= snprintf(buf
+ pos
, LEN_OR_ZERO
,
984 ", REC->%s", event
->fields
[i
]->name
);
989 /* return the length of print_fmt */
993 static int set_synth_event_print_fmt(struct trace_event_call
*call
)
995 struct synth_event
*event
= call
->data
;
999 /* First: called with 0 length to calculate the needed length */
1000 len
= __set_synth_event_print_fmt(event
, NULL
, 0);
1002 print_fmt
= kmalloc(len
+ 1, GFP_KERNEL
);
1006 /* Second: actually write the @print_fmt */
1007 __set_synth_event_print_fmt(event
, print_fmt
, len
+ 1);
1008 call
->print_fmt
= print_fmt
;
1013 static void free_synth_field(struct synth_field
*field
)
1020 static struct synth_field
*parse_synth_field(int argc
, const char **argv
,
1023 struct synth_field
*field
;
1024 const char *prefix
= NULL
, *field_type
= argv
[0], *field_name
, *array
;
1027 if (field_type
[0] == ';')
1030 if (!strcmp(field_type
, "unsigned")) {
1032 return ERR_PTR(-EINVAL
);
1033 prefix
= "unsigned ";
1034 field_type
= argv
[1];
1035 field_name
= argv
[2];
1038 field_name
= argv
[1];
1042 field
= kzalloc(sizeof(*field
), GFP_KERNEL
);
1044 return ERR_PTR(-ENOMEM
);
1046 len
= strlen(field_name
);
1047 array
= strchr(field_name
, '[');
1049 len
-= strlen(array
);
1050 else if (field_name
[len
- 1] == ';')
1053 field
->name
= kmemdup_nul(field_name
, len
, GFP_KERNEL
);
1059 if (field_type
[0] == ';')
1061 len
= strlen(field_type
) + 1;
1063 len
+= strlen(array
);
1065 len
+= strlen(prefix
);
1067 field
->type
= kzalloc(len
, GFP_KERNEL
);
1073 strcat(field
->type
, prefix
);
1074 strcat(field
->type
, field_type
);
1076 strcat(field
->type
, array
);
1077 if (field
->type
[len
- 1] == ';')
1078 field
->type
[len
- 1] = '\0';
1081 field
->size
= synth_field_size(field
->type
);
1087 if (synth_field_is_string(field
->type
))
1088 field
->is_string
= true;
1090 field
->is_signed
= synth_field_signed(field
->type
);
1095 free_synth_field(field
);
1096 field
= ERR_PTR(ret
);
1100 static void free_synth_tracepoint(struct tracepoint
*tp
)
1109 static struct tracepoint
*alloc_synth_tracepoint(char *name
)
1111 struct tracepoint
*tp
;
1113 tp
= kzalloc(sizeof(*tp
), GFP_KERNEL
);
1115 return ERR_PTR(-ENOMEM
);
1117 tp
->name
= kstrdup(name
, GFP_KERNEL
);
1120 return ERR_PTR(-ENOMEM
);
1126 typedef void (*synth_probe_func_t
) (void *__data
, u64
*var_ref_vals
,
1127 unsigned int *var_ref_idx
);
1129 static inline void trace_synth(struct synth_event
*event
, u64
*var_ref_vals
,
1130 unsigned int *var_ref_idx
)
1132 struct tracepoint
*tp
= event
->tp
;
1134 if (unlikely(atomic_read(&tp
->key
.enabled
) > 0)) {
1135 struct tracepoint_func
*probe_func_ptr
;
1136 synth_probe_func_t probe_func
;
1139 if (!(cpu_online(raw_smp_processor_id())))
1142 probe_func_ptr
= rcu_dereference_sched((tp
)->funcs
);
1143 if (probe_func_ptr
) {
1145 probe_func
= probe_func_ptr
->func
;
1146 __data
= probe_func_ptr
->data
;
1147 probe_func(__data
, var_ref_vals
, var_ref_idx
);
1148 } while ((++probe_func_ptr
)->func
);
1153 static struct synth_event
*find_synth_event(const char *name
)
1155 struct dyn_event
*pos
;
1156 struct synth_event
*event
;
1158 for_each_dyn_event(pos
) {
1159 if (!is_synth_event(pos
))
1161 event
= to_synth_event(pos
);
1162 if (strcmp(event
->name
, name
) == 0)
1169 static int register_synth_event(struct synth_event
*event
)
1171 struct trace_event_call
*call
= &event
->call
;
1174 event
->call
.class = &event
->class;
1175 event
->class.system
= kstrdup(SYNTH_SYSTEM
, GFP_KERNEL
);
1176 if (!event
->class.system
) {
1181 event
->tp
= alloc_synth_tracepoint(event
->name
);
1182 if (IS_ERR(event
->tp
)) {
1183 ret
= PTR_ERR(event
->tp
);
1188 INIT_LIST_HEAD(&call
->class->fields
);
1189 call
->event
.funcs
= &synth_event_funcs
;
1190 call
->class->define_fields
= synth_event_define_fields
;
1192 ret
= register_trace_event(&call
->event
);
1197 call
->flags
= TRACE_EVENT_FL_TRACEPOINT
;
1198 call
->class->reg
= trace_event_reg
;
1199 call
->class->probe
= trace_event_raw_event_synth
;
1201 call
->tp
= event
->tp
;
1203 ret
= trace_add_event_call(call
);
1205 pr_warn("Failed to register synthetic event: %s\n",
1206 trace_event_name(call
));
1210 ret
= set_synth_event_print_fmt(call
);
1212 trace_remove_event_call(call
);
1218 unregister_trace_event(&call
->event
);
1222 static int unregister_synth_event(struct synth_event
*event
)
1224 struct trace_event_call
*call
= &event
->call
;
1227 ret
= trace_remove_event_call(call
);
1232 static void free_synth_event(struct synth_event
*event
)
1239 for (i
= 0; i
< event
->n_fields
; i
++)
1240 free_synth_field(event
->fields
[i
]);
1242 kfree(event
->fields
);
1244 kfree(event
->class.system
);
1245 free_synth_tracepoint(event
->tp
);
1246 free_synth_event_print_fmt(&event
->call
);
1250 static struct synth_event
*alloc_synth_event(const char *name
, int n_fields
,
1251 struct synth_field
**fields
)
1253 struct synth_event
*event
;
1256 event
= kzalloc(sizeof(*event
), GFP_KERNEL
);
1258 event
= ERR_PTR(-ENOMEM
);
1262 event
->name
= kstrdup(name
, GFP_KERNEL
);
1265 event
= ERR_PTR(-ENOMEM
);
1269 event
->fields
= kcalloc(n_fields
, sizeof(*event
->fields
), GFP_KERNEL
);
1270 if (!event
->fields
) {
1271 free_synth_event(event
);
1272 event
= ERR_PTR(-ENOMEM
);
1276 dyn_event_init(&event
->devent
, &synth_event_ops
);
1278 for (i
= 0; i
< n_fields
; i
++)
1279 event
->fields
[i
] = fields
[i
];
1281 event
->n_fields
= n_fields
;
1286 static void action_trace(struct hist_trigger_data
*hist_data
,
1287 struct tracing_map_elt
*elt
, void *rec
,
1288 struct ring_buffer_event
*rbe
, void *key
,
1289 struct action_data
*data
, u64
*var_ref_vals
)
1291 struct synth_event
*event
= data
->synth_event
;
1293 trace_synth(event
, var_ref_vals
, data
->var_ref_idx
);
1296 struct hist_var_data
{
1297 struct list_head list
;
1298 struct hist_trigger_data
*hist_data
;
1301 static int synth_event_check_arg_fn(void *data
)
1303 struct dynevent_arg_pair
*arg_pair
= data
;
1306 size
= synth_field_size((char *)arg_pair
->lhs
);
1308 return size
? 0 : -EINVAL
;
1312 * synth_event_add_field - Add a new field to a synthetic event cmd
1313 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1314 * @type: The type of the new field to add
1315 * @name: The name of the new field to add
1317 * Add a new field to a synthetic event cmd object. Field ordering is in
1318 * the same order the fields are added.
1320 * See synth_field_size() for available types. If field_name contains
1321 * [n] the field is considered to be an array.
1323 * Return: 0 if successful, error otherwise.
1325 int synth_event_add_field(struct dynevent_cmd
*cmd
, const char *type
,
1328 struct dynevent_arg_pair arg_pair
;
1331 if (cmd
->type
!= DYNEVENT_TYPE_SYNTH
)
1337 dynevent_arg_pair_init(&arg_pair
, 0, ';');
1339 arg_pair
.lhs
= type
;
1340 arg_pair
.rhs
= name
;
1342 ret
= dynevent_arg_pair_add(cmd
, &arg_pair
, synth_event_check_arg_fn
);
1346 if (++cmd
->n_fields
> SYNTH_FIELDS_MAX
)
1351 EXPORT_SYMBOL_GPL(synth_event_add_field
);
1354 * synth_event_add_field_str - Add a new field to a synthetic event cmd
1355 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1356 * @type_name: The type and name of the new field to add, as a single string
1358 * Add a new field to a synthetic event cmd object, as a single
1359 * string. The @type_name string is expected to be of the form 'type
1360 * name', which will be appended by ';'. No sanity checking is done -
1361 * what's passed in is assumed to already be well-formed. Field
1362 * ordering is in the same order the fields are added.
1364 * See synth_field_size() for available types. If field_name contains
1365 * [n] the field is considered to be an array.
1367 * Return: 0 if successful, error otherwise.
1369 int synth_event_add_field_str(struct dynevent_cmd
*cmd
, const char *type_name
)
1371 struct dynevent_arg arg
;
1374 if (cmd
->type
!= DYNEVENT_TYPE_SYNTH
)
1380 dynevent_arg_init(&arg
, ';');
1382 arg
.str
= type_name
;
1384 ret
= dynevent_arg_add(cmd
, &arg
, NULL
);
1388 if (++cmd
->n_fields
> SYNTH_FIELDS_MAX
)
1393 EXPORT_SYMBOL_GPL(synth_event_add_field_str
);
1396 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1397 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1398 * @fields: An array of type/name field descriptions
1399 * @n_fields: The number of field descriptions contained in the fields array
1401 * Add a new set of fields to a synthetic event cmd object. The event
1402 * fields that will be defined for the event should be passed in as an
1403 * array of struct synth_field_desc, and the number of elements in the
1404 * array passed in as n_fields. Field ordering will retain the
1405 * ordering given in the fields array.
1407 * See synth_field_size() for available types. If field_name contains
1408 * [n] the field is considered to be an array.
1410 * Return: 0 if successful, error otherwise.
1412 int synth_event_add_fields(struct dynevent_cmd
*cmd
,
1413 struct synth_field_desc
*fields
,
1414 unsigned int n_fields
)
1419 for (i
= 0; i
< n_fields
; i
++) {
1420 if (fields
[i
].type
== NULL
|| fields
[i
].name
== NULL
) {
1425 ret
= synth_event_add_field(cmd
, fields
[i
].type
, fields
[i
].name
);
1432 EXPORT_SYMBOL_GPL(synth_event_add_fields
);
1435 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1436 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1437 * @name: The name of the synthetic event
1438 * @mod: The module creating the event, NULL if not created from a module
1439 * @args: Variable number of arg (pairs), one pair for each field
1441 * NOTE: Users normally won't want to call this function directly, but
1442 * rather use the synth_event_gen_cmd_start() wrapper, which
1443 * automatically adds a NULL to the end of the arg list. If this
1444 * function is used directly, make sure the last arg in the variable
1447 * Generate a synthetic event command to be executed by
1448 * synth_event_gen_cmd_end(). This function can be used to generate
1449 * the complete command or only the first part of it; in the latter
1450 * case, synth_event_add_field(), synth_event_add_field_str(), or
1451 * synth_event_add_fields() can be used to add more fields following
1454 * There should be an even number variable args, each pair consisting
1455 * of a type followed by a field name.
1457 * See synth_field_size() for available types. If field_name contains
1458 * [n] the field is considered to be an array.
1460 * Return: 0 if successful, error otherwise.
1462 int __synth_event_gen_cmd_start(struct dynevent_cmd
*cmd
, const char *name
,
1463 struct module
*mod
, ...)
1465 struct dynevent_arg arg
;
1469 cmd
->event_name
= name
;
1470 cmd
->private_data
= mod
;
1472 if (cmd
->type
!= DYNEVENT_TYPE_SYNTH
)
1475 dynevent_arg_init(&arg
, 0);
1477 ret
= dynevent_arg_add(cmd
, &arg
, NULL
);
1481 va_start(args
, mod
);
1483 const char *type
, *name
;
1485 type
= va_arg(args
, const char *);
1488 name
= va_arg(args
, const char *);
1492 if (++cmd
->n_fields
> SYNTH_FIELDS_MAX
) {
1497 ret
= synth_event_add_field(cmd
, type
, name
);
1505 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start
);
1508 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1509 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1510 * @name: The name of the synthetic event
1511 * @fields: An array of type/name field descriptions
1512 * @n_fields: The number of field descriptions contained in the fields array
1514 * Generate a synthetic event command to be executed by
1515 * synth_event_gen_cmd_end(). This function can be used to generate
1516 * the complete command or only the first part of it; in the latter
1517 * case, synth_event_add_field(), synth_event_add_field_str(), or
1518 * synth_event_add_fields() can be used to add more fields following
1521 * The event fields that will be defined for the event should be
1522 * passed in as an array of struct synth_field_desc, and the number of
1523 * elements in the array passed in as n_fields. Field ordering will
1524 * retain the ordering given in the fields array.
1526 * See synth_field_size() for available types. If field_name contains
1527 * [n] the field is considered to be an array.
1529 * Return: 0 if successful, error otherwise.
1531 int synth_event_gen_cmd_array_start(struct dynevent_cmd
*cmd
, const char *name
,
1533 struct synth_field_desc
*fields
,
1534 unsigned int n_fields
)
1536 struct dynevent_arg arg
;
1540 cmd
->event_name
= name
;
1541 cmd
->private_data
= mod
;
1543 if (cmd
->type
!= DYNEVENT_TYPE_SYNTH
)
1546 if (n_fields
> SYNTH_FIELDS_MAX
)
1549 dynevent_arg_init(&arg
, 0);
1551 ret
= dynevent_arg_add(cmd
, &arg
, NULL
);
1555 for (i
= 0; i
< n_fields
; i
++) {
1556 if (fields
[i
].type
== NULL
|| fields
[i
].name
== NULL
)
1559 ret
= synth_event_add_field(cmd
, fields
[i
].type
, fields
[i
].name
);
1566 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start
);
1568 static int __create_synth_event(int argc
, const char *name
, const char **argv
)
1570 struct synth_field
*field
, *fields
[SYNTH_FIELDS_MAX
];
1571 struct synth_event
*event
= NULL
;
1572 int i
, consumed
= 0, n_fields
= 0, ret
= 0;
1576 * - Add synthetic event: <event_name> field[;field] ...
1577 * - Remove synthetic event: !<event_name> field[;field] ...
1578 * where 'field' = type field_name
1581 if (name
[0] == '\0' || argc
< 1)
1584 mutex_lock(&event_mutex
);
1586 event
= find_synth_event(name
);
1592 for (i
= 0; i
< argc
- 1; i
++) {
1593 if (strcmp(argv
[i
], ";") == 0)
1595 if (n_fields
== SYNTH_FIELDS_MAX
) {
1600 field
= parse_synth_field(argc
- i
, &argv
[i
], &consumed
);
1601 if (IS_ERR(field
)) {
1602 ret
= PTR_ERR(field
);
1605 fields
[n_fields
++] = field
;
1609 if (i
< argc
&& strcmp(argv
[i
], ";") != 0) {
1614 event
= alloc_synth_event(name
, n_fields
, fields
);
1615 if (IS_ERR(event
)) {
1616 ret
= PTR_ERR(event
);
1620 ret
= register_synth_event(event
);
1622 dyn_event_add(&event
->devent
);
1624 free_synth_event(event
);
1626 mutex_unlock(&event_mutex
);
1630 for (i
= 0; i
< n_fields
; i
++)
1631 free_synth_field(fields
[i
]);
1637 * synth_event_create - Create a new synthetic event
1638 * @name: The name of the new sythetic event
1639 * @fields: An array of type/name field descriptions
1640 * @n_fields: The number of field descriptions contained in the fields array
1641 * @mod: The module creating the event, NULL if not created from a module
1643 * Create a new synthetic event with the given name under the
1644 * trace/events/synthetic/ directory. The event fields that will be
1645 * defined for the event should be passed in as an array of struct
1646 * synth_field_desc, and the number elements in the array passed in as
1647 * n_fields. Field ordering will retain the ordering given in the
1650 * If the new synthetic event is being created from a module, the mod
1651 * param must be non-NULL. This will ensure that the trace buffer
1652 * won't contain unreadable events.
1654 * The new synth event should be deleted using synth_event_delete()
1655 * function. The new synthetic event can be generated from modules or
1656 * other kernel code using trace_synth_event() and related functions.
1658 * Return: 0 if successful, error otherwise.
1660 int synth_event_create(const char *name
, struct synth_field_desc
*fields
,
1661 unsigned int n_fields
, struct module
*mod
)
1663 struct dynevent_cmd cmd
;
1667 buf
= kzalloc(MAX_DYNEVENT_CMD_LEN
, GFP_KERNEL
);
1671 synth_event_cmd_init(&cmd
, buf
, MAX_DYNEVENT_CMD_LEN
);
1673 ret
= synth_event_gen_cmd_array_start(&cmd
, name
, mod
,
1678 ret
= synth_event_gen_cmd_end(&cmd
);
1684 EXPORT_SYMBOL_GPL(synth_event_create
);
1686 static int destroy_synth_event(struct synth_event
*se
)
1693 ret
= unregister_synth_event(se
);
1695 dyn_event_remove(&se
->devent
);
1696 free_synth_event(se
);
1704 * synth_event_delete - Delete a synthetic event
1705 * @event_name: The name of the new sythetic event
1707 * Delete a synthetic event that was created with synth_event_create().
1709 * Return: 0 if successful, error otherwise.
1711 int synth_event_delete(const char *event_name
)
1713 struct synth_event
*se
= NULL
;
1714 struct module
*mod
= NULL
;
1717 mutex_lock(&event_mutex
);
1718 se
= find_synth_event(event_name
);
1721 ret
= destroy_synth_event(se
);
1723 mutex_unlock(&event_mutex
);
1726 mutex_lock(&trace_types_lock
);
1728 * It is safest to reset the ring buffer if the module
1729 * being unloaded registered any events that were
1730 * used. The only worry is if a new module gets
1731 * loaded, and takes on the same id as the events of
1732 * this module. When printing out the buffer, traced
1733 * events left over from this module may be passed to
1734 * the new module events and unexpected results may
1737 tracing_reset_all_online_cpus();
1738 mutex_unlock(&trace_types_lock
);
1743 EXPORT_SYMBOL_GPL(synth_event_delete
);
1745 static int create_or_delete_synth_event(int argc
, char **argv
)
1747 const char *name
= argv
[0];
1750 /* trace_run_command() ensures argc != 0 */
1751 if (name
[0] == '!') {
1752 ret
= synth_event_delete(name
+ 1);
1756 ret
= __create_synth_event(argc
- 1, name
, (const char **)argv
+ 1);
1757 return ret
== -ECANCELED
? -EINVAL
: ret
;
1760 static int synth_event_run_command(struct dynevent_cmd
*cmd
)
1762 struct synth_event
*se
;
1765 ret
= trace_run_command(cmd
->seq
.buffer
, create_or_delete_synth_event
);
1769 se
= find_synth_event(cmd
->event_name
);
1773 se
->mod
= cmd
->private_data
;
1779 * synth_event_cmd_init - Initialize a synthetic event command object
1780 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1781 * @buf: A pointer to the buffer used to build the command
1782 * @maxlen: The length of the buffer passed in @buf
1784 * Initialize a synthetic event command object. Use this before
1785 * calling any of the other dyenvent_cmd functions.
1787 void synth_event_cmd_init(struct dynevent_cmd
*cmd
, char *buf
, int maxlen
)
1789 dynevent_cmd_init(cmd
, buf
, maxlen
, DYNEVENT_TYPE_SYNTH
,
1790 synth_event_run_command
);
1792 EXPORT_SYMBOL_GPL(synth_event_cmd_init
);
1795 * synth_event_trace - Trace a synthetic event
1796 * @file: The trace_event_file representing the synthetic event
1797 * @n_vals: The number of values in vals
1798 * @args: Variable number of args containing the event values
1800 * Trace a synthetic event using the values passed in the variable
1803 * The argument list should be a list 'n_vals' u64 values. The number
1804 * of vals must match the number of field in the synthetic event, and
1805 * must be in the same order as the synthetic event fields.
1807 * All vals should be cast to u64, and string vals are just pointers
1808 * to strings, cast to u64. Strings will be copied into space
1809 * reserved in the event for the string, using these pointers.
1811 * Return: 0 on success, err otherwise.
1813 int synth_event_trace(struct trace_event_file
*file
, unsigned int n_vals
, ...)
1815 struct trace_event_buffer fbuffer
;
1816 struct synth_trace_event
*entry
;
1817 struct trace_buffer
*buffer
;
1818 struct synth_event
*event
;
1819 unsigned int i
, n_u64
;
1820 int fields_size
= 0;
1825 * Normal event generation doesn't get called at all unless
1826 * the ENABLED bit is set (which attaches the probe thus
1827 * allowing this code to be called, etc). Because this is
1828 * called directly by the user, we don't have that but we
1829 * still need to honor not logging when disabled.
1831 if (!(file
->flags
& EVENT_FILE_FL_ENABLED
) ||
1832 trace_trigger_soft_disabled(file
))
1835 event
= file
->event_call
->data
;
1837 if (n_vals
!= event
->n_fields
)
1840 fields_size
= event
->n_u64
* sizeof(u64
);
1843 * Avoid ring buffer recursion detection, as this event
1844 * is being performed within another event.
1846 buffer
= file
->tr
->array_buffer
.buffer
;
1847 ring_buffer_nest_start(buffer
);
1849 entry
= trace_event_buffer_reserve(&fbuffer
, file
,
1850 sizeof(*entry
) + fields_size
);
1856 va_start(args
, n_vals
);
1857 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
1860 val
= va_arg(args
, u64
);
1862 if (event
->fields
[i
]->is_string
) {
1863 char *str_val
= (char *)(long)val
;
1864 char *str_field
= (char *)&entry
->fields
[n_u64
];
1866 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
1867 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
1869 entry
->fields
[n_u64
] = val
;
1875 trace_event_buffer_commit(&fbuffer
);
1877 ring_buffer_nest_end(buffer
);
1881 EXPORT_SYMBOL_GPL(synth_event_trace
);
1884 * synth_event_trace_array - Trace a synthetic event from an array
1885 * @file: The trace_event_file representing the synthetic event
1886 * @vals: Array of values
1887 * @n_vals: The number of values in vals
1889 * Trace a synthetic event using the values passed in as 'vals'.
1891 * The 'vals' array is just an array of 'n_vals' u64. The number of
1892 * vals must match the number of field in the synthetic event, and
1893 * must be in the same order as the synthetic event fields.
1895 * All vals should be cast to u64, and string vals are just pointers
1896 * to strings, cast to u64. Strings will be copied into space
1897 * reserved in the event for the string, using these pointers.
1899 * Return: 0 on success, err otherwise.
1901 int synth_event_trace_array(struct trace_event_file
*file
, u64
*vals
,
1902 unsigned int n_vals
)
1904 struct trace_event_buffer fbuffer
;
1905 struct synth_trace_event
*entry
;
1906 struct trace_buffer
*buffer
;
1907 struct synth_event
*event
;
1908 unsigned int i
, n_u64
;
1909 int fields_size
= 0;
1913 * Normal event generation doesn't get called at all unless
1914 * the ENABLED bit is set (which attaches the probe thus
1915 * allowing this code to be called, etc). Because this is
1916 * called directly by the user, we don't have that but we
1917 * still need to honor not logging when disabled.
1919 if (!(file
->flags
& EVENT_FILE_FL_ENABLED
) ||
1920 trace_trigger_soft_disabled(file
))
1923 event
= file
->event_call
->data
;
1925 if (n_vals
!= event
->n_fields
)
1928 fields_size
= event
->n_u64
* sizeof(u64
);
1931 * Avoid ring buffer recursion detection, as this event
1932 * is being performed within another event.
1934 buffer
= file
->tr
->array_buffer
.buffer
;
1935 ring_buffer_nest_start(buffer
);
1937 entry
= trace_event_buffer_reserve(&fbuffer
, file
,
1938 sizeof(*entry
) + fields_size
);
1944 for (i
= 0, n_u64
= 0; i
< event
->n_fields
; i
++) {
1945 if (event
->fields
[i
]->is_string
) {
1946 char *str_val
= (char *)(long)vals
[i
];
1947 char *str_field
= (char *)&entry
->fields
[n_u64
];
1949 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
1950 n_u64
+= STR_VAR_LEN_MAX
/ sizeof(u64
);
1952 entry
->fields
[n_u64
] = vals
[i
];
1957 trace_event_buffer_commit(&fbuffer
);
1959 ring_buffer_nest_end(buffer
);
1963 EXPORT_SYMBOL_GPL(synth_event_trace_array
);
1966 * synth_event_trace_start - Start piecewise synthetic event trace
1967 * @file: The trace_event_file representing the synthetic event
1968 * @trace_state: A pointer to object tracking the piecewise trace state
1970 * Start the trace of a synthetic event field-by-field rather than all
1973 * This function 'opens' an event trace, which means space is reserved
1974 * for the event in the trace buffer, after which the event's
1975 * individual field values can be set through either
1976 * synth_event_add_next_val() or synth_event_add_val().
1978 * A pointer to a trace_state object is passed in, which will keep
1979 * track of the current event trace state until the event trace is
1980 * closed (and the event finally traced) using
1981 * synth_event_trace_end().
1983 * Note that synth_event_trace_end() must be called after all values
1984 * have been added for each event trace, regardless of whether adding
1985 * all field values succeeded or not.
1987 * Note also that for a given event trace, all fields must be added
1988 * using either synth_event_add_next_val() or synth_event_add_val()
1989 * but not both together or interleaved.
1991 * Return: 0 on success, err otherwise.
1993 int synth_event_trace_start(struct trace_event_file
*file
,
1994 struct synth_event_trace_state
*trace_state
)
1996 struct synth_trace_event
*entry
;
1997 int fields_size
= 0;
2005 memset(trace_state
, '\0', sizeof(*trace_state
));
2008 * Normal event tracing doesn't get called at all unless the
2009 * ENABLED bit is set (which attaches the probe thus allowing
2010 * this code to be called, etc). Because this is called
2011 * directly by the user, we don't have that but we still need
2012 * to honor not logging when disabled. For the the iterated
2013 * trace case, we save the enabed state upon start and just
2014 * ignore the following data calls.
2016 if (!(file
->flags
& EVENT_FILE_FL_ENABLED
) ||
2017 trace_trigger_soft_disabled(file
)) {
2018 trace_state
->enabled
= false;
2022 trace_state
->enabled
= true;
2024 trace_state
->event
= file
->event_call
->data
;
2026 fields_size
= trace_state
->event
->n_u64
* sizeof(u64
);
2029 * Avoid ring buffer recursion detection, as this event
2030 * is being performed within another event.
2032 trace_state
->buffer
= file
->tr
->array_buffer
.buffer
;
2033 ring_buffer_nest_start(trace_state
->buffer
);
2035 entry
= trace_event_buffer_reserve(&trace_state
->fbuffer
, file
,
2036 sizeof(*entry
) + fields_size
);
2038 ring_buffer_nest_end(trace_state
->buffer
);
2043 trace_state
->entry
= entry
;
2047 EXPORT_SYMBOL_GPL(synth_event_trace_start
);
2049 static int __synth_event_add_val(const char *field_name
, u64 val
,
2050 struct synth_event_trace_state
*trace_state
)
2052 struct synth_field
*field
= NULL
;
2053 struct synth_trace_event
*entry
;
2054 struct synth_event
*event
;
2062 /* can't mix add_next_synth_val() with add_synth_val() */
2064 if (trace_state
->add_next
) {
2068 trace_state
->add_name
= true;
2070 if (trace_state
->add_name
) {
2074 trace_state
->add_next
= true;
2077 if (!trace_state
->enabled
)
2080 event
= trace_state
->event
;
2081 if (trace_state
->add_name
) {
2082 for (i
= 0; i
< event
->n_fields
; i
++) {
2083 field
= event
->fields
[i
];
2084 if (strcmp(field
->name
, field_name
) == 0)
2092 if (trace_state
->cur_field
>= event
->n_fields
) {
2096 field
= event
->fields
[trace_state
->cur_field
++];
2099 entry
= trace_state
->entry
;
2100 if (field
->is_string
) {
2101 char *str_val
= (char *)(long)val
;
2109 str_field
= (char *)&entry
->fields
[field
->offset
];
2110 strscpy(str_field
, str_val
, STR_VAR_LEN_MAX
);
2112 entry
->fields
[field
->offset
] = val
;
2118 * synth_event_add_next_val - Add the next field's value to an open synth trace
2119 * @val: The value to set the next field to
2120 * @trace_state: A pointer to object tracking the piecewise trace state
2122 * Set the value of the next field in an event that's been opened by
2123 * synth_event_trace_start().
2125 * The val param should be the value cast to u64. If the value points
2126 * to a string, the val param should be a char * cast to u64.
2128 * This function assumes all the fields in an event are to be set one
2129 * after another - successive calls to this function are made, one for
2130 * each field, in the order of the fields in the event, until all
2131 * fields have been set. If you'd rather set each field individually
2132 * without regard to ordering, synth_event_add_val() can be used
2135 * Note however that synth_event_add_next_val() and
2136 * synth_event_add_val() can't be intermixed for a given event trace -
2137 * one or the other but not both can be used at the same time.
2139 * Note also that synth_event_trace_end() must be called after all
2140 * values have been added for each event trace, regardless of whether
2141 * adding all field values succeeded or not.
2143 * Return: 0 on success, err otherwise.
2145 int synth_event_add_next_val(u64 val
,
2146 struct synth_event_trace_state
*trace_state
)
2148 return __synth_event_add_val(NULL
, val
, trace_state
);
2150 EXPORT_SYMBOL_GPL(synth_event_add_next_val
);
2153 * synth_event_add_val - Add a named field's value to an open synth trace
2154 * @field_name: The name of the synthetic event field value to set
2155 * @val: The value to set the next field to
2156 * @trace_state: A pointer to object tracking the piecewise trace state
2158 * Set the value of the named field in an event that's been opened by
2159 * synth_event_trace_start().
2161 * The val param should be the value cast to u64. If the value points
2162 * to a string, the val param should be a char * cast to u64.
2164 * This function looks up the field name, and if found, sets the field
2165 * to the specified value. This lookup makes this function more
2166 * expensive than synth_event_add_next_val(), so use that or the
2167 * none-piecewise synth_event_trace() instead if efficiency is more
2170 * Note however that synth_event_add_next_val() and
2171 * synth_event_add_val() can't be intermixed for a given event trace -
2172 * one or the other but not both can be used at the same time.
2174 * Note also that synth_event_trace_end() must be called after all
2175 * values have been added for each event trace, regardless of whether
2176 * adding all field values succeeded or not.
2178 * Return: 0 on success, err otherwise.
2180 int synth_event_add_val(const char *field_name
, u64 val
,
2181 struct synth_event_trace_state
*trace_state
)
2183 return __synth_event_add_val(field_name
, val
, trace_state
);
2185 EXPORT_SYMBOL_GPL(synth_event_add_val
);
2188 * synth_event_trace_end - End piecewise synthetic event trace
2189 * @trace_state: A pointer to object tracking the piecewise trace state
2191 * End the trace of a synthetic event opened by
2192 * synth_event_trace__start().
2194 * This function 'closes' an event trace, which basically means that
2195 * it commits the reserved event and cleans up other loose ends.
2197 * A pointer to a trace_state object is passed in, which will keep
2198 * track of the current event trace state opened with
2199 * synth_event_trace_start().
2201 * Note that this function must be called after all values have been
2202 * added for each event trace, regardless of whether adding all field
2203 * values succeeded or not.
2205 * Return: 0 on success, err otherwise.
2207 int synth_event_trace_end(struct synth_event_trace_state
*trace_state
)
2212 trace_event_buffer_commit(&trace_state
->fbuffer
);
2214 ring_buffer_nest_end(trace_state
->buffer
);
2218 EXPORT_SYMBOL_GPL(synth_event_trace_end
);
2220 static int create_synth_event(int argc
, const char **argv
)
2222 const char *name
= argv
[0];
2225 if (name
[0] != 's' || name
[1] != ':')
2229 /* This interface accepts group name prefix */
2230 if (strchr(name
, '/')) {
2231 len
= str_has_prefix(name
, SYNTH_SYSTEM
"/");
2236 return __create_synth_event(argc
- 1, name
, argv
+ 1);
2239 static int synth_event_release(struct dyn_event
*ev
)
2241 struct synth_event
*event
= to_synth_event(ev
);
2247 ret
= unregister_synth_event(event
);
2251 dyn_event_remove(ev
);
2252 free_synth_event(event
);
2256 static int __synth_event_show(struct seq_file
*m
, struct synth_event
*event
)
2258 struct synth_field
*field
;
2261 seq_printf(m
, "%s\t", event
->name
);
2263 for (i
= 0; i
< event
->n_fields
; i
++) {
2264 field
= event
->fields
[i
];
2266 /* parameter values */
2267 seq_printf(m
, "%s %s%s", field
->type
, field
->name
,
2268 i
== event
->n_fields
- 1 ? "" : "; ");
2276 static int synth_event_show(struct seq_file
*m
, struct dyn_event
*ev
)
2278 struct synth_event
*event
= to_synth_event(ev
);
2280 seq_printf(m
, "s:%s/", event
->class.system
);
2282 return __synth_event_show(m
, event
);
/* seq_file show callback: skip dyn_events that aren't synthetic events */
static int synth_events_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_synth_event(ev))
		return 0;

	return __synth_event_show(m, to_synth_event(ev));
}
2295 static const struct seq_operations synth_events_seq_op
= {
2296 .start
= dyn_event_seq_start
,
2297 .next
= dyn_event_seq_next
,
2298 .stop
= dyn_event_seq_stop
,
2299 .show
= synth_events_seq_show
,
2302 static int synth_events_open(struct inode
*inode
, struct file
*file
)
2306 ret
= security_locked_down(LOCKDOWN_TRACEFS
);
2310 if ((file
->f_mode
& FMODE_WRITE
) && (file
->f_flags
& O_TRUNC
)) {
2311 ret
= dyn_events_release_all(&synth_event_ops
);
2316 return seq_open(file
, &synth_events_seq_op
);
2319 static ssize_t
synth_events_write(struct file
*file
,
2320 const char __user
*buffer
,
2321 size_t count
, loff_t
*ppos
)
2323 return trace_parse_run_command(file
, buffer
, count
, ppos
,
2324 create_or_delete_synth_event
);
2327 static const struct file_operations synth_events_fops
= {
2328 .open
= synth_events_open
,
2329 .write
= synth_events_write
,
2331 .llseek
= seq_lseek
,
2332 .release
= seq_release
,
2335 static u64
hist_field_timestamp(struct hist_field
*hist_field
,
2336 struct tracing_map_elt
*elt
,
2337 struct ring_buffer_event
*rbe
,
2340 struct hist_trigger_data
*hist_data
= hist_field
->hist_data
;
2341 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2343 u64 ts
= ring_buffer_event_time_stamp(rbe
);
2345 if (hist_data
->attrs
->ts_in_usecs
&& trace_clock_in_ns(tr
))
2351 static u64
hist_field_cpu(struct hist_field
*hist_field
,
2352 struct tracing_map_elt
*elt
,
2353 struct ring_buffer_event
*rbe
,
2356 int cpu
= smp_processor_id();
2362 * check_field_for_var_ref - Check if a VAR_REF field references a variable
2363 * @hist_field: The VAR_REF field to check
2364 * @var_data: The hist trigger that owns the variable
2365 * @var_idx: The trigger variable identifier
2367 * Check the given VAR_REF field to see whether or not it references
2368 * the given variable associated with the given trigger.
2370 * Return: The VAR_REF field if it does reference the variable, NULL if not
2372 static struct hist_field
*
2373 check_field_for_var_ref(struct hist_field
*hist_field
,
2374 struct hist_trigger_data
*var_data
,
2375 unsigned int var_idx
)
2377 WARN_ON(!(hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR_REF
));
2379 if (hist_field
&& hist_field
->var
.idx
== var_idx
&&
2380 hist_field
->var
.hist_data
== var_data
)
2387 * find_var_ref - Check if a trigger has a reference to a trigger variable
2388 * @hist_data: The hist trigger that might have a reference to the variable
2389 * @var_data: The hist trigger that owns the variable
2390 * @var_idx: The trigger variable identifier
2392 * Check the list of var_refs[] on the first hist trigger to see
2393 * whether any of them are references to the variable on the second
2396 * Return: The VAR_REF field referencing the variable if so, NULL if not
2398 static struct hist_field
*find_var_ref(struct hist_trigger_data
*hist_data
,
2399 struct hist_trigger_data
*var_data
,
2400 unsigned int var_idx
)
2402 struct hist_field
*hist_field
;
2405 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
2406 hist_field
= hist_data
->var_refs
[i
];
2407 if (check_field_for_var_ref(hist_field
, var_data
, var_idx
))
2415 * find_any_var_ref - Check if there is a reference to a given trigger variable
2416 * @hist_data: The hist trigger
2417 * @var_idx: The trigger variable identifier
2419 * Check to see whether the given variable is currently referenced by
2420 * any other trigger.
2422 * The trigger the variable is defined on is explicitly excluded - the
2423 * assumption being that a self-reference doesn't prevent a trigger
2424 * from being removed.
2426 * Return: The VAR_REF field referencing the variable if so, NULL if not
2428 static struct hist_field
*find_any_var_ref(struct hist_trigger_data
*hist_data
,
2429 unsigned int var_idx
)
2431 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2432 struct hist_field
*found
= NULL
;
2433 struct hist_var_data
*var_data
;
2435 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
2436 if (var_data
->hist_data
== hist_data
)
2438 found
= find_var_ref(var_data
->hist_data
, hist_data
, var_idx
);
2447 * check_var_refs - Check if there is a reference to any of trigger's variables
2448 * @hist_data: The hist trigger
2450 * A trigger can define one or more variables. If any one of them is
2451 * currently referenced by any other trigger, this function will
2454 * Typically used to determine whether or not a trigger can be removed
2455 * - if there are any references to a trigger's variables, it cannot.
2457 * Return: True if there is a reference to any of trigger's variables
2459 static bool check_var_refs(struct hist_trigger_data
*hist_data
)
2461 struct hist_field
*field
;
2465 for_each_hist_field(i
, hist_data
) {
2466 field
= hist_data
->fields
[i
];
2467 if (field
&& field
->flags
& HIST_FIELD_FL_VAR
) {
2468 if (find_any_var_ref(hist_data
, field
->var
.idx
)) {
2478 static struct hist_var_data
*find_hist_vars(struct hist_trigger_data
*hist_data
)
2480 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2481 struct hist_var_data
*var_data
, *found
= NULL
;
2483 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
2484 if (var_data
->hist_data
== hist_data
) {
2493 static bool field_has_hist_vars(struct hist_field
*hist_field
,
2504 if (hist_field
->flags
& HIST_FIELD_FL_VAR
||
2505 hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
2508 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++) {
2509 struct hist_field
*operand
;
2511 operand
= hist_field
->operands
[i
];
2512 if (field_has_hist_vars(operand
, level
+ 1))
2519 static bool has_hist_vars(struct hist_trigger_data
*hist_data
)
2521 struct hist_field
*hist_field
;
2524 for_each_hist_field(i
, hist_data
) {
2525 hist_field
= hist_data
->fields
[i
];
2526 if (field_has_hist_vars(hist_field
, 0))
2533 static int save_hist_vars(struct hist_trigger_data
*hist_data
)
2535 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2536 struct hist_var_data
*var_data
;
2538 var_data
= find_hist_vars(hist_data
);
2542 if (tracing_check_open_get_tr(tr
))
2545 var_data
= kzalloc(sizeof(*var_data
), GFP_KERNEL
);
2547 trace_array_put(tr
);
2551 var_data
->hist_data
= hist_data
;
2552 list_add(&var_data
->list
, &tr
->hist_vars
);
2557 static void remove_hist_vars(struct hist_trigger_data
*hist_data
)
2559 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2560 struct hist_var_data
*var_data
;
2562 var_data
= find_hist_vars(hist_data
);
2566 if (WARN_ON(check_var_refs(hist_data
)))
2569 list_del(&var_data
->list
);
2573 trace_array_put(tr
);
2576 static struct hist_field
*find_var_field(struct hist_trigger_data
*hist_data
,
2577 const char *var_name
)
2579 struct hist_field
*hist_field
, *found
= NULL
;
2582 for_each_hist_field(i
, hist_data
) {
2583 hist_field
= hist_data
->fields
[i
];
2584 if (hist_field
&& hist_field
->flags
& HIST_FIELD_FL_VAR
&&
2585 strcmp(hist_field
->var
.name
, var_name
) == 0) {
2594 static struct hist_field
*find_var(struct hist_trigger_data
*hist_data
,
2595 struct trace_event_file
*file
,
2596 const char *var_name
)
2598 struct hist_trigger_data
*test_data
;
2599 struct event_trigger_data
*test
;
2600 struct hist_field
*hist_field
;
2602 lockdep_assert_held(&event_mutex
);
2604 hist_field
= find_var_field(hist_data
, var_name
);
2608 list_for_each_entry(test
, &file
->triggers
, list
) {
2609 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2610 test_data
= test
->private_data
;
2611 hist_field
= find_var_field(test_data
, var_name
);
2620 static struct trace_event_file
*find_var_file(struct trace_array
*tr
,
2625 struct hist_trigger_data
*var_hist_data
;
2626 struct hist_var_data
*var_data
;
2627 struct trace_event_file
*file
, *found
= NULL
;
2630 return find_event_file(tr
, system
, event_name
);
2632 list_for_each_entry(var_data
, &tr
->hist_vars
, list
) {
2633 var_hist_data
= var_data
->hist_data
;
2634 file
= var_hist_data
->event_file
;
2638 if (find_var_field(var_hist_data
, var_name
)) {
2640 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
, errpos(var_name
));
2651 static struct hist_field
*find_file_var(struct trace_event_file
*file
,
2652 const char *var_name
)
2654 struct hist_trigger_data
*test_data
;
2655 struct event_trigger_data
*test
;
2656 struct hist_field
*hist_field
;
2658 lockdep_assert_held(&event_mutex
);
2660 list_for_each_entry(test
, &file
->triggers
, list
) {
2661 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
2662 test_data
= test
->private_data
;
2663 hist_field
= find_var_field(test_data
, var_name
);
2672 static struct hist_field
*
2673 find_match_var(struct hist_trigger_data
*hist_data
, char *var_name
)
2675 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2676 struct hist_field
*hist_field
, *found
= NULL
;
2677 struct trace_event_file
*file
;
2680 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
2681 struct action_data
*data
= hist_data
->actions
[i
];
2683 if (data
->handler
== HANDLER_ONMATCH
) {
2684 char *system
= data
->match_data
.event_system
;
2685 char *event_name
= data
->match_data
.event
;
2687 file
= find_var_file(tr
, system
, event_name
, var_name
);
2690 hist_field
= find_file_var(file
, var_name
);
2693 hist_err(tr
, HIST_ERR_VAR_NOT_UNIQUE
,
2695 return ERR_PTR(-EINVAL
);
2705 static struct hist_field
*find_event_var(struct hist_trigger_data
*hist_data
,
2710 struct trace_array
*tr
= hist_data
->event_file
->tr
;
2711 struct hist_field
*hist_field
= NULL
;
2712 struct trace_event_file
*file
;
2714 if (!system
|| !event_name
) {
2715 hist_field
= find_match_var(hist_data
, var_name
);
2716 if (IS_ERR(hist_field
))
2722 file
= find_var_file(tr
, system
, event_name
, var_name
);
2726 hist_field
= find_file_var(file
, var_name
);
2731 static u64
hist_field_var_ref(struct hist_field
*hist_field
,
2732 struct tracing_map_elt
*elt
,
2733 struct ring_buffer_event
*rbe
,
2736 struct hist_elt_data
*elt_data
;
2739 if (WARN_ON_ONCE(!elt
))
2742 elt_data
= elt
->private_data
;
2743 var_val
= elt_data
->var_ref_vals
[hist_field
->var_ref_idx
];
2748 static bool resolve_var_refs(struct hist_trigger_data
*hist_data
, void *key
,
2749 u64
*var_ref_vals
, bool self
)
2751 struct hist_trigger_data
*var_data
;
2752 struct tracing_map_elt
*var_elt
;
2753 struct hist_field
*hist_field
;
2754 unsigned int i
, var_idx
;
2755 bool resolved
= true;
2758 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
2759 hist_field
= hist_data
->var_refs
[i
];
2760 var_idx
= hist_field
->var
.idx
;
2761 var_data
= hist_field
->var
.hist_data
;
2763 if (var_data
== NULL
) {
2768 if ((self
&& var_data
!= hist_data
) ||
2769 (!self
&& var_data
== hist_data
))
2772 var_elt
= tracing_map_lookup(var_data
->map
, key
);
2778 if (!tracing_map_var_set(var_elt
, var_idx
)) {
2783 if (self
|| !hist_field
->read_once
)
2784 var_val
= tracing_map_read_var(var_elt
, var_idx
);
2786 var_val
= tracing_map_read_var_once(var_elt
, var_idx
);
2788 var_ref_vals
[i
] = var_val
;
2794 static const char *hist_field_name(struct hist_field
*field
,
2797 const char *field_name
= "";
2803 field_name
= field
->field
->name
;
2804 else if (field
->flags
& HIST_FIELD_FL_LOG2
||
2805 field
->flags
& HIST_FIELD_FL_ALIAS
)
2806 field_name
= hist_field_name(field
->operands
[0], ++level
);
2807 else if (field
->flags
& HIST_FIELD_FL_CPU
)
2809 else if (field
->flags
& HIST_FIELD_FL_EXPR
||
2810 field
->flags
& HIST_FIELD_FL_VAR_REF
) {
2811 if (field
->system
) {
2812 static char full_name
[MAX_FILTER_STR_VAL
];
2814 strcat(full_name
, field
->system
);
2815 strcat(full_name
, ".");
2816 strcat(full_name
, field
->event_name
);
2817 strcat(full_name
, ".");
2818 strcat(full_name
, field
->name
);
2819 field_name
= full_name
;
2821 field_name
= field
->name
;
2822 } else if (field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
2823 field_name
= "common_timestamp";
2825 if (field_name
== NULL
)
2831 static hist_field_fn_t
select_value_fn(int field_size
, int field_is_signed
)
2833 hist_field_fn_t fn
= NULL
;
2835 switch (field_size
) {
2837 if (field_is_signed
)
2838 fn
= hist_field_s64
;
2840 fn
= hist_field_u64
;
2843 if (field_is_signed
)
2844 fn
= hist_field_s32
;
2846 fn
= hist_field_u32
;
2849 if (field_is_signed
)
2850 fn
= hist_field_s16
;
2852 fn
= hist_field_u16
;
2855 if (field_is_signed
)
2865 static int parse_map_size(char *str
)
2867 unsigned long size
, map_bits
;
2870 ret
= kstrtoul(str
, 0, &size
);
2874 map_bits
= ilog2(roundup_pow_of_two(size
));
2875 if (map_bits
< TRACING_MAP_BITS_MIN
||
2876 map_bits
> TRACING_MAP_BITS_MAX
)
2884 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs
*attrs
)
2891 for (i
= 0; i
< attrs
->n_assignments
; i
++)
2892 kfree(attrs
->assignment_str
[i
]);
2894 for (i
= 0; i
< attrs
->n_actions
; i
++)
2895 kfree(attrs
->action_str
[i
]);
2898 kfree(attrs
->sort_key_str
);
2899 kfree(attrs
->keys_str
);
2900 kfree(attrs
->vals_str
);
2901 kfree(attrs
->clock
);
2905 static int parse_action(char *str
, struct hist_trigger_attrs
*attrs
)
2909 if (attrs
->n_actions
>= HIST_ACTIONS_MAX
)
2912 if ((str_has_prefix(str
, "onmatch(")) ||
2913 (str_has_prefix(str
, "onmax(")) ||
2914 (str_has_prefix(str
, "onchange("))) {
2915 attrs
->action_str
[attrs
->n_actions
] = kstrdup(str
, GFP_KERNEL
);
2916 if (!attrs
->action_str
[attrs
->n_actions
]) {
2926 static int parse_assignment(struct trace_array
*tr
,
2927 char *str
, struct hist_trigger_attrs
*attrs
)
2931 if ((len
= str_has_prefix(str
, "key=")) ||
2932 (len
= str_has_prefix(str
, "keys="))) {
2933 attrs
->keys_str
= kstrdup(str
+ len
, GFP_KERNEL
);
2934 if (!attrs
->keys_str
) {
2938 } else if ((len
= str_has_prefix(str
, "val=")) ||
2939 (len
= str_has_prefix(str
, "vals=")) ||
2940 (len
= str_has_prefix(str
, "values="))) {
2941 attrs
->vals_str
= kstrdup(str
+ len
, GFP_KERNEL
);
2942 if (!attrs
->vals_str
) {
2946 } else if ((len
= str_has_prefix(str
, "sort="))) {
2947 attrs
->sort_key_str
= kstrdup(str
+ len
, GFP_KERNEL
);
2948 if (!attrs
->sort_key_str
) {
2952 } else if (str_has_prefix(str
, "name=")) {
2953 attrs
->name
= kstrdup(str
, GFP_KERNEL
);
2958 } else if ((len
= str_has_prefix(str
, "clock="))) {
2961 str
= strstrip(str
);
2962 attrs
->clock
= kstrdup(str
, GFP_KERNEL
);
2963 if (!attrs
->clock
) {
2967 } else if ((len
= str_has_prefix(str
, "size="))) {
2968 int map_bits
= parse_map_size(str
+ len
);
2974 attrs
->map_bits
= map_bits
;
2978 if (attrs
->n_assignments
== TRACING_MAP_VARS_MAX
) {
2979 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(str
));
2984 assignment
= kstrdup(str
, GFP_KERNEL
);
2990 attrs
->assignment_str
[attrs
->n_assignments
++] = assignment
;
2996 static struct hist_trigger_attrs
*
2997 parse_hist_trigger_attrs(struct trace_array
*tr
, char *trigger_str
)
2999 struct hist_trigger_attrs
*attrs
;
3002 attrs
= kzalloc(sizeof(*attrs
), GFP_KERNEL
);
3004 return ERR_PTR(-ENOMEM
);
3006 while (trigger_str
) {
3007 char *str
= strsep(&trigger_str
, ":");
3010 rhs
= strchr(str
, '=');
3012 if (!strlen(++rhs
)) {
3014 hist_err(tr
, HIST_ERR_EMPTY_ASSIGNMENT
, errpos(str
));
3017 ret
= parse_assignment(tr
, str
, attrs
);
3020 } else if (strcmp(str
, "pause") == 0)
3021 attrs
->pause
= true;
3022 else if ((strcmp(str
, "cont") == 0) ||
3023 (strcmp(str
, "continue") == 0))
3025 else if (strcmp(str
, "clear") == 0)
3026 attrs
->clear
= true;
3028 ret
= parse_action(str
, attrs
);
3034 if (!attrs
->keys_str
) {
3039 if (!attrs
->clock
) {
3040 attrs
->clock
= kstrdup("global", GFP_KERNEL
);
3041 if (!attrs
->clock
) {
3049 destroy_hist_trigger_attrs(attrs
);
3051 return ERR_PTR(ret
);
3054 static inline void save_comm(char *comm
, struct task_struct
*task
)
3057 strcpy(comm
, "<idle>");
3061 if (WARN_ON_ONCE(task
->pid
< 0)) {
3062 strcpy(comm
, "<XXX>");
3066 strncpy(comm
, task
->comm
, TASK_COMM_LEN
);
3069 static void hist_elt_data_free(struct hist_elt_data
*elt_data
)
3073 for (i
= 0; i
< SYNTH_FIELDS_MAX
; i
++)
3074 kfree(elt_data
->field_var_str
[i
]);
3076 kfree(elt_data
->comm
);
3080 static void hist_trigger_elt_data_free(struct tracing_map_elt
*elt
)
3082 struct hist_elt_data
*elt_data
= elt
->private_data
;
3084 hist_elt_data_free(elt_data
);
3087 static int hist_trigger_elt_data_alloc(struct tracing_map_elt
*elt
)
3089 struct hist_trigger_data
*hist_data
= elt
->map
->private_data
;
3090 unsigned int size
= TASK_COMM_LEN
;
3091 struct hist_elt_data
*elt_data
;
3092 struct hist_field
*key_field
;
3093 unsigned int i
, n_str
;
3095 elt_data
= kzalloc(sizeof(*elt_data
), GFP_KERNEL
);
3099 for_each_hist_key_field(i
, hist_data
) {
3100 key_field
= hist_data
->fields
[i
];
3102 if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
3103 elt_data
->comm
= kzalloc(size
, GFP_KERNEL
);
3104 if (!elt_data
->comm
) {
3112 n_str
= hist_data
->n_field_var_str
+ hist_data
->n_save_var_str
;
3114 size
= STR_VAR_LEN_MAX
;
3116 for (i
= 0; i
< n_str
; i
++) {
3117 elt_data
->field_var_str
[i
] = kzalloc(size
, GFP_KERNEL
);
3118 if (!elt_data
->field_var_str
[i
]) {
3119 hist_elt_data_free(elt_data
);
3124 elt
->private_data
= elt_data
;
3129 static void hist_trigger_elt_data_init(struct tracing_map_elt
*elt
)
3131 struct hist_elt_data
*elt_data
= elt
->private_data
;
3134 save_comm(elt_data
->comm
, current
);
3137 static const struct tracing_map_ops hist_trigger_elt_data_ops
= {
3138 .elt_alloc
= hist_trigger_elt_data_alloc
,
3139 .elt_free
= hist_trigger_elt_data_free
,
3140 .elt_init
= hist_trigger_elt_data_init
,
3143 static const char *get_hist_field_flags(struct hist_field
*hist_field
)
3145 const char *flags_str
= NULL
;
3147 if (hist_field
->flags
& HIST_FIELD_FL_HEX
)
3149 else if (hist_field
->flags
& HIST_FIELD_FL_SYM
)
3151 else if (hist_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
)
3152 flags_str
= "sym-offset";
3153 else if (hist_field
->flags
& HIST_FIELD_FL_EXECNAME
)
3154 flags_str
= "execname";
3155 else if (hist_field
->flags
& HIST_FIELD_FL_SYSCALL
)
3156 flags_str
= "syscall";
3157 else if (hist_field
->flags
& HIST_FIELD_FL_LOG2
)
3159 else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
3160 flags_str
= "usecs";
3165 static void expr_field_str(struct hist_field
*field
, char *expr
)
3167 if (field
->flags
& HIST_FIELD_FL_VAR_REF
)
3170 strcat(expr
, hist_field_name(field
, 0));
3172 if (field
->flags
&& !(field
->flags
& HIST_FIELD_FL_VAR_REF
)) {
3173 const char *flags_str
= get_hist_field_flags(field
);
3177 strcat(expr
, flags_str
);
3182 static char *expr_str(struct hist_field
*field
, unsigned int level
)
3189 expr
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
3193 if (!field
->operands
[0]) {
3194 expr_field_str(field
, expr
);
3198 if (field
->operator == FIELD_OP_UNARY_MINUS
) {
3202 subexpr
= expr_str(field
->operands
[0], ++level
);
3207 strcat(expr
, subexpr
);
3215 expr_field_str(field
->operands
[0], expr
);
3217 switch (field
->operator) {
3218 case FIELD_OP_MINUS
:
3229 expr_field_str(field
->operands
[1], expr
);
3234 static int contains_operator(char *str
)
3236 enum field_op_id field_op
= FIELD_OP_NONE
;
3239 op
= strpbrk(str
, "+-");
3241 return FIELD_OP_NONE
;
3246 field_op
= FIELD_OP_UNARY_MINUS
;
3248 field_op
= FIELD_OP_MINUS
;
3251 field_op
= FIELD_OP_PLUS
;
3260 static void __destroy_hist_field(struct hist_field
*hist_field
)
3262 kfree(hist_field
->var
.name
);
3263 kfree(hist_field
->name
);
3264 kfree(hist_field
->type
);
3269 static void destroy_hist_field(struct hist_field
*hist_field
,
3280 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
)
3281 return; /* var refs will be destroyed separately */
3283 for (i
= 0; i
< HIST_FIELD_OPERANDS_MAX
; i
++)
3284 destroy_hist_field(hist_field
->operands
[i
], level
+ 1);
3286 __destroy_hist_field(hist_field
);
3289 static struct hist_field
*create_hist_field(struct hist_trigger_data
*hist_data
,
3290 struct ftrace_event_field
*field
,
3291 unsigned long flags
,
3294 struct hist_field
*hist_field
;
3296 if (field
&& is_function_field(field
))
3299 hist_field
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
3303 hist_field
->hist_data
= hist_data
;
3305 if (flags
& HIST_FIELD_FL_EXPR
|| flags
& HIST_FIELD_FL_ALIAS
)
3306 goto out
; /* caller will populate */
3308 if (flags
& HIST_FIELD_FL_VAR_REF
) {
3309 hist_field
->fn
= hist_field_var_ref
;
3313 if (flags
& HIST_FIELD_FL_HITCOUNT
) {
3314 hist_field
->fn
= hist_field_counter
;
3315 hist_field
->size
= sizeof(u64
);
3316 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
3317 if (!hist_field
->type
)
3322 if (flags
& HIST_FIELD_FL_STACKTRACE
) {
3323 hist_field
->fn
= hist_field_none
;
3327 if (flags
& HIST_FIELD_FL_LOG2
) {
3328 unsigned long fl
= flags
& ~HIST_FIELD_FL_LOG2
;
3329 hist_field
->fn
= hist_field_log2
;
3330 hist_field
->operands
[0] = create_hist_field(hist_data
, field
, fl
, NULL
);
3331 hist_field
->size
= hist_field
->operands
[0]->size
;
3332 hist_field
->type
= kstrdup(hist_field
->operands
[0]->type
, GFP_KERNEL
);
3333 if (!hist_field
->type
)
3338 if (flags
& HIST_FIELD_FL_TIMESTAMP
) {
3339 hist_field
->fn
= hist_field_timestamp
;
3340 hist_field
->size
= sizeof(u64
);
3341 hist_field
->type
= kstrdup("u64", GFP_KERNEL
);
3342 if (!hist_field
->type
)
3347 if (flags
& HIST_FIELD_FL_CPU
) {
3348 hist_field
->fn
= hist_field_cpu
;
3349 hist_field
->size
= sizeof(int);
3350 hist_field
->type
= kstrdup("unsigned int", GFP_KERNEL
);
3351 if (!hist_field
->type
)
3356 if (WARN_ON_ONCE(!field
))
3359 if (is_string_field(field
)) {
3360 flags
|= HIST_FIELD_FL_STRING
;
3362 hist_field
->size
= MAX_FILTER_STR_VAL
;
3363 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
3364 if (!hist_field
->type
)
3367 if (field
->filter_type
== FILTER_STATIC_STRING
)
3368 hist_field
->fn
= hist_field_string
;
3369 else if (field
->filter_type
== FILTER_DYN_STRING
)
3370 hist_field
->fn
= hist_field_dynstring
;
3372 hist_field
->fn
= hist_field_pstring
;
3374 hist_field
->size
= field
->size
;
3375 hist_field
->is_signed
= field
->is_signed
;
3376 hist_field
->type
= kstrdup(field
->type
, GFP_KERNEL
);
3377 if (!hist_field
->type
)
3380 hist_field
->fn
= select_value_fn(field
->size
,
3382 if (!hist_field
->fn
) {
3383 destroy_hist_field(hist_field
, 0);
3388 hist_field
->field
= field
;
3389 hist_field
->flags
= flags
;
3392 hist_field
->var
.name
= kstrdup(var_name
, GFP_KERNEL
);
3393 if (!hist_field
->var
.name
)
3399 destroy_hist_field(hist_field
, 0);
3403 static void destroy_hist_fields(struct hist_trigger_data
*hist_data
)
3407 for (i
= 0; i
< HIST_FIELDS_MAX
; i
++) {
3408 if (hist_data
->fields
[i
]) {
3409 destroy_hist_field(hist_data
->fields
[i
], 0);
3410 hist_data
->fields
[i
] = NULL
;
3414 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
3415 WARN_ON(!(hist_data
->var_refs
[i
]->flags
& HIST_FIELD_FL_VAR_REF
));
3416 __destroy_hist_field(hist_data
->var_refs
[i
]);
3417 hist_data
->var_refs
[i
] = NULL
;
3421 static int init_var_ref(struct hist_field
*ref_field
,
3422 struct hist_field
*var_field
,
3423 char *system
, char *event_name
)
3427 ref_field
->var
.idx
= var_field
->var
.idx
;
3428 ref_field
->var
.hist_data
= var_field
->hist_data
;
3429 ref_field
->size
= var_field
->size
;
3430 ref_field
->is_signed
= var_field
->is_signed
;
3431 ref_field
->flags
|= var_field
->flags
&
3432 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
3435 ref_field
->system
= kstrdup(system
, GFP_KERNEL
);
3436 if (!ref_field
->system
)
3441 ref_field
->event_name
= kstrdup(event_name
, GFP_KERNEL
);
3442 if (!ref_field
->event_name
) {
3448 if (var_field
->var
.name
) {
3449 ref_field
->name
= kstrdup(var_field
->var
.name
, GFP_KERNEL
);
3450 if (!ref_field
->name
) {
3454 } else if (var_field
->name
) {
3455 ref_field
->name
= kstrdup(var_field
->name
, GFP_KERNEL
);
3456 if (!ref_field
->name
) {
3462 ref_field
->type
= kstrdup(var_field
->type
, GFP_KERNEL
);
3463 if (!ref_field
->type
) {
3470 kfree(ref_field
->system
);
3471 kfree(ref_field
->event_name
);
3472 kfree(ref_field
->name
);
3477 static int find_var_ref_idx(struct hist_trigger_data
*hist_data
,
3478 struct hist_field
*var_field
)
3480 struct hist_field
*ref_field
;
3483 for (i
= 0; i
< hist_data
->n_var_refs
; i
++) {
3484 ref_field
= hist_data
->var_refs
[i
];
3485 if (ref_field
->var
.idx
== var_field
->var
.idx
&&
3486 ref_field
->var
.hist_data
== var_field
->hist_data
)
3494 * create_var_ref - Create a variable reference and attach it to trigger
3495 * @hist_data: The trigger that will be referencing the variable
3496 * @var_field: The VAR field to create a reference to
3497 * @system: The optional system string
3498 * @event_name: The optional event_name string
3500 * Given a variable hist_field, create a VAR_REF hist_field that
3501 * represents a reference to it.
3503 * This function also adds the reference to the trigger that
3504 * now references the variable.
3506 * Return: The VAR_REF field if successful, NULL if not
3508 static struct hist_field
*create_var_ref(struct hist_trigger_data
*hist_data
,
3509 struct hist_field
*var_field
,
3510 char *system
, char *event_name
)
3512 unsigned long flags
= HIST_FIELD_FL_VAR_REF
;
3513 struct hist_field
*ref_field
;
3515 ref_field
= create_hist_field(var_field
->hist_data
, NULL
, flags
, NULL
);
3517 if (init_var_ref(ref_field
, var_field
, system
, event_name
)) {
3518 destroy_hist_field(ref_field
, 0);
3522 hist_data
->var_refs
[hist_data
->n_var_refs
] = ref_field
;
3523 ref_field
->var_ref_idx
= hist_data
->n_var_refs
++;
3529 static bool is_var_ref(char *var_name
)
3531 if (!var_name
|| strlen(var_name
) < 2 || var_name
[0] != '$')
3537 static char *field_name_from_var(struct hist_trigger_data
*hist_data
,
3543 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
3544 name
= hist_data
->attrs
->var_defs
.name
[i
];
3546 if (strcmp(var_name
, name
) == 0) {
3547 field
= hist_data
->attrs
->var_defs
.expr
[i
];
3548 if (contains_operator(field
) || is_var_ref(field
))
3557 static char *local_field_var_ref(struct hist_trigger_data
*hist_data
,
3558 char *system
, char *event_name
,
3561 struct trace_event_call
*call
;
3563 if (system
&& event_name
) {
3564 call
= hist_data
->event_file
->event_call
;
3566 if (strcmp(system
, call
->class->system
) != 0)
3569 if (strcmp(event_name
, trace_event_name(call
)) != 0)
3573 if (!!system
!= !!event_name
)
3576 if (!is_var_ref(var_name
))
3581 return field_name_from_var(hist_data
, var_name
);
3584 static struct hist_field
*parse_var_ref(struct hist_trigger_data
*hist_data
,
3585 char *system
, char *event_name
,
3588 struct hist_field
*var_field
= NULL
, *ref_field
= NULL
;
3589 struct trace_array
*tr
= hist_data
->event_file
->tr
;
3591 if (!is_var_ref(var_name
))
3596 var_field
= find_event_var(hist_data
, system
, event_name
, var_name
);
3598 ref_field
= create_var_ref(hist_data
, var_field
,
3599 system
, event_name
);
3602 hist_err(tr
, HIST_ERR_VAR_NOT_FOUND
, errpos(var_name
));
3607 static struct ftrace_event_field
*
3608 parse_field(struct hist_trigger_data
*hist_data
, struct trace_event_file
*file
,
3609 char *field_str
, unsigned long *flags
)
3611 struct ftrace_event_field
*field
= NULL
;
3612 char *field_name
, *modifier
, *str
;
3613 struct trace_array
*tr
= file
->tr
;
3615 modifier
= str
= kstrdup(field_str
, GFP_KERNEL
);
3617 return ERR_PTR(-ENOMEM
);
3619 field_name
= strsep(&modifier
, ".");
3621 if (strcmp(modifier
, "hex") == 0)
3622 *flags
|= HIST_FIELD_FL_HEX
;
3623 else if (strcmp(modifier
, "sym") == 0)
3624 *flags
|= HIST_FIELD_FL_SYM
;
3625 else if (strcmp(modifier
, "sym-offset") == 0)
3626 *flags
|= HIST_FIELD_FL_SYM_OFFSET
;
3627 else if ((strcmp(modifier
, "execname") == 0) &&
3628 (strcmp(field_name
, "common_pid") == 0))
3629 *flags
|= HIST_FIELD_FL_EXECNAME
;
3630 else if (strcmp(modifier
, "syscall") == 0)
3631 *flags
|= HIST_FIELD_FL_SYSCALL
;
3632 else if (strcmp(modifier
, "log2") == 0)
3633 *flags
|= HIST_FIELD_FL_LOG2
;
3634 else if (strcmp(modifier
, "usecs") == 0)
3635 *flags
|= HIST_FIELD_FL_TIMESTAMP_USECS
;
3637 hist_err(tr
, HIST_ERR_BAD_FIELD_MODIFIER
, errpos(modifier
));
3638 field
= ERR_PTR(-EINVAL
);
3643 if (strcmp(field_name
, "common_timestamp") == 0) {
3644 *flags
|= HIST_FIELD_FL_TIMESTAMP
;
3645 hist_data
->enable_timestamps
= true;
3646 if (*flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)
3647 hist_data
->attrs
->ts_in_usecs
= true;
3648 } else if (strcmp(field_name
, "cpu") == 0)
3649 *flags
|= HIST_FIELD_FL_CPU
;
3651 field
= trace_find_event_field(file
->event_call
, field_name
);
3652 if (!field
|| !field
->size
) {
3653 hist_err(tr
, HIST_ERR_FIELD_NOT_FOUND
, errpos(field_name
));
3654 field
= ERR_PTR(-EINVAL
);
3664 static struct hist_field
*create_alias(struct hist_trigger_data
*hist_data
,
3665 struct hist_field
*var_ref
,
3668 struct hist_field
*alias
= NULL
;
3669 unsigned long flags
= HIST_FIELD_FL_ALIAS
| HIST_FIELD_FL_VAR
;
3671 alias
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
3675 alias
->fn
= var_ref
->fn
;
3676 alias
->operands
[0] = var_ref
;
3678 if (init_var_ref(alias
, var_ref
, var_ref
->system
, var_ref
->event_name
)) {
3679 destroy_hist_field(alias
, 0);
3683 alias
->var_ref_idx
= var_ref
->var_ref_idx
;
3688 static struct hist_field
*parse_atom(struct hist_trigger_data
*hist_data
,
3689 struct trace_event_file
*file
, char *str
,
3690 unsigned long *flags
, char *var_name
)
3692 char *s
, *ref_system
= NULL
, *ref_event
= NULL
, *ref_var
= str
;
3693 struct ftrace_event_field
*field
= NULL
;
3694 struct hist_field
*hist_field
= NULL
;
3697 s
= strchr(str
, '.');
3699 s
= strchr(++s
, '.');
3701 ref_system
= strsep(&str
, ".");
3706 ref_event
= strsep(&str
, ".");
3715 s
= local_field_var_ref(hist_data
, ref_system
, ref_event
, ref_var
);
3717 hist_field
= parse_var_ref(hist_data
, ref_system
,
3718 ref_event
, ref_var
);
3721 hist_field
= create_alias(hist_data
, hist_field
, var_name
);
3732 field
= parse_field(hist_data
, file
, str
, flags
);
3733 if (IS_ERR(field
)) {
3734 ret
= PTR_ERR(field
);
3738 hist_field
= create_hist_field(hist_data
, field
, *flags
, var_name
);
3746 return ERR_PTR(ret
);
/* Forward declaration: parse_unary and parse_expr are mutually recursive. */
static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
				     struct trace_event_file *file,
				     char *str, unsigned long flags,
				     char *var_name, unsigned int level);
3754 static struct hist_field
*parse_unary(struct hist_trigger_data
*hist_data
,
3755 struct trace_event_file
*file
,
3756 char *str
, unsigned long flags
,
3757 char *var_name
, unsigned int level
)
3759 struct hist_field
*operand1
, *expr
= NULL
;
3760 unsigned long operand_flags
;
3764 /* we support only -(xxx) i.e. explicit parens required */
3767 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
3772 str
++; /* skip leading '-' */
3774 s
= strchr(str
, '(');
3782 s
= strrchr(str
, ')');
3786 ret
= -EINVAL
; /* no closing ')' */
3790 flags
|= HIST_FIELD_FL_EXPR
;
3791 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
3798 operand1
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
3799 if (IS_ERR(operand1
)) {
3800 ret
= PTR_ERR(operand1
);
3804 expr
->flags
|= operand1
->flags
&
3805 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
3806 expr
->fn
= hist_field_unary_minus
;
3807 expr
->operands
[0] = operand1
;
3808 expr
->operator = FIELD_OP_UNARY_MINUS
;
3809 expr
->name
= expr_str(expr
, 0);
3810 expr
->type
= kstrdup(operand1
->type
, GFP_KERNEL
);
3818 destroy_hist_field(expr
, 0);
3819 return ERR_PTR(ret
);
3822 static int check_expr_operands(struct trace_array
*tr
,
3823 struct hist_field
*operand1
,
3824 struct hist_field
*operand2
)
3826 unsigned long operand1_flags
= operand1
->flags
;
3827 unsigned long operand2_flags
= operand2
->flags
;
3829 if ((operand1_flags
& HIST_FIELD_FL_VAR_REF
) ||
3830 (operand1_flags
& HIST_FIELD_FL_ALIAS
)) {
3831 struct hist_field
*var
;
3833 var
= find_var_field(operand1
->var
.hist_data
, operand1
->name
);
3836 operand1_flags
= var
->flags
;
3839 if ((operand2_flags
& HIST_FIELD_FL_VAR_REF
) ||
3840 (operand2_flags
& HIST_FIELD_FL_ALIAS
)) {
3841 struct hist_field
*var
;
3843 var
= find_var_field(operand2
->var
.hist_data
, operand2
->name
);
3846 operand2_flags
= var
->flags
;
3849 if ((operand1_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
) !=
3850 (operand2_flags
& HIST_FIELD_FL_TIMESTAMP_USECS
)) {
3851 hist_err(tr
, HIST_ERR_TIMESTAMP_MISMATCH
, 0);
3858 static struct hist_field
*parse_expr(struct hist_trigger_data
*hist_data
,
3859 struct trace_event_file
*file
,
3860 char *str
, unsigned long flags
,
3861 char *var_name
, unsigned int level
)
3863 struct hist_field
*operand1
= NULL
, *operand2
= NULL
, *expr
= NULL
;
3864 unsigned long operand_flags
;
3865 int field_op
, ret
= -EINVAL
;
3866 char *sep
, *operand1_str
;
3869 hist_err(file
->tr
, HIST_ERR_TOO_MANY_SUBEXPR
, errpos(str
));
3870 return ERR_PTR(-EINVAL
);
3873 field_op
= contains_operator(str
);
3875 if (field_op
== FIELD_OP_NONE
)
3876 return parse_atom(hist_data
, file
, str
, &flags
, var_name
);
3878 if (field_op
== FIELD_OP_UNARY_MINUS
)
3879 return parse_unary(hist_data
, file
, str
, flags
, var_name
, ++level
);
3882 case FIELD_OP_MINUS
:
3892 operand1_str
= strsep(&str
, sep
);
3893 if (!operand1_str
|| !str
)
3897 operand1
= parse_atom(hist_data
, file
, operand1_str
,
3898 &operand_flags
, NULL
);
3899 if (IS_ERR(operand1
)) {
3900 ret
= PTR_ERR(operand1
);
3905 /* rest of string could be another expression e.g. b+c in a+b+c */
3907 operand2
= parse_expr(hist_data
, file
, str
, operand_flags
, NULL
, ++level
);
3908 if (IS_ERR(operand2
)) {
3909 ret
= PTR_ERR(operand2
);
3914 ret
= check_expr_operands(file
->tr
, operand1
, operand2
);
3918 flags
|= HIST_FIELD_FL_EXPR
;
3920 flags
|= operand1
->flags
&
3921 (HIST_FIELD_FL_TIMESTAMP
| HIST_FIELD_FL_TIMESTAMP_USECS
);
3923 expr
= create_hist_field(hist_data
, NULL
, flags
, var_name
);
3929 operand1
->read_once
= true;
3930 operand2
->read_once
= true;
3932 expr
->operands
[0] = operand1
;
3933 expr
->operands
[1] = operand2
;
3934 expr
->operator = field_op
;
3935 expr
->name
= expr_str(expr
, 0);
3936 expr
->type
= kstrdup(operand1
->type
, GFP_KERNEL
);
3943 case FIELD_OP_MINUS
:
3944 expr
->fn
= hist_field_minus
;
3947 expr
->fn
= hist_field_plus
;
3956 destroy_hist_field(operand1
, 0);
3957 destroy_hist_field(operand2
, 0);
3958 destroy_hist_field(expr
, 0);
3960 return ERR_PTR(ret
);
3963 static char *find_trigger_filter(struct hist_trigger_data
*hist_data
,
3964 struct trace_event_file
*file
)
3966 struct event_trigger_data
*test
;
3968 lockdep_assert_held(&event_mutex
);
3970 list_for_each_entry(test
, &file
->triggers
, list
) {
3971 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
3972 if (test
->private_data
== hist_data
)
3973 return test
->filter_str
;
3980 static struct event_command trigger_hist_cmd
;
3981 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
3982 struct trace_event_file
*file
,
3983 char *glob
, char *cmd
, char *param
);
3985 static bool compatible_keys(struct hist_trigger_data
*target_hist_data
,
3986 struct hist_trigger_data
*hist_data
,
3987 unsigned int n_keys
)
3989 struct hist_field
*target_hist_field
, *hist_field
;
3990 unsigned int n
, i
, j
;
3992 if (hist_data
->n_fields
- hist_data
->n_vals
!= n_keys
)
3995 i
= hist_data
->n_vals
;
3996 j
= target_hist_data
->n_vals
;
3998 for (n
= 0; n
< n_keys
; n
++) {
3999 hist_field
= hist_data
->fields
[i
+ n
];
4000 target_hist_field
= target_hist_data
->fields
[j
+ n
];
4002 if (strcmp(hist_field
->type
, target_hist_field
->type
) != 0)
4004 if (hist_field
->size
!= target_hist_field
->size
)
4006 if (hist_field
->is_signed
!= target_hist_field
->is_signed
)
4013 static struct hist_trigger_data
*
4014 find_compatible_hist(struct hist_trigger_data
*target_hist_data
,
4015 struct trace_event_file
*file
)
4017 struct hist_trigger_data
*hist_data
;
4018 struct event_trigger_data
*test
;
4019 unsigned int n_keys
;
4021 lockdep_assert_held(&event_mutex
);
4023 n_keys
= target_hist_data
->n_fields
- target_hist_data
->n_vals
;
4025 list_for_each_entry(test
, &file
->triggers
, list
) {
4026 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
4027 hist_data
= test
->private_data
;
4029 if (compatible_keys(target_hist_data
, hist_data
, n_keys
))
4037 static struct trace_event_file
*event_file(struct trace_array
*tr
,
4038 char *system
, char *event_name
)
4040 struct trace_event_file
*file
;
4042 file
= __find_event_file(tr
, system
, event_name
);
4044 return ERR_PTR(-EINVAL
);
4049 static struct hist_field
*
4050 find_synthetic_field_var(struct hist_trigger_data
*target_hist_data
,
4051 char *system
, char *event_name
, char *field_name
)
4053 struct hist_field
*event_var
;
4054 char *synthetic_name
;
4056 synthetic_name
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
4057 if (!synthetic_name
)
4058 return ERR_PTR(-ENOMEM
);
4060 strcpy(synthetic_name
, "synthetic_");
4061 strcat(synthetic_name
, field_name
);
4063 event_var
= find_event_var(target_hist_data
, system
, event_name
, synthetic_name
);
4065 kfree(synthetic_name
);
4071 * create_field_var_hist - Automatically create a histogram and var for a field
4072 * @target_hist_data: The target hist trigger
4073 * @subsys_name: Optional subsystem name
4074 * @event_name: Optional event name
4075 * @field_name: The name of the field (and the resulting variable)
4077 * Hist trigger actions fetch data from variables, not directly from
4078 * events. However, for convenience, users are allowed to directly
4079 * specify an event field in an action, which will be automatically
4080 * converted into a variable on their behalf.
4082 * If a user specifies a field on an event that isn't the event the
4083 * histogram currently being defined (the target event histogram), the
4084 * only way that can be accomplished is if a new hist trigger is
4085 * created and the field variable defined on that.
4087 * This function creates a new histogram compatible with the target
4088 * event (meaning a histogram with the same key as the target
4089 * histogram), and creates a variable for the specified field, but
4090 * with 'synthetic_' prepended to the variable name in order to avoid
4091 * collision with normal field variables.
4093 * Return: The variable created for the field.
4095 static struct hist_field
*
4096 create_field_var_hist(struct hist_trigger_data
*target_hist_data
,
4097 char *subsys_name
, char *event_name
, char *field_name
)
4099 struct trace_array
*tr
= target_hist_data
->event_file
->tr
;
4100 struct hist_field
*event_var
= ERR_PTR(-EINVAL
);
4101 struct hist_trigger_data
*hist_data
;
4102 unsigned int i
, n
, first
= true;
4103 struct field_var_hist
*var_hist
;
4104 struct trace_event_file
*file
;
4105 struct hist_field
*key_field
;
4110 if (target_hist_data
->n_field_var_hists
>= SYNTH_FIELDS_MAX
) {
4111 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
4112 return ERR_PTR(-EINVAL
);
4115 file
= event_file(tr
, subsys_name
, event_name
);
4118 hist_err(tr
, HIST_ERR_EVENT_FILE_NOT_FOUND
, errpos(field_name
));
4119 ret
= PTR_ERR(file
);
4120 return ERR_PTR(ret
);
4124 * Look for a histogram compatible with target. We'll use the
4125 * found histogram specification to create a new matching
4126 * histogram with our variable on it. target_hist_data is not
4127 * yet a registered histogram so we can't use that.
4129 hist_data
= find_compatible_hist(target_hist_data
, file
);
4131 hist_err(tr
, HIST_ERR_HIST_NOT_FOUND
, errpos(field_name
));
4132 return ERR_PTR(-EINVAL
);
4135 /* See if a synthetic field variable has already been created */
4136 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
4137 event_name
, field_name
);
4138 if (!IS_ERR_OR_NULL(event_var
))
4141 var_hist
= kzalloc(sizeof(*var_hist
), GFP_KERNEL
);
4143 return ERR_PTR(-ENOMEM
);
4145 cmd
= kzalloc(MAX_FILTER_STR_VAL
, GFP_KERNEL
);
4148 return ERR_PTR(-ENOMEM
);
4151 /* Use the same keys as the compatible histogram */
4152 strcat(cmd
, "keys=");
4154 for_each_hist_key_field(i
, hist_data
) {
4155 key_field
= hist_data
->fields
[i
];
4158 strcat(cmd
, key_field
->field
->name
);
4162 /* Create the synthetic field variable specification */
4163 strcat(cmd
, ":synthetic_");
4164 strcat(cmd
, field_name
);
4166 strcat(cmd
, field_name
);
4168 /* Use the same filter as the compatible histogram */
4169 saved_filter
= find_trigger_filter(hist_data
, file
);
4171 strcat(cmd
, " if ");
4172 strcat(cmd
, saved_filter
);
4175 var_hist
->cmd
= kstrdup(cmd
, GFP_KERNEL
);
4176 if (!var_hist
->cmd
) {
4179 return ERR_PTR(-ENOMEM
);
4182 /* Save the compatible histogram information */
4183 var_hist
->hist_data
= hist_data
;
4185 /* Create the new histogram with our variable */
4186 ret
= event_hist_trigger_func(&trigger_hist_cmd
, file
,
4190 kfree(var_hist
->cmd
);
4192 hist_err(tr
, HIST_ERR_HIST_CREATE_FAIL
, errpos(field_name
));
4193 return ERR_PTR(ret
);
4198 /* If we can't find the variable, something went wrong */
4199 event_var
= find_synthetic_field_var(target_hist_data
, subsys_name
,
4200 event_name
, field_name
);
4201 if (IS_ERR_OR_NULL(event_var
)) {
4202 kfree(var_hist
->cmd
);
4204 hist_err(tr
, HIST_ERR_SYNTH_VAR_NOT_FOUND
, errpos(field_name
));
4205 return ERR_PTR(-EINVAL
);
4208 n
= target_hist_data
->n_field_var_hists
;
4209 target_hist_data
->field_var_hists
[n
] = var_hist
;
4210 target_hist_data
->n_field_var_hists
++;
4215 static struct hist_field
*
4216 find_target_event_var(struct hist_trigger_data
*hist_data
,
4217 char *subsys_name
, char *event_name
, char *var_name
)
4219 struct trace_event_file
*file
= hist_data
->event_file
;
4220 struct hist_field
*hist_field
= NULL
;
4223 struct trace_event_call
*call
;
4228 call
= file
->event_call
;
4230 if (strcmp(subsys_name
, call
->class->system
) != 0)
4233 if (strcmp(event_name
, trace_event_name(call
)) != 0)
4237 hist_field
= find_var_field(hist_data
, var_name
);
4242 static inline void __update_field_vars(struct tracing_map_elt
*elt
,
4243 struct ring_buffer_event
*rbe
,
4245 struct field_var
**field_vars
,
4246 unsigned int n_field_vars
,
4247 unsigned int field_var_str_start
)
4249 struct hist_elt_data
*elt_data
= elt
->private_data
;
4250 unsigned int i
, j
, var_idx
;
4253 for (i
= 0, j
= field_var_str_start
; i
< n_field_vars
; i
++) {
4254 struct field_var
*field_var
= field_vars
[i
];
4255 struct hist_field
*var
= field_var
->var
;
4256 struct hist_field
*val
= field_var
->val
;
4258 var_val
= val
->fn(val
, elt
, rbe
, rec
);
4259 var_idx
= var
->var
.idx
;
4261 if (val
->flags
& HIST_FIELD_FL_STRING
) {
4262 char *str
= elt_data
->field_var_str
[j
++];
4263 char *val_str
= (char *)(uintptr_t)var_val
;
4265 strscpy(str
, val_str
, STR_VAR_LEN_MAX
);
4266 var_val
= (u64
)(uintptr_t)str
;
4268 tracing_map_set_var(elt
, var_idx
, var_val
);
4272 static void update_field_vars(struct hist_trigger_data
*hist_data
,
4273 struct tracing_map_elt
*elt
,
4274 struct ring_buffer_event
*rbe
,
4277 __update_field_vars(elt
, rbe
, rec
, hist_data
->field_vars
,
4278 hist_data
->n_field_vars
, 0);
4281 static void save_track_data_vars(struct hist_trigger_data
*hist_data
,
4282 struct tracing_map_elt
*elt
, void *rec
,
4283 struct ring_buffer_event
*rbe
, void *key
,
4284 struct action_data
*data
, u64
*var_ref_vals
)
4286 __update_field_vars(elt
, rbe
, rec
, hist_data
->save_vars
,
4287 hist_data
->n_save_vars
, hist_data
->n_field_var_str
);
4290 static struct hist_field
*create_var(struct hist_trigger_data
*hist_data
,
4291 struct trace_event_file
*file
,
4292 char *name
, int size
, const char *type
)
4294 struct hist_field
*var
;
4297 if (find_var(hist_data
, file
, name
) && !hist_data
->remove
) {
4298 var
= ERR_PTR(-EINVAL
);
4302 var
= kzalloc(sizeof(struct hist_field
), GFP_KERNEL
);
4304 var
= ERR_PTR(-ENOMEM
);
4308 idx
= tracing_map_add_var(hist_data
->map
);
4311 var
= ERR_PTR(-EINVAL
);
4315 var
->flags
= HIST_FIELD_FL_VAR
;
4317 var
->var
.hist_data
= var
->hist_data
= hist_data
;
4319 var
->var
.name
= kstrdup(name
, GFP_KERNEL
);
4320 var
->type
= kstrdup(type
, GFP_KERNEL
);
4321 if (!var
->var
.name
|| !var
->type
) {
4322 kfree(var
->var
.name
);
4325 var
= ERR_PTR(-ENOMEM
);
4331 static struct field_var
*create_field_var(struct hist_trigger_data
*hist_data
,
4332 struct trace_event_file
*file
,
4335 struct hist_field
*val
= NULL
, *var
= NULL
;
4336 unsigned long flags
= HIST_FIELD_FL_VAR
;
4337 struct trace_array
*tr
= file
->tr
;
4338 struct field_var
*field_var
;
4341 if (hist_data
->n_field_vars
>= SYNTH_FIELDS_MAX
) {
4342 hist_err(tr
, HIST_ERR_TOO_MANY_FIELD_VARS
, errpos(field_name
));
4347 val
= parse_atom(hist_data
, file
, field_name
, &flags
, NULL
);
4349 hist_err(tr
, HIST_ERR_FIELD_VAR_PARSE_FAIL
, errpos(field_name
));
4354 var
= create_var(hist_data
, file
, field_name
, val
->size
, val
->type
);
4356 hist_err(tr
, HIST_ERR_VAR_CREATE_FIND_FAIL
, errpos(field_name
));
4362 field_var
= kzalloc(sizeof(struct field_var
), GFP_KERNEL
);
4370 field_var
->var
= var
;
4371 field_var
->val
= val
;
4375 field_var
= ERR_PTR(ret
);
4380 * create_target_field_var - Automatically create a variable for a field
4381 * @target_hist_data: The target hist trigger
4382 * @subsys_name: Optional subsystem name
4383 * @event_name: Optional event name
4384 * @var_name: The name of the field (and the resulting variable)
4386 * Hist trigger actions fetch data from variables, not directly from
4387 * events. However, for convenience, users are allowed to directly
4388 * specify an event field in an action, which will be automatically
4389 * converted into a variable on their behalf.
4391 * This function creates a field variable with the name var_name on
4392 * the hist trigger currently being defined on the target event. If
4393 * subsys_name and event_name are specified, this function simply
4394 * verifies that they do in fact match the target event subsystem and
4397 * Return: The variable created for the field.
4399 static struct field_var
*
4400 create_target_field_var(struct hist_trigger_data
*target_hist_data
,
4401 char *subsys_name
, char *event_name
, char *var_name
)
4403 struct trace_event_file
*file
= target_hist_data
->event_file
;
4406 struct trace_event_call
*call
;
4411 call
= file
->event_call
;
4413 if (strcmp(subsys_name
, call
->class->system
) != 0)
4416 if (strcmp(event_name
, trace_event_name(call
)) != 0)
4420 return create_field_var(target_hist_data
, file
, var_name
);
4423 static bool check_track_val_max(u64 track_val
, u64 var_val
)
4425 if (var_val
<= track_val
)
4431 static bool check_track_val_changed(u64 track_val
, u64 var_val
)
4433 if (var_val
== track_val
)
4439 static u64
get_track_val(struct hist_trigger_data
*hist_data
,
4440 struct tracing_map_elt
*elt
,
4441 struct action_data
*data
)
4443 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
4446 track_val
= tracing_map_read_var(elt
, track_var_idx
);
4451 static void save_track_val(struct hist_trigger_data
*hist_data
,
4452 struct tracing_map_elt
*elt
,
4453 struct action_data
*data
, u64 var_val
)
4455 unsigned int track_var_idx
= data
->track_data
.track_var
->var
.idx
;
4457 tracing_map_set_var(elt
, track_var_idx
, var_val
);
4460 static void save_track_data(struct hist_trigger_data
*hist_data
,
4461 struct tracing_map_elt
*elt
, void *rec
,
4462 struct ring_buffer_event
*rbe
, void *key
,
4463 struct action_data
*data
, u64
*var_ref_vals
)
4465 if (data
->track_data
.save_data
)
4466 data
->track_data
.save_data(hist_data
, elt
, rec
, rbe
, key
, data
, var_ref_vals
);
4469 static bool check_track_val(struct tracing_map_elt
*elt
,
4470 struct action_data
*data
,
4473 struct hist_trigger_data
*hist_data
;
4476 hist_data
= data
->track_data
.track_var
->hist_data
;
4477 track_val
= get_track_val(hist_data
, elt
, data
);
4479 return data
->track_data
.check_val(track_val
, var_val
);
4482 #ifdef CONFIG_TRACER_SNAPSHOT
4483 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
4485 /* called with tr->max_lock held */
4486 struct track_data
*track_data
= tr
->cond_snapshot
->cond_data
;
4487 struct hist_elt_data
*elt_data
, *track_elt_data
;
4488 struct snapshot_context
*context
= cond_data
;
4489 struct action_data
*action
;
4495 action
= track_data
->action_data
;
4497 track_val
= get_track_val(track_data
->hist_data
, context
->elt
,
4498 track_data
->action_data
);
4500 if (!action
->track_data
.check_val(track_data
->track_val
, track_val
))
4503 track_data
->track_val
= track_val
;
4504 memcpy(track_data
->key
, context
->key
, track_data
->key_len
);
4506 elt_data
= context
->elt
->private_data
;
4507 track_elt_data
= track_data
->elt
.private_data
;
4509 strncpy(track_elt_data
->comm
, elt_data
->comm
, TASK_COMM_LEN
);
4511 track_data
->updated
= true;
4516 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
4517 struct tracing_map_elt
*elt
, void *rec
,
4518 struct ring_buffer_event
*rbe
, void *key
,
4519 struct action_data
*data
,
4522 struct trace_event_file
*file
= hist_data
->event_file
;
4523 struct snapshot_context context
;
4528 tracing_snapshot_cond(file
->tr
, &context
);
/* Forward declaration; defined later in this file. */
static void hist_trigger_print_key(struct seq_file *m,
				   struct hist_trigger_data *hist_data,
				   void *key,
				   struct tracing_map_elt *elt);
4536 static struct action_data
*snapshot_action(struct hist_trigger_data
*hist_data
)
4540 if (!hist_data
->n_actions
)
4543 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
4544 struct action_data
*data
= hist_data
->actions
[i
];
4546 if (data
->action
== ACTION_SNAPSHOT
)
4553 static void track_data_snapshot_print(struct seq_file
*m
,
4554 struct hist_trigger_data
*hist_data
)
4556 struct trace_event_file
*file
= hist_data
->event_file
;
4557 struct track_data
*track_data
;
4558 struct action_data
*action
;
4560 track_data
= tracing_cond_snapshot_data(file
->tr
);
4564 if (!track_data
->updated
)
4567 action
= snapshot_action(hist_data
);
4571 seq_puts(m
, "\nSnapshot taken (see tracing/snapshot). Details:\n");
4572 seq_printf(m
, "\ttriggering value { %s(%s) }: %10llu",
4573 action
->handler
== HANDLER_ONMAX
? "onmax" : "onchange",
4574 action
->track_data
.var_str
, track_data
->track_val
);
4576 seq_puts(m
, "\ttriggered by event with key: ");
4577 hist_trigger_print_key(m
, hist_data
, track_data
->key
, &track_data
->elt
);
4581 static bool cond_snapshot_update(struct trace_array
*tr
, void *cond_data
)
4585 static void save_track_data_snapshot(struct hist_trigger_data
*hist_data
,
4586 struct tracing_map_elt
*elt
, void *rec
,
4587 struct ring_buffer_event
*rbe
, void *key
,
4588 struct action_data
*data
,
4589 u64
*var_ref_vals
) {}
4590 static void track_data_snapshot_print(struct seq_file
*m
,
4591 struct hist_trigger_data
*hist_data
) {}
4592 #endif /* CONFIG_TRACER_SNAPSHOT */
4594 static void track_data_print(struct seq_file
*m
,
4595 struct hist_trigger_data
*hist_data
,
4596 struct tracing_map_elt
*elt
,
4597 struct action_data
*data
)
4599 u64 track_val
= get_track_val(hist_data
, elt
, data
);
4600 unsigned int i
, save_var_idx
;
4602 if (data
->handler
== HANDLER_ONMAX
)
4603 seq_printf(m
, "\n\tmax: %10llu", track_val
);
4604 else if (data
->handler
== HANDLER_ONCHANGE
)
4605 seq_printf(m
, "\n\tchanged: %10llu", track_val
);
4607 if (data
->action
== ACTION_SNAPSHOT
)
4610 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
4611 struct hist_field
*save_val
= hist_data
->save_vars
[i
]->val
;
4612 struct hist_field
*save_var
= hist_data
->save_vars
[i
]->var
;
4615 save_var_idx
= save_var
->var
.idx
;
4617 val
= tracing_map_read_var(elt
, save_var_idx
);
4619 if (save_val
->flags
& HIST_FIELD_FL_STRING
) {
4620 seq_printf(m
, " %s: %-32s", save_var
->var
.name
,
4621 (char *)(uintptr_t)(val
));
4623 seq_printf(m
, " %s: %10llu", save_var
->var
.name
, val
);
4627 static void ontrack_action(struct hist_trigger_data
*hist_data
,
4628 struct tracing_map_elt
*elt
, void *rec
,
4629 struct ring_buffer_event
*rbe
, void *key
,
4630 struct action_data
*data
, u64
*var_ref_vals
)
4632 u64 var_val
= var_ref_vals
[data
->track_data
.var_ref
->var_ref_idx
];
4634 if (check_track_val(elt
, data
, var_val
)) {
4635 save_track_val(hist_data
, elt
, data
, var_val
);
4636 save_track_data(hist_data
, elt
, rec
, rbe
, key
, data
, var_ref_vals
);
4640 static void action_data_destroy(struct action_data
*data
)
4644 lockdep_assert_held(&event_mutex
);
4646 kfree(data
->action_name
);
4648 for (i
= 0; i
< data
->n_params
; i
++)
4649 kfree(data
->params
[i
]);
4651 if (data
->synth_event
)
4652 data
->synth_event
->ref
--;
4654 kfree(data
->synth_event_name
);
4659 static void track_data_destroy(struct hist_trigger_data
*hist_data
,
4660 struct action_data
*data
)
4662 struct trace_event_file
*file
= hist_data
->event_file
;
4664 destroy_hist_field(data
->track_data
.track_var
, 0);
4666 if (data
->action
== ACTION_SNAPSHOT
) {
4667 struct track_data
*track_data
;
4669 track_data
= tracing_cond_snapshot_data(file
->tr
);
4670 if (track_data
&& track_data
->hist_data
== hist_data
) {
4671 tracing_snapshot_cond_disable(file
->tr
);
4672 track_data_free(track_data
);
4676 kfree(data
->track_data
.var_str
);
4678 action_data_destroy(data
);
/* Forward declaration; defined later in this file. */
static int action_create(struct hist_trigger_data *hist_data,
			 struct action_data *data);
4684 static int track_data_create(struct hist_trigger_data
*hist_data
,
4685 struct action_data
*data
)
4687 struct hist_field
*var_field
, *ref_field
, *track_var
= NULL
;
4688 struct trace_event_file
*file
= hist_data
->event_file
;
4689 struct trace_array
*tr
= file
->tr
;
4690 char *track_data_var_str
;
4693 track_data_var_str
= data
->track_data
.var_str
;
4694 if (track_data_var_str
[0] != '$') {
4695 hist_err(tr
, HIST_ERR_ONX_NOT_VAR
, errpos(track_data_var_str
));
4698 track_data_var_str
++;
4700 var_field
= find_target_event_var(hist_data
, NULL
, NULL
, track_data_var_str
);
4702 hist_err(tr
, HIST_ERR_ONX_VAR_NOT_FOUND
, errpos(track_data_var_str
));
4706 ref_field
= create_var_ref(hist_data
, var_field
, NULL
, NULL
);
4710 data
->track_data
.var_ref
= ref_field
;
4712 if (data
->handler
== HANDLER_ONMAX
)
4713 track_var
= create_var(hist_data
, file
, "__max", sizeof(u64
), "u64");
4714 if (IS_ERR(track_var
)) {
4715 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
4716 ret
= PTR_ERR(track_var
);
4720 if (data
->handler
== HANDLER_ONCHANGE
)
4721 track_var
= create_var(hist_data
, file
, "__change", sizeof(u64
), "u64");
4722 if (IS_ERR(track_var
)) {
4723 hist_err(tr
, HIST_ERR_ONX_VAR_CREATE_FAIL
, 0);
4724 ret
= PTR_ERR(track_var
);
4727 data
->track_data
.track_var
= track_var
;
4729 ret
= action_create(hist_data
, data
);
4734 static int parse_action_params(struct trace_array
*tr
, char *params
,
4735 struct action_data
*data
)
4737 char *param
, *saved_param
;
4738 bool first_param
= true;
4742 if (data
->n_params
>= SYNTH_FIELDS_MAX
) {
4743 hist_err(tr
, HIST_ERR_TOO_MANY_PARAMS
, 0);
4747 param
= strsep(¶ms
, ",");
4749 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, 0);
4754 param
= strstrip(param
);
4755 if (strlen(param
) < 2) {
4756 hist_err(tr
, HIST_ERR_INVALID_PARAM
, errpos(param
));
4761 saved_param
= kstrdup(param
, GFP_KERNEL
);
4767 if (first_param
&& data
->use_trace_keyword
) {
4768 data
->synth_event_name
= saved_param
;
4769 first_param
= false;
4772 first_param
= false;
4774 data
->params
[data
->n_params
++] = saved_param
;
4780 static int action_parse(struct trace_array
*tr
, char *str
, struct action_data
*data
,
4781 enum handler_id handler
)
4788 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
4793 action_name
= strsep(&str
, "(");
4794 if (!action_name
|| !str
) {
4795 hist_err(tr
, HIST_ERR_ACTION_NOT_FOUND
, 0);
4800 if (str_has_prefix(action_name
, "save")) {
4801 char *params
= strsep(&str
, ")");
4804 hist_err(tr
, HIST_ERR_NO_SAVE_PARAMS
, 0);
4809 ret
= parse_action_params(tr
, params
, data
);
4813 if (handler
== HANDLER_ONMAX
)
4814 data
->track_data
.check_val
= check_track_val_max
;
4815 else if (handler
== HANDLER_ONCHANGE
)
4816 data
->track_data
.check_val
= check_track_val_changed
;
4818 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
4823 data
->track_data
.save_data
= save_track_data_vars
;
4824 data
->fn
= ontrack_action
;
4825 data
->action
= ACTION_SAVE
;
4826 } else if (str_has_prefix(action_name
, "snapshot")) {
4827 char *params
= strsep(&str
, ")");
4830 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(params
));
4835 if (handler
== HANDLER_ONMAX
)
4836 data
->track_data
.check_val
= check_track_val_max
;
4837 else if (handler
== HANDLER_ONCHANGE
)
4838 data
->track_data
.check_val
= check_track_val_changed
;
4840 hist_err(tr
, HIST_ERR_ACTION_MISMATCH
, errpos(action_name
));
4845 data
->track_data
.save_data
= save_track_data_snapshot
;
4846 data
->fn
= ontrack_action
;
4847 data
->action
= ACTION_SNAPSHOT
;
4849 char *params
= strsep(&str
, ")");
4851 if (str_has_prefix(action_name
, "trace"))
4852 data
->use_trace_keyword
= true;
4855 ret
= parse_action_params(tr
, params
, data
);
4860 if (handler
== HANDLER_ONMAX
)
4861 data
->track_data
.check_val
= check_track_val_max
;
4862 else if (handler
== HANDLER_ONCHANGE
)
4863 data
->track_data
.check_val
= check_track_val_changed
;
4865 if (handler
!= HANDLER_ONMATCH
) {
4866 data
->track_data
.save_data
= action_trace
;
4867 data
->fn
= ontrack_action
;
4869 data
->fn
= action_trace
;
4871 data
->action
= ACTION_TRACE
;
4874 data
->action_name
= kstrdup(action_name
, GFP_KERNEL
);
4875 if (!data
->action_name
) {
4880 data
->handler
= handler
;
4885 static struct action_data
*track_data_parse(struct hist_trigger_data
*hist_data
,
4886 char *str
, enum handler_id handler
)
4888 struct action_data
*data
;
4892 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
4894 return ERR_PTR(-ENOMEM
);
4896 var_str
= strsep(&str
, ")");
4897 if (!var_str
|| !str
) {
4902 data
->track_data
.var_str
= kstrdup(var_str
, GFP_KERNEL
);
4903 if (!data
->track_data
.var_str
) {
4908 ret
= action_parse(hist_data
->event_file
->tr
, str
, data
, handler
);
4914 track_data_destroy(hist_data
, data
);
4915 data
= ERR_PTR(ret
);
4919 static void onmatch_destroy(struct action_data
*data
)
4921 kfree(data
->match_data
.event
);
4922 kfree(data
->match_data
.event_system
);
4924 action_data_destroy(data
);
4927 static void destroy_field_var(struct field_var
*field_var
)
4932 destroy_hist_field(field_var
->var
, 0);
4933 destroy_hist_field(field_var
->val
, 0);
4938 static void destroy_field_vars(struct hist_trigger_data
*hist_data
)
4942 for (i
= 0; i
< hist_data
->n_field_vars
; i
++)
4943 destroy_field_var(hist_data
->field_vars
[i
]);
4946 static void save_field_var(struct hist_trigger_data
*hist_data
,
4947 struct field_var
*field_var
)
4949 hist_data
->field_vars
[hist_data
->n_field_vars
++] = field_var
;
4951 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
4952 hist_data
->n_field_var_str
++;
4956 static int check_synth_field(struct synth_event
*event
,
4957 struct hist_field
*hist_field
,
4958 unsigned int field_pos
)
4960 struct synth_field
*field
;
4962 if (field_pos
>= event
->n_fields
)
4965 field
= event
->fields
[field_pos
];
4967 if (strcmp(field
->type
, hist_field
->type
) != 0) {
4968 if (field
->size
!= hist_field
->size
||
4969 field
->is_signed
!= hist_field
->is_signed
)
4976 static struct hist_field
*
4977 trace_action_find_var(struct hist_trigger_data
*hist_data
,
4978 struct action_data
*data
,
4979 char *system
, char *event
, char *var
)
4981 struct trace_array
*tr
= hist_data
->event_file
->tr
;
4982 struct hist_field
*hist_field
;
4984 var
++; /* skip '$' */
4986 hist_field
= find_target_event_var(hist_data
, system
, event
, var
);
4988 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
4989 system
= data
->match_data
.event_system
;
4990 event
= data
->match_data
.event
;
4993 hist_field
= find_event_var(hist_data
, system
, event
, var
);
4997 hist_err(tr
, HIST_ERR_PARAM_NOT_FOUND
, errpos(var
));
5002 static struct hist_field
*
5003 trace_action_create_field_var(struct hist_trigger_data
*hist_data
,
5004 struct action_data
*data
, char *system
,
5005 char *event
, char *var
)
5007 struct hist_field
*hist_field
= NULL
;
5008 struct field_var
*field_var
;
5011 * First try to create a field var on the target event (the
5012 * currently being defined). This will create a variable for
5013 * unqualified fields on the target event, or if qualified,
5014 * target fields that have qualified names matching the target.
5016 field_var
= create_target_field_var(hist_data
, system
, event
, var
);
5018 if (field_var
&& !IS_ERR(field_var
)) {
5019 save_field_var(hist_data
, field_var
);
5020 hist_field
= field_var
->var
;
5024 * If no explicit system.event is specfied, default to
5025 * looking for fields on the onmatch(system.event.xxx)
5028 if (!system
&& data
->handler
== HANDLER_ONMATCH
) {
5029 system
= data
->match_data
.event_system
;
5030 event
= data
->match_data
.event
;
5034 * At this point, we're looking at a field on another
5035 * event. Because we can't modify a hist trigger on
5036 * another event to add a variable for a field, we need
5037 * to create a new trigger on that event and create the
5038 * variable at the same time.
5040 hist_field
= create_field_var_hist(hist_data
, system
, event
, var
);
5041 if (IS_ERR(hist_field
))
5047 destroy_field_var(field_var
);
5052 static int trace_action_create(struct hist_trigger_data
*hist_data
,
5053 struct action_data
*data
)
5055 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5056 char *event_name
, *param
, *system
= NULL
;
5057 struct hist_field
*hist_field
, *var_ref
;
5059 unsigned int field_pos
= 0;
5060 struct synth_event
*event
;
5061 char *synth_event_name
;
5062 int var_ref_idx
, ret
= 0;
5064 lockdep_assert_held(&event_mutex
);
5066 if (data
->use_trace_keyword
)
5067 synth_event_name
= data
->synth_event_name
;
5069 synth_event_name
= data
->action_name
;
5071 event
= find_synth_event(synth_event_name
);
5073 hist_err(tr
, HIST_ERR_SYNTH_EVENT_NOT_FOUND
, errpos(synth_event_name
));
5079 for (i
= 0; i
< data
->n_params
; i
++) {
5082 p
= param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
5088 system
= strsep(¶m
, ".");
5090 param
= (char *)system
;
5091 system
= event_name
= NULL
;
5093 event_name
= strsep(¶m
, ".");
5101 if (param
[0] == '$')
5102 hist_field
= trace_action_find_var(hist_data
, data
,
5106 hist_field
= trace_action_create_field_var(hist_data
,
5118 if (check_synth_field(event
, hist_field
, field_pos
) == 0) {
5119 var_ref
= create_var_ref(hist_data
, hist_field
,
5120 system
, event_name
);
5127 var_ref_idx
= find_var_ref_idx(hist_data
, var_ref
);
5128 if (WARN_ON(var_ref_idx
< 0)) {
5133 data
->var_ref_idx
[i
] = var_ref_idx
;
5140 hist_err(tr
, HIST_ERR_SYNTH_TYPE_MISMATCH
, errpos(param
));
5146 if (field_pos
!= event
->n_fields
) {
5147 hist_err(tr
, HIST_ERR_SYNTH_COUNT_MISMATCH
, errpos(event
->name
));
5152 data
->synth_event
= event
;
5161 static int action_create(struct hist_trigger_data
*hist_data
,
5162 struct action_data
*data
)
5164 struct trace_event_file
*file
= hist_data
->event_file
;
5165 struct trace_array
*tr
= file
->tr
;
5166 struct track_data
*track_data
;
5167 struct field_var
*field_var
;
5172 if (data
->action
== ACTION_TRACE
)
5173 return trace_action_create(hist_data
, data
);
5175 if (data
->action
== ACTION_SNAPSHOT
) {
5176 track_data
= track_data_alloc(hist_data
->key_size
, data
, hist_data
);
5177 if (IS_ERR(track_data
)) {
5178 ret
= PTR_ERR(track_data
);
5182 ret
= tracing_snapshot_cond_enable(file
->tr
, track_data
,
5183 cond_snapshot_update
);
5185 track_data_free(track_data
);
5190 if (data
->action
== ACTION_SAVE
) {
5191 if (hist_data
->n_save_vars
) {
5193 hist_err(tr
, HIST_ERR_TOO_MANY_SAVE_ACTIONS
, 0);
5197 for (i
= 0; i
< data
->n_params
; i
++) {
5198 param
= kstrdup(data
->params
[i
], GFP_KERNEL
);
5204 field_var
= create_target_field_var(hist_data
, NULL
, NULL
, param
);
5205 if (IS_ERR(field_var
)) {
5206 hist_err(tr
, HIST_ERR_FIELD_VAR_CREATE_FAIL
,
5208 ret
= PTR_ERR(field_var
);
5213 hist_data
->save_vars
[hist_data
->n_save_vars
++] = field_var
;
5214 if (field_var
->val
->flags
& HIST_FIELD_FL_STRING
)
5215 hist_data
->n_save_var_str
++;
/* onmatch has no extra setup beyond the common action creation. */
static int onmatch_create(struct hist_trigger_data *hist_data,
			  struct action_data *data)
{
	return action_create(hist_data, data);
}
5229 static struct action_data
*onmatch_parse(struct trace_array
*tr
, char *str
)
5231 char *match_event
, *match_event_system
;
5232 struct action_data
*data
;
5235 data
= kzalloc(sizeof(*data
), GFP_KERNEL
);
5237 return ERR_PTR(-ENOMEM
);
5239 match_event
= strsep(&str
, ")");
5240 if (!match_event
|| !str
) {
5241 hist_err(tr
, HIST_ERR_NO_CLOSING_PAREN
, errpos(match_event
));
5245 match_event_system
= strsep(&match_event
, ".");
5247 hist_err(tr
, HIST_ERR_SUBSYS_NOT_FOUND
, errpos(match_event_system
));
5251 if (IS_ERR(event_file(tr
, match_event_system
, match_event
))) {
5252 hist_err(tr
, HIST_ERR_INVALID_SUBSYS_EVENT
, errpos(match_event
));
5256 data
->match_data
.event
= kstrdup(match_event
, GFP_KERNEL
);
5257 if (!data
->match_data
.event
) {
5262 data
->match_data
.event_system
= kstrdup(match_event_system
, GFP_KERNEL
);
5263 if (!data
->match_data
.event_system
) {
5268 ret
= action_parse(tr
, str
, data
, HANDLER_ONMATCH
);
5274 onmatch_destroy(data
);
5275 data
= ERR_PTR(ret
);
5279 static int create_hitcount_val(struct hist_trigger_data
*hist_data
)
5281 hist_data
->fields
[HITCOUNT_IDX
] =
5282 create_hist_field(hist_data
, NULL
, HIST_FIELD_FL_HITCOUNT
, NULL
);
5283 if (!hist_data
->fields
[HITCOUNT_IDX
])
5286 hist_data
->n_vals
++;
5287 hist_data
->n_fields
++;
5289 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
))
5295 static int __create_val_field(struct hist_trigger_data
*hist_data
,
5296 unsigned int val_idx
,
5297 struct trace_event_file
*file
,
5298 char *var_name
, char *field_str
,
5299 unsigned long flags
)
5301 struct hist_field
*hist_field
;
5304 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
, var_name
, 0);
5305 if (IS_ERR(hist_field
)) {
5306 ret
= PTR_ERR(hist_field
);
5310 hist_data
->fields
[val_idx
] = hist_field
;
5312 ++hist_data
->n_vals
;
5313 ++hist_data
->n_fields
;
5315 if (WARN_ON(hist_data
->n_vals
> TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
5321 static int create_val_field(struct hist_trigger_data
*hist_data
,
5322 unsigned int val_idx
,
5323 struct trace_event_file
*file
,
5326 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
))
5329 return __create_val_field(hist_data
, val_idx
, file
, NULL
, field_str
, 0);
5332 static int create_var_field(struct hist_trigger_data
*hist_data
,
5333 unsigned int val_idx
,
5334 struct trace_event_file
*file
,
5335 char *var_name
, char *expr_str
)
5337 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5338 unsigned long flags
= 0;
5340 if (WARN_ON(val_idx
>= TRACING_MAP_VALS_MAX
+ TRACING_MAP_VARS_MAX
))
5343 if (find_var(hist_data
, file
, var_name
) && !hist_data
->remove
) {
5344 hist_err(tr
, HIST_ERR_DUPLICATE_VAR
, errpos(var_name
));
5348 flags
|= HIST_FIELD_FL_VAR
;
5349 hist_data
->n_vars
++;
5350 if (WARN_ON(hist_data
->n_vars
> TRACING_MAP_VARS_MAX
))
5353 return __create_val_field(hist_data
, val_idx
, file
, var_name
, expr_str
, flags
);
5356 static int create_val_fields(struct hist_trigger_data
*hist_data
,
5357 struct trace_event_file
*file
)
5359 char *fields_str
, *field_str
;
5360 unsigned int i
, j
= 1;
5363 ret
= create_hitcount_val(hist_data
);
5367 fields_str
= hist_data
->attrs
->vals_str
;
5371 for (i
= 0, j
= 1; i
< TRACING_MAP_VALS_MAX
&&
5372 j
< TRACING_MAP_VALS_MAX
; i
++) {
5373 field_str
= strsep(&fields_str
, ",");
5377 if (strcmp(field_str
, "hitcount") == 0)
5380 ret
= create_val_field(hist_data
, j
++, file
, field_str
);
5385 if (fields_str
&& (strcmp(fields_str
, "hitcount") != 0))
5391 static int create_key_field(struct hist_trigger_data
*hist_data
,
5392 unsigned int key_idx
,
5393 unsigned int key_offset
,
5394 struct trace_event_file
*file
,
5397 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5398 struct hist_field
*hist_field
= NULL
;
5399 unsigned long flags
= 0;
5400 unsigned int key_size
;
5403 if (WARN_ON(key_idx
>= HIST_FIELDS_MAX
))
5406 flags
|= HIST_FIELD_FL_KEY
;
5408 if (strcmp(field_str
, "stacktrace") == 0) {
5409 flags
|= HIST_FIELD_FL_STACKTRACE
;
5410 key_size
= sizeof(unsigned long) * HIST_STACKTRACE_DEPTH
;
5411 hist_field
= create_hist_field(hist_data
, NULL
, flags
, NULL
);
5413 hist_field
= parse_expr(hist_data
, file
, field_str
, flags
,
5415 if (IS_ERR(hist_field
)) {
5416 ret
= PTR_ERR(hist_field
);
5420 if (field_has_hist_vars(hist_field
, 0)) {
5421 hist_err(tr
, HIST_ERR_INVALID_REF_KEY
, errpos(field_str
));
5422 destroy_hist_field(hist_field
, 0);
5427 key_size
= hist_field
->size
;
5430 hist_data
->fields
[key_idx
] = hist_field
;
5432 key_size
= ALIGN(key_size
, sizeof(u64
));
5433 hist_data
->fields
[key_idx
]->size
= key_size
;
5434 hist_data
->fields
[key_idx
]->offset
= key_offset
;
5436 hist_data
->key_size
+= key_size
;
5438 if (hist_data
->key_size
> HIST_KEY_SIZE_MAX
) {
5443 hist_data
->n_keys
++;
5444 hist_data
->n_fields
++;
5446 if (WARN_ON(hist_data
->n_keys
> TRACING_MAP_KEYS_MAX
))
5454 static int create_key_fields(struct hist_trigger_data
*hist_data
,
5455 struct trace_event_file
*file
)
5457 unsigned int i
, key_offset
= 0, n_vals
= hist_data
->n_vals
;
5458 char *fields_str
, *field_str
;
5461 fields_str
= hist_data
->attrs
->keys_str
;
5465 for (i
= n_vals
; i
< n_vals
+ TRACING_MAP_KEYS_MAX
; i
++) {
5466 field_str
= strsep(&fields_str
, ",");
5469 ret
= create_key_field(hist_data
, i
, key_offset
,
5484 static int create_var_fields(struct hist_trigger_data
*hist_data
,
5485 struct trace_event_file
*file
)
5487 unsigned int i
, j
= hist_data
->n_vals
;
5490 unsigned int n_vars
= hist_data
->attrs
->var_defs
.n_vars
;
5492 for (i
= 0; i
< n_vars
; i
++) {
5493 char *var_name
= hist_data
->attrs
->var_defs
.name
[i
];
5494 char *expr
= hist_data
->attrs
->var_defs
.expr
[i
];
5496 ret
= create_var_field(hist_data
, j
++, file
, var_name
, expr
);
5504 static void free_var_defs(struct hist_trigger_data
*hist_data
)
5508 for (i
= 0; i
< hist_data
->attrs
->var_defs
.n_vars
; i
++) {
5509 kfree(hist_data
->attrs
->var_defs
.name
[i
]);
5510 kfree(hist_data
->attrs
->var_defs
.expr
[i
]);
5513 hist_data
->attrs
->var_defs
.n_vars
= 0;
5516 static int parse_var_defs(struct hist_trigger_data
*hist_data
)
5518 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5519 char *s
, *str
, *var_name
, *field_str
;
5520 unsigned int i
, j
, n_vars
= 0;
5523 for (i
= 0; i
< hist_data
->attrs
->n_assignments
; i
++) {
5524 str
= hist_data
->attrs
->assignment_str
[i
];
5525 for (j
= 0; j
< TRACING_MAP_VARS_MAX
; j
++) {
5526 field_str
= strsep(&str
, ",");
5530 var_name
= strsep(&field_str
, "=");
5531 if (!var_name
|| !field_str
) {
5532 hist_err(tr
, HIST_ERR_MALFORMED_ASSIGNMENT
,
5538 if (n_vars
== TRACING_MAP_VARS_MAX
) {
5539 hist_err(tr
, HIST_ERR_TOO_MANY_VARS
, errpos(var_name
));
5544 s
= kstrdup(var_name
, GFP_KERNEL
);
5549 hist_data
->attrs
->var_defs
.name
[n_vars
] = s
;
5551 s
= kstrdup(field_str
, GFP_KERNEL
);
5553 kfree(hist_data
->attrs
->var_defs
.name
[n_vars
]);
5557 hist_data
->attrs
->var_defs
.expr
[n_vars
++] = s
;
5559 hist_data
->attrs
->var_defs
.n_vars
= n_vars
;
5565 free_var_defs(hist_data
);
/*
 * Create all histogram fields: vals, variables, then keys.  The
 * parsed var-def strings are freed regardless of the outcome.
 */
static int create_hist_fields(struct hist_trigger_data *hist_data,
			      struct trace_event_file *file)
{
	int ret;

	ret = parse_var_defs(hist_data);
	if (ret)
		goto out;

	ret = create_val_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_var_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_key_fields(hist_data, file);
	if (ret)
		goto out;
 out:
	free_var_defs(hist_data);

	return ret;
}
5596 static int is_descending(struct trace_array
*tr
, const char *str
)
5601 if (strcmp(str
, "descending") == 0)
5604 if (strcmp(str
, "ascending") == 0)
5607 hist_err(tr
, HIST_ERR_INVALID_SORT_MODIFIER
, errpos((char *)str
));
5612 static int create_sort_keys(struct hist_trigger_data
*hist_data
)
5614 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5615 char *fields_str
= hist_data
->attrs
->sort_key_str
;
5616 struct tracing_map_sort_key
*sort_key
;
5617 int descending
, ret
= 0;
5618 unsigned int i
, j
, k
;
5620 hist_data
->n_sort_keys
= 1; /* we always have at least one, hitcount */
5625 for (i
= 0; i
< TRACING_MAP_SORT_KEYS_MAX
; i
++) {
5626 struct hist_field
*hist_field
;
5627 char *field_str
, *field_name
;
5628 const char *test_name
;
5630 sort_key
= &hist_data
->sort_keys
[i
];
5632 field_str
= strsep(&fields_str
, ",");
5638 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
5642 if ((i
== TRACING_MAP_SORT_KEYS_MAX
- 1) && fields_str
) {
5643 hist_err(tr
, HIST_ERR_TOO_MANY_SORT_FIELDS
, errpos("sort="));
5648 field_name
= strsep(&field_str
, ".");
5649 if (!field_name
|| !*field_name
) {
5651 hist_err(tr
, HIST_ERR_EMPTY_SORT_FIELD
, errpos("sort="));
5655 if (strcmp(field_name
, "hitcount") == 0) {
5656 descending
= is_descending(tr
, field_str
);
5657 if (descending
< 0) {
5661 sort_key
->descending
= descending
;
5665 for (j
= 1, k
= 1; j
< hist_data
->n_fields
; j
++) {
5668 hist_field
= hist_data
->fields
[j
];
5669 if (hist_field
->flags
& HIST_FIELD_FL_VAR
)
5674 test_name
= hist_field_name(hist_field
, 0);
5676 if (strcmp(field_name
, test_name
) == 0) {
5677 sort_key
->field_idx
= idx
;
5678 descending
= is_descending(tr
, field_str
);
5679 if (descending
< 0) {
5683 sort_key
->descending
= descending
;
5687 if (j
== hist_data
->n_fields
) {
5689 hist_err(tr
, HIST_ERR_INVALID_SORT_FIELD
, errpos(field_name
));
5694 hist_data
->n_sort_keys
= i
;
5699 static void destroy_actions(struct hist_trigger_data
*hist_data
)
5703 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5704 struct action_data
*data
= hist_data
->actions
[i
];
5706 if (data
->handler
== HANDLER_ONMATCH
)
5707 onmatch_destroy(data
);
5708 else if (data
->handler
== HANDLER_ONMAX
||
5709 data
->handler
== HANDLER_ONCHANGE
)
5710 track_data_destroy(hist_data
, data
);
5716 static int parse_actions(struct hist_trigger_data
*hist_data
)
5718 struct trace_array
*tr
= hist_data
->event_file
->tr
;
5719 struct action_data
*data
;
5725 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
5726 str
= hist_data
->attrs
->action_str
[i
];
5728 if ((len
= str_has_prefix(str
, "onmatch("))) {
5729 char *action_str
= str
+ len
;
5731 data
= onmatch_parse(tr
, action_str
);
5733 ret
= PTR_ERR(data
);
5736 } else if ((len
= str_has_prefix(str
, "onmax("))) {
5737 char *action_str
= str
+ len
;
5739 data
= track_data_parse(hist_data
, action_str
,
5742 ret
= PTR_ERR(data
);
5745 } else if ((len
= str_has_prefix(str
, "onchange("))) {
5746 char *action_str
= str
+ len
;
5748 data
= track_data_parse(hist_data
, action_str
,
5751 ret
= PTR_ERR(data
);
5759 hist_data
->actions
[hist_data
->n_actions
++] = data
;
5765 static int create_actions(struct hist_trigger_data
*hist_data
)
5767 struct action_data
*data
;
5771 for (i
= 0; i
< hist_data
->attrs
->n_actions
; i
++) {
5772 data
= hist_data
->actions
[i
];
5774 if (data
->handler
== HANDLER_ONMATCH
) {
5775 ret
= onmatch_create(hist_data
, data
);
5778 } else if (data
->handler
== HANDLER_ONMAX
||
5779 data
->handler
== HANDLER_ONCHANGE
) {
5780 ret
= track_data_create(hist_data
, data
);
5792 static void print_actions(struct seq_file
*m
,
5793 struct hist_trigger_data
*hist_data
,
5794 struct tracing_map_elt
*elt
)
5798 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5799 struct action_data
*data
= hist_data
->actions
[i
];
5801 if (data
->action
== ACTION_SNAPSHOT
)
5804 if (data
->handler
== HANDLER_ONMAX
||
5805 data
->handler
== HANDLER_ONCHANGE
)
5806 track_data_print(m
, hist_data
, elt
, data
);
5810 static void print_action_spec(struct seq_file
*m
,
5811 struct hist_trigger_data
*hist_data
,
5812 struct action_data
*data
)
5816 if (data
->action
== ACTION_SAVE
) {
5817 for (i
= 0; i
< hist_data
->n_save_vars
; i
++) {
5818 seq_printf(m
, "%s", hist_data
->save_vars
[i
]->var
->var
.name
);
5819 if (i
< hist_data
->n_save_vars
- 1)
5822 } else if (data
->action
== ACTION_TRACE
) {
5823 if (data
->use_trace_keyword
)
5824 seq_printf(m
, "%s", data
->synth_event_name
);
5825 for (i
= 0; i
< data
->n_params
; i
++) {
5826 if (i
|| data
->use_trace_keyword
)
5828 seq_printf(m
, "%s", data
->params
[i
]);
5833 static void print_track_data_spec(struct seq_file
*m
,
5834 struct hist_trigger_data
*hist_data
,
5835 struct action_data
*data
)
5837 if (data
->handler
== HANDLER_ONMAX
)
5838 seq_puts(m
, ":onmax(");
5839 else if (data
->handler
== HANDLER_ONCHANGE
)
5840 seq_puts(m
, ":onchange(");
5841 seq_printf(m
, "%s", data
->track_data
.var_str
);
5842 seq_printf(m
, ").%s(", data
->action_name
);
5844 print_action_spec(m
, hist_data
, data
);
5849 static void print_onmatch_spec(struct seq_file
*m
,
5850 struct hist_trigger_data
*hist_data
,
5851 struct action_data
*data
)
5853 seq_printf(m
, ":onmatch(%s.%s).", data
->match_data
.event_system
,
5854 data
->match_data
.event
);
5856 seq_printf(m
, "%s(", data
->action_name
);
5858 print_action_spec(m
, hist_data
, data
);
5863 static bool actions_match(struct hist_trigger_data
*hist_data
,
5864 struct hist_trigger_data
*hist_data_test
)
5868 if (hist_data
->n_actions
!= hist_data_test
->n_actions
)
5871 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5872 struct action_data
*data
= hist_data
->actions
[i
];
5873 struct action_data
*data_test
= hist_data_test
->actions
[i
];
5874 char *action_name
, *action_name_test
;
5876 if (data
->handler
!= data_test
->handler
)
5878 if (data
->action
!= data_test
->action
)
5881 if (data
->n_params
!= data_test
->n_params
)
5884 for (j
= 0; j
< data
->n_params
; j
++) {
5885 if (strcmp(data
->params
[j
], data_test
->params
[j
]) != 0)
5889 if (data
->use_trace_keyword
)
5890 action_name
= data
->synth_event_name
;
5892 action_name
= data
->action_name
;
5894 if (data_test
->use_trace_keyword
)
5895 action_name_test
= data_test
->synth_event_name
;
5897 action_name_test
= data_test
->action_name
;
5899 if (strcmp(action_name
, action_name_test
) != 0)
5902 if (data
->handler
== HANDLER_ONMATCH
) {
5903 if (strcmp(data
->match_data
.event_system
,
5904 data_test
->match_data
.event_system
) != 0)
5906 if (strcmp(data
->match_data
.event
,
5907 data_test
->match_data
.event
) != 0)
5909 } else if (data
->handler
== HANDLER_ONMAX
||
5910 data
->handler
== HANDLER_ONCHANGE
) {
5911 if (strcmp(data
->track_data
.var_str
,
5912 data_test
->track_data
.var_str
) != 0)
5921 static void print_actions_spec(struct seq_file
*m
,
5922 struct hist_trigger_data
*hist_data
)
5926 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
5927 struct action_data
*data
= hist_data
->actions
[i
];
5929 if (data
->handler
== HANDLER_ONMATCH
)
5930 print_onmatch_spec(m
, hist_data
, data
);
5931 else if (data
->handler
== HANDLER_ONMAX
||
5932 data
->handler
== HANDLER_ONCHANGE
)
5933 print_track_data_spec(m
, hist_data
, data
);
5937 static void destroy_field_var_hists(struct hist_trigger_data
*hist_data
)
5941 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
5942 kfree(hist_data
->field_var_hists
[i
]->cmd
);
5943 kfree(hist_data
->field_var_hists
[i
]);
5947 static void destroy_hist_data(struct hist_trigger_data
*hist_data
)
5952 destroy_hist_trigger_attrs(hist_data
->attrs
);
5953 destroy_hist_fields(hist_data
);
5954 tracing_map_destroy(hist_data
->map
);
5956 destroy_actions(hist_data
);
5957 destroy_field_vars(hist_data
);
5958 destroy_field_var_hists(hist_data
);
5963 static int create_tracing_map_fields(struct hist_trigger_data
*hist_data
)
5965 struct tracing_map
*map
= hist_data
->map
;
5966 struct ftrace_event_field
*field
;
5967 struct hist_field
*hist_field
;
5970 for_each_hist_field(i
, hist_data
) {
5971 hist_field
= hist_data
->fields
[i
];
5972 if (hist_field
->flags
& HIST_FIELD_FL_KEY
) {
5973 tracing_map_cmp_fn_t cmp_fn
;
5975 field
= hist_field
->field
;
5977 if (hist_field
->flags
& HIST_FIELD_FL_STACKTRACE
)
5978 cmp_fn
= tracing_map_cmp_none
;
5980 cmp_fn
= tracing_map_cmp_num(hist_field
->size
,
5981 hist_field
->is_signed
);
5982 else if (is_string_field(field
))
5983 cmp_fn
= tracing_map_cmp_string
;
5985 cmp_fn
= tracing_map_cmp_num(field
->size
,
5987 idx
= tracing_map_add_key_field(map
,
5990 } else if (!(hist_field
->flags
& HIST_FIELD_FL_VAR
))
5991 idx
= tracing_map_add_sum_field(map
);
5996 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
5997 idx
= tracing_map_add_var(map
);
6000 hist_field
->var
.idx
= idx
;
6001 hist_field
->var
.hist_data
= hist_data
;
6008 static struct hist_trigger_data
*
6009 create_hist_data(unsigned int map_bits
,
6010 struct hist_trigger_attrs
*attrs
,
6011 struct trace_event_file
*file
,
6014 const struct tracing_map_ops
*map_ops
= NULL
;
6015 struct hist_trigger_data
*hist_data
;
6018 hist_data
= kzalloc(sizeof(*hist_data
), GFP_KERNEL
);
6020 return ERR_PTR(-ENOMEM
);
6022 hist_data
->attrs
= attrs
;
6023 hist_data
->remove
= remove
;
6024 hist_data
->event_file
= file
;
6026 ret
= parse_actions(hist_data
);
6030 ret
= create_hist_fields(hist_data
, file
);
6034 ret
= create_sort_keys(hist_data
);
6038 map_ops
= &hist_trigger_elt_data_ops
;
6040 hist_data
->map
= tracing_map_create(map_bits
, hist_data
->key_size
,
6041 map_ops
, hist_data
);
6042 if (IS_ERR(hist_data
->map
)) {
6043 ret
= PTR_ERR(hist_data
->map
);
6044 hist_data
->map
= NULL
;
6048 ret
= create_tracing_map_fields(hist_data
);
6054 hist_data
->attrs
= NULL
;
6056 destroy_hist_data(hist_data
);
6058 hist_data
= ERR_PTR(ret
);
6063 static void hist_trigger_elt_update(struct hist_trigger_data
*hist_data
,
6064 struct tracing_map_elt
*elt
, void *rec
,
6065 struct ring_buffer_event
*rbe
,
6068 struct hist_elt_data
*elt_data
;
6069 struct hist_field
*hist_field
;
6070 unsigned int i
, var_idx
;
6073 elt_data
= elt
->private_data
;
6074 elt_data
->var_ref_vals
= var_ref_vals
;
6076 for_each_hist_val_field(i
, hist_data
) {
6077 hist_field
= hist_data
->fields
[i
];
6078 hist_val
= hist_field
->fn(hist_field
, elt
, rbe
, rec
);
6079 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
6080 var_idx
= hist_field
->var
.idx
;
6081 tracing_map_set_var(elt
, var_idx
, hist_val
);
6084 tracing_map_update_sum(elt
, i
, hist_val
);
6087 for_each_hist_key_field(i
, hist_data
) {
6088 hist_field
= hist_data
->fields
[i
];
6089 if (hist_field
->flags
& HIST_FIELD_FL_VAR
) {
6090 hist_val
= hist_field
->fn(hist_field
, elt
, rbe
, rec
);
6091 var_idx
= hist_field
->var
.idx
;
6092 tracing_map_set_var(elt
, var_idx
, hist_val
);
6096 update_field_vars(hist_data
, elt
, rbe
, rec
);
6099 static inline void add_to_key(char *compound_key
, void *key
,
6100 struct hist_field
*key_field
, void *rec
)
6102 size_t size
= key_field
->size
;
6104 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
6105 struct ftrace_event_field
*field
;
6107 field
= key_field
->field
;
6108 if (field
->filter_type
== FILTER_DYN_STRING
)
6109 size
= *(u32
*)(rec
+ field
->offset
) >> 16;
6110 else if (field
->filter_type
== FILTER_PTR_STRING
)
6112 else if (field
->filter_type
== FILTER_STATIC_STRING
)
6115 /* ensure NULL-termination */
6116 if (size
> key_field
->size
- 1)
6117 size
= key_field
->size
- 1;
6119 strncpy(compound_key
+ key_field
->offset
, (char *)key
, size
);
6121 memcpy(compound_key
+ key_field
->offset
, key
, size
);
6125 hist_trigger_actions(struct hist_trigger_data
*hist_data
,
6126 struct tracing_map_elt
*elt
, void *rec
,
6127 struct ring_buffer_event
*rbe
, void *key
,
6130 struct action_data
*data
;
6133 for (i
= 0; i
< hist_data
->n_actions
; i
++) {
6134 data
= hist_data
->actions
[i
];
6135 data
->fn(hist_data
, elt
, rec
, rbe
, key
, data
, var_ref_vals
);
6139 static void event_hist_trigger(struct event_trigger_data
*data
, void *rec
,
6140 struct ring_buffer_event
*rbe
)
6142 struct hist_trigger_data
*hist_data
= data
->private_data
;
6143 bool use_compound_key
= (hist_data
->n_keys
> 1);
6144 unsigned long entries
[HIST_STACKTRACE_DEPTH
];
6145 u64 var_ref_vals
[TRACING_MAP_VARS_MAX
];
6146 char compound_key
[HIST_KEY_SIZE_MAX
];
6147 struct tracing_map_elt
*elt
= NULL
;
6148 struct hist_field
*key_field
;
6153 memset(compound_key
, 0, hist_data
->key_size
);
6155 for_each_hist_key_field(i
, hist_data
) {
6156 key_field
= hist_data
->fields
[i
];
6158 if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
6159 memset(entries
, 0, HIST_STACKTRACE_SIZE
);
6160 stack_trace_save(entries
, HIST_STACKTRACE_DEPTH
,
6161 HIST_STACKTRACE_SKIP
);
6164 field_contents
= key_field
->fn(key_field
, elt
, rbe
, rec
);
6165 if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
6166 key
= (void *)(unsigned long)field_contents
;
6167 use_compound_key
= true;
6169 key
= (void *)&field_contents
;
6172 if (use_compound_key
)
6173 add_to_key(compound_key
, key
, key_field
, rec
);
6176 if (use_compound_key
)
6179 if (hist_data
->n_var_refs
&&
6180 !resolve_var_refs(hist_data
, key
, var_ref_vals
, false))
6183 elt
= tracing_map_insert(hist_data
->map
, key
);
6187 hist_trigger_elt_update(hist_data
, elt
, rec
, rbe
, var_ref_vals
);
6189 if (resolve_var_refs(hist_data
, key
, var_ref_vals
, true))
6190 hist_trigger_actions(hist_data
, elt
, rec
, rbe
, key
, var_ref_vals
);
6193 static void hist_trigger_stacktrace_print(struct seq_file
*m
,
6194 unsigned long *stacktrace_entries
,
6195 unsigned int max_entries
)
6197 char str
[KSYM_SYMBOL_LEN
];
6198 unsigned int spaces
= 8;
6201 for (i
= 0; i
< max_entries
; i
++) {
6202 if (!stacktrace_entries
[i
])
6205 seq_printf(m
, "%*c", 1 + spaces
, ' ');
6206 sprint_symbol(str
, stacktrace_entries
[i
]);
6207 seq_printf(m
, "%s\n", str
);
6211 static void hist_trigger_print_key(struct seq_file
*m
,
6212 struct hist_trigger_data
*hist_data
,
6214 struct tracing_map_elt
*elt
)
6216 struct hist_field
*key_field
;
6217 char str
[KSYM_SYMBOL_LEN
];
6218 bool multiline
= false;
6219 const char *field_name
;
6225 for_each_hist_key_field(i
, hist_data
) {
6226 key_field
= hist_data
->fields
[i
];
6228 if (i
> hist_data
->n_vals
)
6231 field_name
= hist_field_name(key_field
, 0);
6233 if (key_field
->flags
& HIST_FIELD_FL_HEX
) {
6234 uval
= *(u64
*)(key
+ key_field
->offset
);
6235 seq_printf(m
, "%s: %llx", field_name
, uval
);
6236 } else if (key_field
->flags
& HIST_FIELD_FL_SYM
) {
6237 uval
= *(u64
*)(key
+ key_field
->offset
);
6238 sprint_symbol_no_offset(str
, uval
);
6239 seq_printf(m
, "%s: [%llx] %-45s", field_name
,
6241 } else if (key_field
->flags
& HIST_FIELD_FL_SYM_OFFSET
) {
6242 uval
= *(u64
*)(key
+ key_field
->offset
);
6243 sprint_symbol(str
, uval
);
6244 seq_printf(m
, "%s: [%llx] %-55s", field_name
,
6246 } else if (key_field
->flags
& HIST_FIELD_FL_EXECNAME
) {
6247 struct hist_elt_data
*elt_data
= elt
->private_data
;
6250 if (WARN_ON_ONCE(!elt_data
))
6253 comm
= elt_data
->comm
;
6255 uval
= *(u64
*)(key
+ key_field
->offset
);
6256 seq_printf(m
, "%s: %-16s[%10llu]", field_name
,
6258 } else if (key_field
->flags
& HIST_FIELD_FL_SYSCALL
) {
6259 const char *syscall_name
;
6261 uval
= *(u64
*)(key
+ key_field
->offset
);
6262 syscall_name
= get_syscall_name(uval
);
6264 syscall_name
= "unknown_syscall";
6266 seq_printf(m
, "%s: %-30s[%3llu]", field_name
,
6267 syscall_name
, uval
);
6268 } else if (key_field
->flags
& HIST_FIELD_FL_STACKTRACE
) {
6269 seq_puts(m
, "stacktrace:\n");
6270 hist_trigger_stacktrace_print(m
,
6271 key
+ key_field
->offset
,
6272 HIST_STACKTRACE_DEPTH
);
6274 } else if (key_field
->flags
& HIST_FIELD_FL_LOG2
) {
6275 seq_printf(m
, "%s: ~ 2^%-2llu", field_name
,
6276 *(u64
*)(key
+ key_field
->offset
));
6277 } else if (key_field
->flags
& HIST_FIELD_FL_STRING
) {
6278 seq_printf(m
, "%s: %-50s", field_name
,
6279 (char *)(key
+ key_field
->offset
));
6281 uval
= *(u64
*)(key
+ key_field
->offset
);
6282 seq_printf(m
, "%s: %10llu", field_name
, uval
);
6292 static void hist_trigger_entry_print(struct seq_file
*m
,
6293 struct hist_trigger_data
*hist_data
,
6295 struct tracing_map_elt
*elt
)
6297 const char *field_name
;
6300 hist_trigger_print_key(m
, hist_data
, key
, elt
);
6302 seq_printf(m
, " hitcount: %10llu",
6303 tracing_map_read_sum(elt
, HITCOUNT_IDX
));
6305 for (i
= 1; i
< hist_data
->n_vals
; i
++) {
6306 field_name
= hist_field_name(hist_data
->fields
[i
], 0);
6308 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_VAR
||
6309 hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_EXPR
)
6312 if (hist_data
->fields
[i
]->flags
& HIST_FIELD_FL_HEX
) {
6313 seq_printf(m
, " %s: %10llx", field_name
,
6314 tracing_map_read_sum(elt
, i
));
6316 seq_printf(m
, " %s: %10llu", field_name
,
6317 tracing_map_read_sum(elt
, i
));
6321 print_actions(m
, hist_data
, elt
);
6326 static int print_entries(struct seq_file
*m
,
6327 struct hist_trigger_data
*hist_data
)
6329 struct tracing_map_sort_entry
**sort_entries
= NULL
;
6330 struct tracing_map
*map
= hist_data
->map
;
6333 n_entries
= tracing_map_sort_entries(map
, hist_data
->sort_keys
,
6334 hist_data
->n_sort_keys
,
6339 for (i
= 0; i
< n_entries
; i
++)
6340 hist_trigger_entry_print(m
, hist_data
,
6341 sort_entries
[i
]->key
,
6342 sort_entries
[i
]->elt
);
6344 tracing_map_destroy_sort_entries(sort_entries
, n_entries
);
6349 static void hist_trigger_show(struct seq_file
*m
,
6350 struct event_trigger_data
*data
, int n
)
6352 struct hist_trigger_data
*hist_data
;
6356 seq_puts(m
, "\n\n");
6358 seq_puts(m
, "# event histogram\n#\n# trigger info: ");
6359 data
->ops
->print(m
, data
->ops
, data
);
6360 seq_puts(m
, "#\n\n");
6362 hist_data
= data
->private_data
;
6363 n_entries
= print_entries(m
, hist_data
);
6367 track_data_snapshot_print(m
, hist_data
);
6369 seq_printf(m
, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
6370 (u64
)atomic64_read(&hist_data
->map
->hits
),
6371 n_entries
, (u64
)atomic64_read(&hist_data
->map
->drops
));
6374 static int hist_show(struct seq_file
*m
, void *v
)
6376 struct event_trigger_data
*data
;
6377 struct trace_event_file
*event_file
;
6380 mutex_lock(&event_mutex
);
6382 event_file
= event_file_data(m
->private);
6383 if (unlikely(!event_file
)) {
6388 list_for_each_entry(data
, &event_file
->triggers
, list
) {
6389 if (data
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
)
6390 hist_trigger_show(m
, data
, n
++);
6394 mutex_unlock(&event_mutex
);
6399 static int event_hist_open(struct inode
*inode
, struct file
*file
)
6403 ret
= security_locked_down(LOCKDOWN_TRACEFS
);
6407 return single_open(file
, hist_show
, file
);
6410 const struct file_operations event_hist_fops
= {
6411 .open
= event_hist_open
,
6413 .llseek
= seq_lseek
,
6414 .release
= single_release
,
6417 static void hist_field_print(struct seq_file
*m
, struct hist_field
*hist_field
)
6419 const char *field_name
= hist_field_name(hist_field
, 0);
6421 if (hist_field
->var
.name
)
6422 seq_printf(m
, "%s=", hist_field
->var
.name
);
6424 if (hist_field
->flags
& HIST_FIELD_FL_CPU
)
6426 else if (field_name
) {
6427 if (hist_field
->flags
& HIST_FIELD_FL_VAR_REF
||
6428 hist_field
->flags
& HIST_FIELD_FL_ALIAS
)
6430 seq_printf(m
, "%s", field_name
);
6431 } else if (hist_field
->flags
& HIST_FIELD_FL_TIMESTAMP
)
6432 seq_puts(m
, "common_timestamp");
6434 if (hist_field
->flags
) {
6435 if (!(hist_field
->flags
& HIST_FIELD_FL_VAR_REF
) &&
6436 !(hist_field
->flags
& HIST_FIELD_FL_EXPR
)) {
6437 const char *flags
= get_hist_field_flags(hist_field
);
6440 seq_printf(m
, ".%s", flags
);
6445 static int event_hist_trigger_print(struct seq_file
*m
,
6446 struct event_trigger_ops
*ops
,
6447 struct event_trigger_data
*data
)
6449 struct hist_trigger_data
*hist_data
= data
->private_data
;
6450 struct hist_field
*field
;
6451 bool have_var
= false;
6454 seq_puts(m
, "hist:");
6457 seq_printf(m
, "%s:", data
->name
);
6459 seq_puts(m
, "keys=");
6461 for_each_hist_key_field(i
, hist_data
) {
6462 field
= hist_data
->fields
[i
];
6464 if (i
> hist_data
->n_vals
)
6467 if (field
->flags
& HIST_FIELD_FL_STACKTRACE
)
6468 seq_puts(m
, "stacktrace");
6470 hist_field_print(m
, field
);
6473 seq_puts(m
, ":vals=");
6475 for_each_hist_val_field(i
, hist_data
) {
6476 field
= hist_data
->fields
[i
];
6477 if (field
->flags
& HIST_FIELD_FL_VAR
) {
6482 if (i
== HITCOUNT_IDX
)
6483 seq_puts(m
, "hitcount");
6486 hist_field_print(m
, field
);
6495 for_each_hist_val_field(i
, hist_data
) {
6496 field
= hist_data
->fields
[i
];
6498 if (field
->flags
& HIST_FIELD_FL_VAR
) {
6501 hist_field_print(m
, field
);
6506 seq_puts(m
, ":sort=");
6508 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
6509 struct tracing_map_sort_key
*sort_key
;
6510 unsigned int idx
, first_key_idx
;
6513 first_key_idx
= hist_data
->n_vals
- hist_data
->n_vars
;
6515 sort_key
= &hist_data
->sort_keys
[i
];
6516 idx
= sort_key
->field_idx
;
6518 if (WARN_ON(idx
>= HIST_FIELDS_MAX
))
6524 if (idx
== HITCOUNT_IDX
)
6525 seq_puts(m
, "hitcount");
6527 if (idx
>= first_key_idx
)
6528 idx
+= hist_data
->n_vars
;
6529 hist_field_print(m
, hist_data
->fields
[idx
]);
6532 if (sort_key
->descending
)
6533 seq_puts(m
, ".descending");
6535 seq_printf(m
, ":size=%u", (1 << hist_data
->map
->map_bits
));
6536 if (hist_data
->enable_timestamps
)
6537 seq_printf(m
, ":clock=%s", hist_data
->attrs
->clock
);
6539 print_actions_spec(m
, hist_data
);
6541 if (data
->filter_str
)
6542 seq_printf(m
, " if %s", data
->filter_str
);
6545 seq_puts(m
, " [paused]");
6547 seq_puts(m
, " [active]");
6554 static int event_hist_trigger_init(struct event_trigger_ops
*ops
,
6555 struct event_trigger_data
*data
)
6557 struct hist_trigger_data
*hist_data
= data
->private_data
;
6559 if (!data
->ref
&& hist_data
->attrs
->name
)
6560 save_named_trigger(hist_data
->attrs
->name
, data
);
6567 static void unregister_field_var_hists(struct hist_trigger_data
*hist_data
)
6569 struct trace_event_file
*file
;
6574 for (i
= 0; i
< hist_data
->n_field_var_hists
; i
++) {
6575 file
= hist_data
->field_var_hists
[i
]->hist_data
->event_file
;
6576 cmd
= hist_data
->field_var_hists
[i
]->cmd
;
6577 ret
= event_hist_trigger_func(&trigger_hist_cmd
, file
,
6578 "!hist", "hist", cmd
);
6582 static void event_hist_trigger_free(struct event_trigger_ops
*ops
,
6583 struct event_trigger_data
*data
)
6585 struct hist_trigger_data
*hist_data
= data
->private_data
;
6587 if (WARN_ON_ONCE(data
->ref
<= 0))
6593 del_named_trigger(data
);
6595 trigger_data_free(data
);
6597 remove_hist_vars(hist_data
);
6599 unregister_field_var_hists(hist_data
);
6601 destroy_hist_data(hist_data
);
6605 static struct event_trigger_ops event_hist_trigger_ops
= {
6606 .func
= event_hist_trigger
,
6607 .print
= event_hist_trigger_print
,
6608 .init
= event_hist_trigger_init
,
6609 .free
= event_hist_trigger_free
,
6612 static int event_hist_trigger_named_init(struct event_trigger_ops
*ops
,
6613 struct event_trigger_data
*data
)
6617 save_named_trigger(data
->named_data
->name
, data
);
6619 event_hist_trigger_init(ops
, data
->named_data
);
6624 static void event_hist_trigger_named_free(struct event_trigger_ops
*ops
,
6625 struct event_trigger_data
*data
)
6627 if (WARN_ON_ONCE(data
->ref
<= 0))
6630 event_hist_trigger_free(ops
, data
->named_data
);
6634 del_named_trigger(data
);
6635 trigger_data_free(data
);
6639 static struct event_trigger_ops event_hist_trigger_named_ops
= {
6640 .func
= event_hist_trigger
,
6641 .print
= event_hist_trigger_print
,
6642 .init
= event_hist_trigger_named_init
,
6643 .free
= event_hist_trigger_named_free
,
6646 static struct event_trigger_ops
*event_hist_get_trigger_ops(char *cmd
,
6649 return &event_hist_trigger_ops
;
6652 static void hist_clear(struct event_trigger_data
*data
)
6654 struct hist_trigger_data
*hist_data
= data
->private_data
;
6657 pause_named_trigger(data
);
6659 tracepoint_synchronize_unregister();
6661 tracing_map_clear(hist_data
->map
);
6664 unpause_named_trigger(data
);
6667 static bool compatible_field(struct ftrace_event_field
*field
,
6668 struct ftrace_event_field
*test_field
)
6670 if (field
== test_field
)
6672 if (field
== NULL
|| test_field
== NULL
)
6674 if (strcmp(field
->name
, test_field
->name
) != 0)
6676 if (strcmp(field
->type
, test_field
->type
) != 0)
6678 if (field
->size
!= test_field
->size
)
6680 if (field
->is_signed
!= test_field
->is_signed
)
6686 static bool hist_trigger_match(struct event_trigger_data
*data
,
6687 struct event_trigger_data
*data_test
,
6688 struct event_trigger_data
*named_data
,
6691 struct tracing_map_sort_key
*sort_key
, *sort_key_test
;
6692 struct hist_trigger_data
*hist_data
, *hist_data_test
;
6693 struct hist_field
*key_field
, *key_field_test
;
6696 if (named_data
&& (named_data
!= data_test
) &&
6697 (named_data
!= data_test
->named_data
))
6700 if (!named_data
&& is_named_trigger(data_test
))
6703 hist_data
= data
->private_data
;
6704 hist_data_test
= data_test
->private_data
;
6706 if (hist_data
->n_vals
!= hist_data_test
->n_vals
||
6707 hist_data
->n_fields
!= hist_data_test
->n_fields
||
6708 hist_data
->n_sort_keys
!= hist_data_test
->n_sort_keys
)
6711 if (!ignore_filter
) {
6712 if ((data
->filter_str
&& !data_test
->filter_str
) ||
6713 (!data
->filter_str
&& data_test
->filter_str
))
6717 for_each_hist_field(i
, hist_data
) {
6718 key_field
= hist_data
->fields
[i
];
6719 key_field_test
= hist_data_test
->fields
[i
];
6721 if (key_field
->flags
!= key_field_test
->flags
)
6723 if (!compatible_field(key_field
->field
, key_field_test
->field
))
6725 if (key_field
->offset
!= key_field_test
->offset
)
6727 if (key_field
->size
!= key_field_test
->size
)
6729 if (key_field
->is_signed
!= key_field_test
->is_signed
)
6731 if (!!key_field
->var
.name
!= !!key_field_test
->var
.name
)
6733 if (key_field
->var
.name
&&
6734 strcmp(key_field
->var
.name
, key_field_test
->var
.name
) != 0)
6738 for (i
= 0; i
< hist_data
->n_sort_keys
; i
++) {
6739 sort_key
= &hist_data
->sort_keys
[i
];
6740 sort_key_test
= &hist_data_test
->sort_keys
[i
];
6742 if (sort_key
->field_idx
!= sort_key_test
->field_idx
||
6743 sort_key
->descending
!= sort_key_test
->descending
)
6747 if (!ignore_filter
&& data
->filter_str
&&
6748 (strcmp(data
->filter_str
, data_test
->filter_str
) != 0))
6751 if (!actions_match(hist_data
, hist_data_test
))
6757 static int hist_register_trigger(char *glob
, struct event_trigger_ops
*ops
,
6758 struct event_trigger_data
*data
,
6759 struct trace_event_file
*file
)
6761 struct hist_trigger_data
*hist_data
= data
->private_data
;
6762 struct event_trigger_data
*test
, *named_data
= NULL
;
6763 struct trace_array
*tr
= file
->tr
;
6766 if (hist_data
->attrs
->name
) {
6767 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6769 if (!hist_trigger_match(data
, named_data
, named_data
,
6771 hist_err(tr
, HIST_ERR_NAMED_MISMATCH
, errpos(hist_data
->attrs
->name
));
6778 if (hist_data
->attrs
->name
&& !named_data
)
6781 lockdep_assert_held(&event_mutex
);
6783 list_for_each_entry(test
, &file
->triggers
, list
) {
6784 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6785 if (!hist_trigger_match(data
, test
, named_data
, false))
6787 if (hist_data
->attrs
->pause
)
6788 test
->paused
= true;
6789 else if (hist_data
->attrs
->cont
)
6790 test
->paused
= false;
6791 else if (hist_data
->attrs
->clear
)
6794 hist_err(tr
, HIST_ERR_TRIGGER_EEXIST
, 0);
6801 if (hist_data
->attrs
->cont
|| hist_data
->attrs
->clear
) {
6802 hist_err(tr
, HIST_ERR_TRIGGER_ENOENT_CLEAR
, 0);
6807 if (hist_data
->attrs
->pause
)
6808 data
->paused
= true;
6811 data
->private_data
= named_data
->private_data
;
6812 set_named_trigger_data(data
, named_data
);
6813 data
->ops
= &event_hist_trigger_named_ops
;
6816 if (data
->ops
->init
) {
6817 ret
= data
->ops
->init(data
->ops
, data
);
6822 if (hist_data
->enable_timestamps
) {
6823 char *clock
= hist_data
->attrs
->clock
;
6825 ret
= tracing_set_clock(file
->tr
, hist_data
->attrs
->clock
);
6827 hist_err(tr
, HIST_ERR_SET_CLOCK_FAIL
, errpos(clock
));
6831 tracing_set_time_stamp_abs(file
->tr
, true);
6835 destroy_hist_data(hist_data
);
6842 static int hist_trigger_enable(struct event_trigger_data
*data
,
6843 struct trace_event_file
*file
)
6847 list_add_tail_rcu(&data
->list
, &file
->triggers
);
6849 update_cond_flag(file
);
6851 if (trace_event_trigger_enable_disable(file
, 1) < 0) {
6852 list_del_rcu(&data
->list
);
6853 update_cond_flag(file
);
6860 static bool have_hist_trigger_match(struct event_trigger_data
*data
,
6861 struct trace_event_file
*file
)
6863 struct hist_trigger_data
*hist_data
= data
->private_data
;
6864 struct event_trigger_data
*test
, *named_data
= NULL
;
6867 lockdep_assert_held(&event_mutex
);
6869 if (hist_data
->attrs
->name
)
6870 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6872 list_for_each_entry(test
, &file
->triggers
, list
) {
6873 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6874 if (hist_trigger_match(data
, test
, named_data
, false)) {
6884 static bool hist_trigger_check_refs(struct event_trigger_data
*data
,
6885 struct trace_event_file
*file
)
6887 struct hist_trigger_data
*hist_data
= data
->private_data
;
6888 struct event_trigger_data
*test
, *named_data
= NULL
;
6890 lockdep_assert_held(&event_mutex
);
6892 if (hist_data
->attrs
->name
)
6893 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6895 list_for_each_entry(test
, &file
->triggers
, list
) {
6896 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6897 if (!hist_trigger_match(data
, test
, named_data
, false))
6899 hist_data
= test
->private_data
;
6900 if (check_var_refs(hist_data
))
6909 static void hist_unregister_trigger(char *glob
, struct event_trigger_ops
*ops
,
6910 struct event_trigger_data
*data
,
6911 struct trace_event_file
*file
)
6913 struct hist_trigger_data
*hist_data
= data
->private_data
;
6914 struct event_trigger_data
*test
, *named_data
= NULL
;
6915 bool unregistered
= false;
6917 lockdep_assert_held(&event_mutex
);
6919 if (hist_data
->attrs
->name
)
6920 named_data
= find_named_trigger(hist_data
->attrs
->name
);
6922 list_for_each_entry(test
, &file
->triggers
, list
) {
6923 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6924 if (!hist_trigger_match(data
, test
, named_data
, false))
6926 unregistered
= true;
6927 list_del_rcu(&test
->list
);
6928 trace_event_trigger_enable_disable(file
, 0);
6929 update_cond_flag(file
);
6934 if (unregistered
&& test
->ops
->free
)
6935 test
->ops
->free(test
->ops
, test
);
6937 if (hist_data
->enable_timestamps
) {
6938 if (!hist_data
->remove
|| unregistered
)
6939 tracing_set_time_stamp_abs(file
->tr
, false);
6943 static bool hist_file_check_refs(struct trace_event_file
*file
)
6945 struct hist_trigger_data
*hist_data
;
6946 struct event_trigger_data
*test
;
6948 lockdep_assert_held(&event_mutex
);
6950 list_for_each_entry(test
, &file
->triggers
, list
) {
6951 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6952 hist_data
= test
->private_data
;
6953 if (check_var_refs(hist_data
))
6961 static void hist_unreg_all(struct trace_event_file
*file
)
6963 struct event_trigger_data
*test
, *n
;
6964 struct hist_trigger_data
*hist_data
;
6965 struct synth_event
*se
;
6966 const char *se_name
;
6968 lockdep_assert_held(&event_mutex
);
6970 if (hist_file_check_refs(file
))
6973 list_for_each_entry_safe(test
, n
, &file
->triggers
, list
) {
6974 if (test
->cmd_ops
->trigger_type
== ETT_EVENT_HIST
) {
6975 hist_data
= test
->private_data
;
6976 list_del_rcu(&test
->list
);
6977 trace_event_trigger_enable_disable(file
, 0);
6979 se_name
= trace_event_name(file
->event_call
);
6980 se
= find_synth_event(se_name
);
6984 update_cond_flag(file
);
6985 if (hist_data
->enable_timestamps
)
6986 tracing_set_time_stamp_abs(file
->tr
, false);
6987 if (test
->ops
->free
)
6988 test
->ops
->free(test
->ops
, test
);
6993 static int event_hist_trigger_func(struct event_command
*cmd_ops
,
6994 struct trace_event_file
*file
,
6995 char *glob
, char *cmd
, char *param
)
6997 unsigned int hist_trigger_bits
= TRACING_MAP_BITS_DEFAULT
;
6998 struct event_trigger_data
*trigger_data
;
6999 struct hist_trigger_attrs
*attrs
;
7000 struct event_trigger_ops
*trigger_ops
;
7001 struct hist_trigger_data
*hist_data
;
7002 struct synth_event
*se
;
7003 const char *se_name
;
7004 bool remove
= false;
7008 lockdep_assert_held(&event_mutex
);
7010 if (glob
&& strlen(glob
)) {
7012 last_cmd_set(file
, param
);
7022 * separate the trigger from the filter (k:v [if filter])
7023 * allowing for whitespace in the trigger
7025 p
= trigger
= param
;
7027 p
= strstr(p
, "if");
7032 if (*(p
- 1) != ' ' && *(p
- 1) != '\t') {
7036 if (p
>= param
+ strlen(param
) - (sizeof("if") - 1) - 1)
7038 if (*(p
+ sizeof("if") - 1) != ' ' && *(p
+ sizeof("if") - 1) != '\t') {
7049 param
= strstrip(p
);
7050 trigger
= strstrip(trigger
);
7053 attrs
= parse_hist_trigger_attrs(file
->tr
, trigger
);
7055 return PTR_ERR(attrs
);
7057 if (attrs
->map_bits
)
7058 hist_trigger_bits
= attrs
->map_bits
;
7060 hist_data
= create_hist_data(hist_trigger_bits
, attrs
, file
, remove
);
7061 if (IS_ERR(hist_data
)) {
7062 destroy_hist_trigger_attrs(attrs
);
7063 return PTR_ERR(hist_data
);
7066 trigger_ops
= cmd_ops
->get_trigger_ops(cmd
, trigger
);
7068 trigger_data
= kzalloc(sizeof(*trigger_data
), GFP_KERNEL
);
7069 if (!trigger_data
) {
7074 trigger_data
->count
= -1;
7075 trigger_data
->ops
= trigger_ops
;
7076 trigger_data
->cmd_ops
= cmd_ops
;
7078 INIT_LIST_HEAD(&trigger_data
->list
);
7079 RCU_INIT_POINTER(trigger_data
->filter
, NULL
);
7081 trigger_data
->private_data
= hist_data
;
7083 /* if param is non-empty, it's supposed to be a filter */
7084 if (param
&& cmd_ops
->set_filter
) {
7085 ret
= cmd_ops
->set_filter(param
, trigger_data
, file
);
7091 if (!have_hist_trigger_match(trigger_data
, file
))
7094 if (hist_trigger_check_refs(trigger_data
, file
)) {
7099 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
7100 se_name
= trace_event_name(file
->event_call
);
7101 se
= find_synth_event(se_name
);
7108 ret
= cmd_ops
->reg(glob
, trigger_ops
, trigger_data
, file
);
7110 * The above returns on success the # of triggers registered,
7111 * but if it didn't register any it returns zero. Consider no
7112 * triggers registered a failure too.
7115 if (!(attrs
->pause
|| attrs
->cont
|| attrs
->clear
))
7121 if (get_named_trigger_data(trigger_data
))
7124 if (has_hist_vars(hist_data
))
7125 save_hist_vars(hist_data
);
7127 ret
= create_actions(hist_data
);
7131 ret
= tracing_map_init(hist_data
->map
);
7135 ret
= hist_trigger_enable(trigger_data
, file
);
7139 se_name
= trace_event_name(file
->event_call
);
7140 se
= find_synth_event(se_name
);
7143 /* Just return zero, not the number of registered triggers */
7151 cmd_ops
->unreg(glob
+1, trigger_ops
, trigger_data
, file
);
7153 if (cmd_ops
->set_filter
)
7154 cmd_ops
->set_filter(NULL
, trigger_data
, NULL
);
7156 remove_hist_vars(hist_data
);
7158 kfree(trigger_data
);
7160 destroy_hist_data(hist_data
);
7164 static struct event_command trigger_hist_cmd
= {
7166 .trigger_type
= ETT_EVENT_HIST
,
7167 .flags
= EVENT_CMD_FL_NEEDS_REC
,
7168 .func
= event_hist_trigger_func
,
7169 .reg
= hist_register_trigger
,
7170 .unreg
= hist_unregister_trigger
,
7171 .unreg_all
= hist_unreg_all
,
7172 .get_trigger_ops
= event_hist_get_trigger_ops
,
7173 .set_filter
= set_trigger_filter
,
7176 __init
int register_trigger_hist_cmd(void)
7180 ret
= register_event_command(&trigger_hist_cmd
);
/*
 * Pause or unpause every hist trigger attached to the target event file.
 *
 * Fired by an enable_hist/disable_hist trigger: @data->private_data holds
 * the enable_trigger_data naming the target file and the desired state.
 * Walks the target file's trigger list and flips ->paused on each entry
 * whose trigger_type is ETT_EVENT_HIST.
 *
 * The list is traversed with list_for_each_entry_rcu(); the
 * lockdep_is_held(&event_mutex) cookie indicates callers may instead
 * hold event_mutex — NOTE(review): confirm both calling contexts.
 */
static void
hist_enable_trigger(struct event_trigger_data *data, void *rec,
		    struct ring_buffer_event *event)
{
	struct enable_trigger_data *enable_data = data->private_data;
	struct event_trigger_data *test;

	list_for_each_entry_rcu(test, &enable_data->file->triggers, list,
				lockdep_is_held(&event_mutex)) {
		if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
			if (enable_data->enable)
				test->paused = false;
			else
				test->paused = true;
		}
	}
}
7205 hist_enable_count_trigger(struct event_trigger_data
*data
, void *rec
,
7206 struct ring_buffer_event
*event
)
7211 if (data
->count
!= -1)
7214 hist_enable_trigger(data
, rec
, event
);
/* Ops for an uncounted enable_hist trigger. */
static struct event_trigger_ops hist_enable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
/* Ops for an enable_hist trigger with a ":count" limit. */
static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
/*
 * Ops for an uncounted disable_hist trigger.  Shares hist_enable_trigger()
 * with the enable variant: the enable/disable decision is carried in the
 * trigger's enable_trigger_data, not in the ops.
 */
static struct event_trigger_ops hist_disable_trigger_ops = {
	.func			= hist_enable_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
/* Ops for a disable_hist trigger with a ":count" limit. */
static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func			= hist_enable_count_trigger,
	.print			= event_enable_trigger_print,
	.init			= event_trigger_init,
	.free			= event_enable_trigger_free,
};
7245 static struct event_trigger_ops
*
7246 hist_enable_get_trigger_ops(char *cmd
, char *param
)
7248 struct event_trigger_ops
*ops
;
7251 enable
= (strcmp(cmd
, ENABLE_HIST_STR
) == 0);
7254 ops
= param
? &hist_enable_count_trigger_ops
:
7255 &hist_enable_trigger_ops
;
7257 ops
= param
? &hist_disable_count_trigger_ops
:
7258 &hist_disable_trigger_ops
;
/*
 * Remove every enable_hist/disable_hist trigger from @file.
 *
 * Uses the _safe list iterator because entries are deleted during the
 * walk.  For each ETT_HIST_ENABLE trigger: unlink it (RCU-safe), refresh
 * the file's condition flag, drop the file's trigger enable count, and
 * release the trigger through its ->free op if one is set.
 */
static void hist_enable_unreg_all(struct trace_event_file *file)
{
	struct event_trigger_data *test, *n;

	list_for_each_entry_safe(test, n, &file->triggers, list) {
		if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
			list_del_rcu(&test->list);
			update_cond_flag(file);
			trace_event_trigger_enable_disable(file, 0);
			if (test->ops->free)
				test->ops->free(test->ops, test);
		}
	}
}
/* Command definition for the "enable_hist" trigger. */
static struct event_command trigger_hist_enable_cmd = {
	.name			= ENABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
/*
 * Command definition for the "disable_hist" trigger.  Same trigger_type
 * and ops plumbing as enable_hist; only the name differs — the ops
 * distinguish enable vs. disable by comparing the command string.
 */
static struct event_command trigger_hist_disable_cmd = {
	.name			= DISABLE_HIST_STR,
	.trigger_type		= ETT_HIST_ENABLE,
	.func			= event_enable_trigger_func,
	.reg			= event_enable_register_trigger,
	.unreg			= event_enable_unregister_trigger,
	.unreg_all		= hist_enable_unreg_all,
	.get_trigger_ops	= hist_enable_get_trigger_ops,
	.set_filter		= set_trigger_filter,
};
/*
 * Roll back registration of the enable_hist/disable_hist commands.
 * Used only as error-path cleanup during boot registration below.
 */
static __init void unregister_trigger_hist_enable_disable_cmds(void)
{
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
}
/*
 * Register the enable_hist and disable_hist trigger commands at boot.
 *
 * If the first registration fails, return its error immediately; if the
 * second fails, unregister the first so the pair never ends up
 * half-registered.  Either failure is a kernel bug, hence WARN_ON.
 */
__init int register_trigger_hist_enable_disable_cmds(void)
{
	int ret;

	ret = register_event_command(&trigger_hist_enable_cmd);
	if (WARN_ON(ret < 0))
		return ret;	/* NOTE(review): early return elided in excerpt */
	ret = register_event_command(&trigger_hist_disable_cmd);
	if (WARN_ON(ret < 0))
		unregister_trigger_hist_enable_disable_cmds();

	return ret;
}
/*
 * Boot-time init for the histogram subsystem:
 *  1) register the synthetic-event dynamic-event ops, and
 *  2) create the tracefs "synthetic_events" control file.
 *
 * NOTE(review): parts of the error-handling control flow (the goto/err
 * label and intermediate checks) were elided in this excerpt and are
 * reconstructed here from the visible tokens — verify against upstream.
 */
static __init int trace_events_hist_init(void)
{
	struct dentry *entry = NULL;
	struct dentry *d_tracer;
	int err = 0;

	err = dyn_event_register(&synth_event_ops);
	if (err) {
		pr_warn("Could not register synth_event_ops\n");
		return err;
	}

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer)) {
		err = PTR_ERR(d_tracer);
		goto err;
	}

	entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
				    NULL, &synth_events_fops);
	if (!entry) {
		err = -ENODEV;
		goto err;
	}

	return err;
 err:
	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
	return err;
}

fs_initcall(trace_events_hist_init);