/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 *	[...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */

#include <linux/ftrace_event.h>

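/*
 * For example, a hypothetical event (illustrative only, not defined in
 * this file) declared as:
 *
 *	TRACE_EVENT(foo,
 *		TP_PROTO(int bar, const char *msg),
 *		TP_ARGS(bar, msg),
 *		TP_STRUCT__entry(
 *			__field(int, bar)
 *			__string(msg, msg)
 *		),
 *		...);
 *
 * would expand in this stage to:
 *
 *	struct ftrace_raw_foo {
 *		struct trace_entry	ent;
 *		int			bar;
 *		u32			__data_loc_msg;
 *		char			__data[0];
 *	};
 *	static struct ftrace_event_call event_foo;
 */
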
#undef __field
#define __field(type, item)		type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
	struct ftrace_raw_##name { \
		struct trace_entry	ent; \
		tstruct \
		char			__data[0]; \
	}; \
	static struct ftrace_event_call event_##name

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32	<item1>;
 *	u32	<item2>;
 *	[...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is used
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */

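/*
 * Continuing the hypothetical foo event from stage 1, this stage would
 * generate:
 *
 *	struct ftrace_data_offsets_foo {
 *		u32	msg;
 *	};
 *
 * (__field() contributes nothing here; only dynamic arrays and strings
 * get an offset slot.)
 */
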
#undef __field
#define __field(type, item);

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
	struct ftrace_data_offsets_##call { \
		tstruct; \
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Set up the format for showing the trace point.
 *
 * int
 * ftrace_format_##call(struct trace_seq *s)
 * {
 *	struct ftrace_raw_##call field;
 *	int ret;
 *
 *	ret = trace_seq_printf(s, #type " " #item ";"
 *			       " offset:%u; size:%u;\n",
 *			       offsetof(struct ftrace_raw_##call, item),
 *			       sizeof(field.type));
 *
 * }
 */

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef __field
#define __field(type, item) \
	ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
			       "offset:%u;\tsize:%u;\n", \
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item)); \
	if (!ret) \
		return 0;

#undef __array
#define __array(type, item, len) \
	ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
			       "offset:%u;\tsize:%u;\n", \
			       (unsigned int)offsetof(typeof(field), item), \
			       (unsigned int)sizeof(field.item)); \
	if (!ret) \
		return 0;

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	ret = trace_seq_printf(s, "\tfield:__data_loc " #type "[] " #item ";\t" \
			       "offset:%u;\tsize:%u;\n", \
			       (unsigned int)offsetof(typeof(field), \
					__data_loc_##item), \
			       (unsigned int)sizeof(field.__data_loc_##item)); \
	if (!ret) \
		return 0;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __entry
#define __entry REC

#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
static int \
ftrace_format_##call(struct trace_seq *s) \
{ \
	struct ftrace_raw_##call field __attribute__((unused)); \
	int ret = 0; \
 \
	tstruct; \
 \
	trace_seq_printf(s, "\nprint fmt: " print); \
 \
	return ret; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

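/*
 * For the hypothetical foo event, ftrace_format_foo() would emit
 * something like the following (the offsets are illustrative; they
 * depend on the layout of struct trace_entry):
 *
 *	field:int bar;	offset:12;	size:4;
 *	field:__data_loc char[] msg;	offset:16;	size:4;
 *
 *	print fmt: ...
 */
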
/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field;	<-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p;
 *	int ret;
 *
 *	entry = iter->ent;
 *
 *	if (entry->type != event_<call>.id) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *
 *	p = &get_cpu_var(ftrace_event_seq);
 *	trace_seq_init(p);
 *	ret = trace_seq_printf(s, <TP_printk> "\n");
 *	put_cpu();
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field) \
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)

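/*
 * Decoding example (illustrative values only): if __data_loc_msg holds
 * 0x0006001c, the low 16 bits say the string data starts 0x1c bytes
 * from the beginning of the entry and the high 16 bits say it is 6
 * bytes long, so __get_str(msg) evaluates to (char *)__entry + 0x1c.
 */
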
#undef __print_flags
#define __print_flags(flag, delim, flag_array...) \
	({ \
		static const struct trace_print_flags flags[] = \
			{ flag_array, { -1, NULL }}; \
		ftrace_print_flags_seq(p, delim, flag, flags); \
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...) \
	({ \
		static const struct trace_print_flags symbols[] = \
			{ symbol_array, { -1, NULL }}; \
		ftrace_print_symbols_seq(p, value, symbols); \
	})

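/*
 * In an event's TP_printk() (hypothetical example), these helpers can
 * be used to pretty-print raw values:
 *
 *	TP_printk("state=%s", __print_symbolic(__entry->state,
 *					       { 0, "RUNNING" },
 *					       { 1, "SLEEPING" }))
 */
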
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
{ \
	struct trace_seq *s = &iter->seq; \
	struct ftrace_raw_##call *field; \
	struct trace_entry *entry; \
	struct trace_seq *p; \
	int ret; \
 \
	entry = iter->ent; \
 \
	if (entry->type != event_##call.id) { \
		WARN_ON_ONCE(1); \
		return TRACE_TYPE_UNHANDLED; \
	} \
 \
	field = (typeof(field))entry; \
 \
	p = &get_cpu_var(ftrace_event_seq); \
	trace_seq_init(p); \
	ret = trace_seq_printf(s, #call ": " print); \
	put_cpu(); \
	if (!ret) \
		return TRACE_TYPE_PARTIAL_LINE; \
 \
	return TRACE_TYPE_HANDLED; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

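/*
 * For the hypothetical foo event with
 * TP_printk("bar=%d msg=%s", __entry->bar, __get_str(msg)), a line
 * produced by ftrace_raw_output_foo() would read:
 *
 *	foo: bar=3 msg=hello
 */
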
#undef __field
#define __field(type, item) \
	ret = trace_define_field(event_call, #type, #item, \
				 offsetof(typeof(field), item), \
				 sizeof(field.item), is_signed_type(type)); \
	if (ret) \
		return ret;

#undef __array
#define __array(type, item, len) \
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
	ret = trace_define_field(event_call, #type "[" #len "]", #item, \
				 offsetof(typeof(field), item), \
				 sizeof(field.item), 0); \
	if (ret) \
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item), \
				 sizeof(field.__data_loc_##item), 0);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
int \
ftrace_define_fields_##call(void) \
{ \
	struct ftrace_raw_##call field; \
	struct ftrace_event_call *event_call = &event_##call; \
	int ret; \
 \
	__common_field(int, type, 1); \
	__common_field(unsigned char, flags, 0); \
	__common_field(unsigned char, preempt_count, 0); \
	__common_field(int, pid, 1); \
	__common_field(int, tgid, 1); \
 \
	tstruct; \
 \
	return ret; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

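/*
 * For the hypothetical foo event, tstruct would expand here to:
 *
 *	ret = trace_define_field(event_call, "int", "bar",
 *				 offsetof(typeof(field), bar),
 *				 sizeof(field.bar), is_signed_type(int));
 *	if (ret)
 *		return ret;
 *	ret = trace_define_field(event_call, "__data_loc char[]", "msg",
 *				 offsetof(typeof(field), __data_loc_msg),
 *				 sizeof(field.__data_loc_msg), 0);
 */
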
/*
 * remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	__data_offsets->item = __data_size + \
			       offsetof(typeof(*entry), __data); \
	__data_offsets->item |= (len * sizeof(type)) << 16; \
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
static inline int ftrace_get_offsets_##call( \
	struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
	int __data_size = 0; \
	struct ftrace_raw_##call __maybe_unused *entry; \
 \
	tstruct; \
 \
	return __data_size; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

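/*
 * Worked example (hypothetical foo event, msg = "hello"): __string()
 * becomes __dynamic_array(char, msg, 6), so starting from
 * __data_size == 0:
 *
 *	__data_offsets->msg  = 0 + offsetof(struct ftrace_raw_foo, __data);
 *	__data_offsets->msg |= (6 * sizeof(char)) << 16;
 *	__data_size = 6;
 *
 * The low 16 bits hold the offset of the string from the start of the
 * entry and the high 16 bits hold its length, which is exactly what
 * __get_dynamic_array() decodes in stage 3.
 */
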
#ifdef CONFIG_EVENT_PROFILE

/*
 * Generate the functions needed for tracepoint perf_counter support.
 *
 * NOTE: The insertion profile callback (ftrace_profile_<call>) is defined later.
 *
 * static int ftrace_profile_enable_<call>(struct ftrace_event_call *event_call)
 * {
 *	int ret = 0;
 *
 *	if (!atomic_inc_return(&event_call->profile_count))
 *		ret = register_trace_<call>(ftrace_profile_<call>);
 *
 *	return ret;
 * }
 *
 * static void ftrace_profile_disable_<call>(struct ftrace_event_call *event_call)
 * {
 *	if (atomic_add_negative(-1, &event_call->profile_count))
 *		unregister_trace_<call>(ftrace_profile_<call>);
 * }
 *
 */

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
 \
static void ftrace_profile_##call(proto); \
 \
static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
{ \
	int ret = 0; \
 \
	if (!atomic_inc_return(&event_call->profile_count)) \
		ret = register_trace_##call(ftrace_profile_##call); \
 \
	return ret; \
} \
 \
static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
{ \
	if (atomic_add_negative(-1, &event_call->profile_count)) \
		unregister_trace_##call(ftrace_profile_##call); \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#endif

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * static void ftrace_event_<call>(proto)
 * {
 *	event_trace_printk(_RET_IP_, "<call>: " <fmt>);
 * }
 *
 * static int ftrace_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_event_<call>);
 * }
 *
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(proto)
 * {
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	unsigned long irq_flags;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	event = trace_current_buffer_lock_reserve(event_<call>.id,
 *				  sizeof(struct ftrace_raw_<call>),
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	<assign>;  <-- Here we assign the entries by the __field and
 *			__array macros.
 *
 *	trace_current_buffer_unlock_commit(event, irq_flags, pc);
 * }
 *
 * static int ftrace_raw_reg_event_<call>(void)
 * {
 *	int ret;
 *
 *	ret = register_trace_<call>(ftrace_raw_event_<call>);
 *	if (ret)
 *		pr_info("event trace: Could not activate trace point "
 *			"probe to <call>");
 *	return ret;
 * }
 *
 * static void ftrace_unreg_event_<call>(void)
 * {
 *	unregister_trace_<call>(ftrace_raw_event_<call>);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static int ftrace_raw_init_event_<call>(void)
 * {
 *	int id;
 *
 *	id = register_ftrace_event(&ftrace_event_type_<call>);
 *	if (!id)
 *		return -ENODEV;
 *	event_<call>.id = id;
 *	return 0;
 * }
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.system			= "<system>",
 *	.raw_init		= ftrace_raw_init_event_<call>,
 *	.regfunc		= ftrace_reg_event_<call>,
 *	.unregfunc		= ftrace_unreg_event_<call>,
 *	.show_format		= ftrace_format_<call>,
 * }
 *
 */

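/*
 * With the hypothetical foo event compiled in and enabled, a call site
 * fires it with the ordinary tracepoint call:
 *
 *	trace_foo(bar, msg);
 *
 * which ends up in the generated ftrace_raw_event_foo() below.
 */
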
#undef TP_FMT
#define TP_FMT(fmt, args...)	fmt "\n", ##args

#ifdef CONFIG_EVENT_PROFILE

#define _TRACE_PROFILE_INIT(call) \
	.profile_count = ATOMIC_INIT(-1), \
	.profile_enable = ftrace_profile_enable_##call, \
	.profile_disable = ftrace_profile_disable_##call,

#else
#define _TRACE_PROFILE_INIT(call)
#endif

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len) \
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src) \
	strcpy(__get_str(dst), src);

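/*
 * In an event's TP_fast_assign() (hypothetical example), the fields of
 * the hypothetical foo event would be filled in with:
 *
 *	TP_fast_assign(
 *		__entry->bar = bar;
 *		__assign_str(msg, msg);
 *	)
 */
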
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
 \
static struct ftrace_event_call event_##call; \
 \
static void ftrace_raw_event_##call(proto) \
{ \
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call; \
	struct ring_buffer_event *event; \
	struct ftrace_raw_##call *entry; \
	unsigned long irq_flags; \
	int __data_size; \
	int pc; \
 \
	local_save_flags(irq_flags); \
	pc = preempt_count(); \
 \
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
 \
	event = trace_current_buffer_lock_reserve(event_##call.id, \
				 sizeof(*entry) + __data_size, \
				 irq_flags, pc); \
	if (!event) \
		return; \
	entry = ring_buffer_event_data(event); \
 \
 \
	tstruct \
 \
	{ assign; } \
 \
	if (!filter_current_check_discard(event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
} \
 \
static int ftrace_raw_reg_event_##call(void *ptr) \
{ \
	int ret; \
 \
	ret = register_trace_##call(ftrace_raw_event_##call); \
	if (ret) \
		pr_info("event trace: Could not activate trace point " \
			"probe to " #call "\n"); \
	return ret; \
} \
 \
static void ftrace_raw_unreg_event_##call(void *ptr) \
{ \
	unregister_trace_##call(ftrace_raw_event_##call); \
} \
 \
static struct trace_event ftrace_event_type_##call = { \
	.trace			= ftrace_raw_output_##call, \
}; \
 \
static int ftrace_raw_init_event_##call(void) \
{ \
	int id; \
 \
	id = register_ftrace_event(&ftrace_event_type_##call); \
	if (!id) \
		return -ENODEV; \
	event_##call.id = id; \
	INIT_LIST_HEAD(&event_##call.fields); \
	init_preds(&event_##call); \
	return 0; \
} \
 \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
	.name			= #call, \
	.system			= __stringify(TRACE_SYSTEM), \
	.event			= &ftrace_event_type_##call, \
	.raw_init		= ftrace_raw_init_event_##call, \
	.regfunc		= ftrace_raw_reg_event_##call, \
	.unregfunc		= ftrace_raw_unreg_event_##call, \
	.show_format		= ftrace_format_##call, \
	.define_fields		= ftrace_define_fields_##call, \
	_TRACE_PROFILE_INIT(call) \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to profile events
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_profile_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tpcounter_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_<call> *entry;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	int __entry_size;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	do {
 *		char raw_data[__entry_size]; <- allocate our sample in the stack
 *		struct trace_entry *ent;
 *
 *		zero dead bytes from alignment to avoid stack leak to userspace:
 *
 *		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *		entry = (struct ftrace_raw_<call> *)raw_data;
 *		ent = &entry->ent;
 *		tracing_generic_entry_update(ent, irq_flags, pc);
 *		ent->type = event_call->id;
 *
 *		<tstruct> <- set up the dynamic array offsets
 *
 *		<assign> <- assign our values
 *
 *		perf_tpcounter_event(event_call->id, __addr, __count, entry,
 *			     __entry_size); <- submit them to perf counter
 *	} while (0);
 *
 * }
 */

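/*
 * Worked example of the size computation above (illustrative numbers):
 * with sizeof(*entry) == 24 and __data_size == 6,
 *
 *	__entry_size = ALIGN(6 + 24 + 4, 8) - 4 = 40 - 4 = 36
 *
 * so that the sample plus the u32 size field that perf appends stays
 * u64 aligned.
 */
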
#ifdef CONFIG_EVENT_PROFILE

#undef __perf_addr
#define __perf_addr(a) __addr = (a)

#undef __perf_count
#define __perf_count(c) __count = (c)

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
static void ftrace_profile_##call(proto) \
{ \
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_call *event_call = &event_##call; \
	extern void perf_tpcounter_event(int, u64, u64, void *, int); \
	struct ftrace_raw_##call *entry; \
	u64 __addr = 0, __count = 1; \
	unsigned long irq_flags; \
	int __entry_size; \
	int __data_size; \
	int pc; \
 \
	local_save_flags(irq_flags); \
	pc = preempt_count(); \
 \
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64)); \
	__entry_size -= sizeof(u32); \
 \
	do { \
		char raw_data[__entry_size]; \
		struct trace_entry *ent; \
 \
		*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; \
		entry = (struct ftrace_raw_##call *)raw_data; \
		ent = &entry->ent; \
		tracing_generic_entry_update(ent, irq_flags, pc); \
		ent->type = event_call->id; \
 \
		tstruct \
 \
		{ assign; } \
 \
		perf_tpcounter_event(event_call->id, __addr, __count, entry,\
			     __entry_size); \
	} while (0); \
 \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_EVENT_PROFILE */

#undef _TRACE_PROFILE_INIT