1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2020 Marvell International Ltd.
10 #include <rte_common.h>
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_per_lcore.h>
14 #include <rte_string_fns.h>
16 #include "eal_trace.h"
18 RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz
);
19 RTE_DEFINE_PER_LCORE(void *, trace_mem
);
20 static RTE_DEFINE_PER_LCORE(char, ctf_field
[TRACE_CTF_FIELD_SIZE
]);
21 static RTE_DEFINE_PER_LCORE(int, ctf_count
);
23 static struct trace_point_head tp_list
= STAILQ_HEAD_INITIALIZER(tp_list
);
24 static struct trace trace
= { .args
= STAILQ_HEAD_INITIALIZER(trace
.args
), };
32 struct trace_point_head
*
33 trace_list_head_get(void)
/*
 * Interior of eal_trace_init(): one-time trace subsystem initialization.
 * NOTE(review): the extraction dropped several original lines in this span
 * (function header, goto targets, returns, braces). The surviving text is
 * kept byte-identical below; restore against the full file before building.
 */
41 struct trace_arg
*arg
;
43 /* Trace memory should start with 8B aligned for natural alignment */
44 RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header
, mem
) % 8) != 0);
46 /* One of the trace point registration failed */
47 if (trace
.register_errno
) {
/* Propagate the first registration error recorded at constructor time. */
48 rte_errno
= trace
.register_errno
;
/* Any saved --trace argument means tracing was requested on the cmdline. */
52 if (!STAILQ_EMPTY(&trace
.args
))
/* Nothing more to do when tracing is not enabled. */
55 if (!rte_trace_is_enabled())
58 rte_spinlock_init(&trace
.lock
);
60 /* Is duplicate trace name registered */
61 if (trace_has_duplicate_entry())
64 /* Generate UUID ver 4 with total size of events and number of
67 trace_uuid_generate();
69 /* Apply buffer size configuration for trace output */
70 trace_bufsz_args_apply();
72 /* Generate CTF TDSL metadata */
73 if (trace_metadata_create() < 0)
76 /* Create trace directory */
80 /* Save current epoch timestamp for future use */
81 if (trace_epoch_time_save() < 0)
84 /* Apply global configurations */
85 STAILQ_FOREACH(arg
, &trace
.args
, next
)
86 trace_args_apply(arg
->val
);
/* Push the configured mode down to every registered trace point. */
88 rte_trace_mode_set(trace
.mode
);
/* Error unwind: drop generated metadata, then report the failure. */
93 trace_metadata_destroy();
95 trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno
));
/* Tear down tracing at EAL cleanup: release the per-thread trace buffers,
 * the generated CTF metadata and the saved trace arguments.
 * No-op when tracing was never enabled.
 * NOTE(review): the function header line was dropped by extraction and is
 * reconstructed here — confirm signature against the full file.
 */
void
eal_trace_fini(void)
{
	if (!rte_trace_is_enabled())
		return;
	trace_mem_per_thread_free();
	trace_metadata_destroy();
	eal_trace_args_free();
}
110 rte_trace_is_enabled(void)
116 trace_mode_set(rte_trace_point_t
*trace
, enum rte_trace_mode mode
)
118 if (mode
== RTE_TRACE_MODE_OVERWRITE
)
119 __atomic_and_fetch(trace
, ~__RTE_TRACE_FIELD_ENABLE_DISCARD
,
122 __atomic_or_fetch(trace
, __RTE_TRACE_FIELD_ENABLE_DISCARD
,
127 rte_trace_mode_set(enum rte_trace_mode mode
)
129 struct trace_point
*tp
;
131 if (!rte_trace_is_enabled())
134 STAILQ_FOREACH(tp
, &tp_list
, next
)
135 trace_mode_set(tp
->handle
, mode
);
141 rte_trace_mode
rte_trace_mode_get(void)
147 trace_point_is_invalid(rte_trace_point_t
*t
)
149 return (t
== NULL
) || (trace_id_get(t
) >= trace
.nb_trace_points
);
153 rte_trace_point_is_enabled(rte_trace_point_t
*trace
)
157 if (trace_point_is_invalid(trace
))
160 val
= __atomic_load_n(trace
, __ATOMIC_ACQUIRE
);
161 return (val
& __RTE_TRACE_FIELD_ENABLE_MASK
) != 0;
165 rte_trace_point_enable(rte_trace_point_t
*trace
)
167 if (trace_point_is_invalid(trace
))
170 __atomic_or_fetch(trace
, __RTE_TRACE_FIELD_ENABLE_MASK
,
176 rte_trace_point_disable(rte_trace_point_t
*trace
)
178 if (trace_point_is_invalid(trace
))
181 __atomic_and_fetch(trace
, ~__RTE_TRACE_FIELD_ENABLE_MASK
,
187 rte_trace_pattern(const char *pattern
, bool enable
)
189 struct trace_point
*tp
;
190 int rc
= 0, found
= 0;
192 STAILQ_FOREACH(tp
, &tp_list
, next
) {
193 if (fnmatch(pattern
, tp
->name
, 0) == 0) {
195 rc
= rte_trace_point_enable(tp
->handle
);
197 rc
= rte_trace_point_disable(tp
->handle
);
208 rte_trace_regexp(const char *regex
, bool enable
)
210 struct trace_point
*tp
;
211 int rc
= 0, found
= 0;
214 if (regcomp(&r
, regex
, 0) != 0)
217 STAILQ_FOREACH(tp
, &tp_list
, next
) {
218 if (regexec(&r
, tp
->name
, 0, NULL
, 0) == 0) {
220 rc
= rte_trace_point_enable(tp
->handle
);
222 rc
= rte_trace_point_disable(tp
->handle
);
234 rte_trace_point_lookup(const char *name
)
236 struct trace_point
*tp
;
241 STAILQ_FOREACH(tp
, &tp_list
, next
)
242 if (strncmp(tp
->name
, name
, TRACE_POINT_NAME_SIZE
) == 0)
249 trace_point_dump(FILE *f
, struct trace_point
*tp
)
251 rte_trace_point_t
*handle
= tp
->handle
;
253 fprintf(f
, "\tid %d, %s, size is %d, %s\n",
254 trace_id_get(handle
), tp
->name
,
255 (uint16_t)(*handle
& __RTE_TRACE_FIELD_SIZE_MASK
),
256 rte_trace_point_is_enabled(handle
) ? "enabled" : "disabled");
260 trace_lcore_mem_dump(FILE *f
)
262 struct trace
*trace
= trace_obj_get();
263 struct __rte_trace_header
*header
;
266 if (trace
->nb_trace_mem_list
== 0)
269 rte_spinlock_lock(&trace
->lock
);
270 fprintf(f
, "nb_trace_mem_list = %d\n", trace
->nb_trace_mem_list
);
271 fprintf(f
, "\nTrace mem info\n--------------\n");
272 for (count
= 0; count
< trace
->nb_trace_mem_list
; count
++) {
273 header
= trace
->lcore_meta
[count
].mem
;
274 fprintf(f
, "\tid %d, mem=%p, area=%s, lcore_id=%d, name=%s\n",
276 trace_area_to_string(trace
->lcore_meta
[count
].area
),
277 header
->stream_header
.lcore_id
,
278 header
->stream_header
.thread_name
);
280 rte_spinlock_unlock(&trace
->lock
);
284 rte_trace_dump(FILE *f
)
286 struct trace_point_head
*tp_list
= trace_list_head_get();
287 struct trace
*trace
= trace_obj_get();
288 struct trace_point
*tp
;
290 fprintf(f
, "\nGlobal info\n-----------\n");
291 fprintf(f
, "status = %s\n",
292 rte_trace_is_enabled() ? "enabled" : "disabled");
293 fprintf(f
, "mode = %s\n",
294 trace_mode_to_string(rte_trace_mode_get()));
295 fprintf(f
, "dir = %s\n", trace
->dir
);
296 fprintf(f
, "buffer len = %d\n", trace
->buff_len
);
297 fprintf(f
, "number of trace points = %d\n", trace
->nb_trace_points
);
299 trace_lcore_mem_dump(f
);
300 fprintf(f
, "\nTrace point info\n----------------\n");
301 STAILQ_FOREACH(tp
, tp_list
, next
)
302 trace_point_dump(f
, tp
);
/*
 * Interior of __rte_trace_mem_per_thread_alloc(): lazily allocate the
 * calling thread's trace buffer (hugepage first, heap as fallback) and
 * record it in the global lcore meta list under the trace lock.
 * NOTE(review): the extraction dropped several original lines in this span
 * (declarations, goto labels, braces). The surviving text is kept
 * byte-identical below; restore against the full file before building.
 */
306 __rte_trace_mem_per_thread_alloc(void)
308 struct trace
*trace
= trace_obj_get();
309 struct __rte_trace_header
*header
;
/* Nothing to do when tracing is off or this thread already has a buffer. */
312 if (!rte_trace_is_enabled())
315 if (RTE_PER_LCORE(trace_mem
))
318 rte_spinlock_lock(&trace
->lock
);
320 count
= trace
->nb_trace_mem_list
;
322 /* Allocate room for storing the thread trace mem meta */
/* NOTE(review): realloc() result overwrites trace->lcore_meta directly;
 * on failure the previous array is lost/leaked (classic realloc pitfall)
 * — consider a temporary pointer; confirm against upstream fixes.
 */
323 trace
->lcore_meta
= realloc(trace
->lcore_meta
,
324 sizeof(trace
->lcore_meta
[0]) * (count
+ 1));
326 /* Provide dummy space for fast path to consume */
327 if (trace
->lcore_meta
== NULL
) {
328 trace_crit("trace mem meta memory realloc failed");
333 /* First attempt from huge page */
334 header
= eal_malloc_no_trace(NULL
, trace_mem_sz(trace
->buff_len
), 8);
336 trace
->lcore_meta
[count
].area
= TRACE_AREA_HUGEPAGE
;
340 /* Second attempt from heap */
341 header
= malloc(trace_mem_sz(trace
->buff_len
));
342 if (header
== NULL
) {
343 trace_crit("trace mem malloc attempt failed");
349 /* Second attempt from heap is success */
350 trace
->lcore_meta
[count
].area
= TRACE_AREA_HEAP
;
352 /* Initialize the trace header */
355 header
->len
= trace
->buff_len
;
356 header
->stream_header
.magic
= TRACE_CTF_MAGIC
;
357 rte_uuid_copy(header
->stream_header
.uuid
, trace
->uuid
);
358 header
->stream_header
.lcore_id
= rte_lcore_id();
360 /* Store the thread name */
361 char *name
= header
->stream_header
.thread_name
;
362 memset(name
, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX
);
363 rte_thread_getname(pthread_self(), name
,
364 __RTE_TRACE_EMIT_STRING_LEN_MAX
);
/* Publish the new buffer in the meta list and for this thread. */
366 trace
->lcore_meta
[count
].mem
= header
;
367 trace
->nb_trace_mem_list
++;
369 RTE_PER_LCORE(trace_mem
) = header
;
370 rte_spinlock_unlock(&trace
->lock
);
374 trace_mem_per_thread_free(void)
376 struct trace
*trace
= trace_obj_get();
380 if (!rte_trace_is_enabled())
383 rte_spinlock_lock(&trace
->lock
);
384 for (count
= 0; count
< trace
->nb_trace_mem_list
; count
++) {
385 mem
= trace
->lcore_meta
[count
].mem
;
386 if (trace
->lcore_meta
[count
].area
== TRACE_AREA_HUGEPAGE
)
387 eal_free_no_trace(mem
);
388 else if (trace
->lcore_meta
[count
].area
== TRACE_AREA_HEAP
)
391 rte_spinlock_unlock(&trace
->lock
);
395 __rte_trace_point_emit_field(size_t sz
, const char *in
, const char *datatype
)
397 char *field
= RTE_PER_LCORE(ctf_field
);
398 int count
= RTE_PER_LCORE(ctf_count
);
402 size
= RTE_MAX(0, TRACE_CTF_FIELD_SIZE
- 1 - count
);
403 RTE_PER_LCORE(trace_point_sz
) += sz
;
404 rc
= snprintf(RTE_PTR_ADD(field
, count
), size
, "%s %s;", datatype
, in
);
405 if (rc
<= 0 || (size_t)rc
>= size
) {
406 RTE_PER_LCORE(trace_point_sz
) = 0;
407 trace_crit("CTF field is too long");
410 RTE_PER_LCORE(ctf_count
) += rc
;
/*
 * Interior of __rte_trace_point_register(): called from trace point
 * constructors; validates arguments, measures the event size via the
 * register callback, allocates a struct trace_point, forms the handle
 * (size | id << shift) and appends it to tp_list.
 * NOTE(review): the extraction dropped several original lines in this span
 * (rte_errno assignments, goto labels, the register_fn() invocation,
 * returns). The surviving text is kept byte-identical below; restore
 * against the full file before building.
 */
414 __rte_trace_point_register(rte_trace_point_t
*handle
, const char *name
,
415 void (*register_fn
)(void))
417 char *field
= RTE_PER_LCORE(ctf_field
);
418 struct trace_point
*tp
;
421 /* Sanity checks of arguments */
422 if (name
== NULL
|| register_fn
== NULL
|| handle
== NULL
) {
423 trace_err("invalid arguments");
428 /* Check the size of the trace point object */
/* Reset per-lcore scratch, then the register callback emits fields. */
429 RTE_PER_LCORE(trace_point_sz
) = 0;
430 RTE_PER_LCORE(ctf_count
) = 0;
432 if (RTE_PER_LCORE(trace_point_sz
) == 0) {
433 trace_err("missing rte_trace_emit_header() in register fn");
438 /* Is size overflowed */
439 if (RTE_PER_LCORE(trace_point_sz
) > UINT16_MAX
) {
440 trace_err("trace point size overflowed");
445 /* Are we running out of space to store trace points? */
446 if (trace
.nb_trace_points
> UINT16_MAX
) {
447 trace_err("trace point exceeds the max count");
452 /* Get the size of the trace point */
453 sz
= RTE_PER_LCORE(trace_point_sz
);
454 tp
= calloc(1, sizeof(struct trace_point
));
456 trace_err("fail to allocate trace point memory");
461 /* Initialize the trace point */
462 if (rte_strscpy(tp
->name
, name
, TRACE_POINT_NAME_SIZE
) < 0) {
463 trace_err("name is too long");
468 /* Copy the field data for future use */
469 if (rte_strscpy(tp
->ctf_field
, field
, TRACE_CTF_FIELD_SIZE
) < 0) {
470 trace_err("CTF field size is too long");
475 /* Clear field memory for the next event */
476 memset(field
, 0, TRACE_CTF_FIELD_SIZE
);
478 /* Form the trace handle */
/* Handle word encodes the point id in the upper field bits. */
480 *handle
|= trace
.nb_trace_points
<< __RTE_TRACE_FIELD_ID_SHIFT
;
482 trace
.nb_trace_points
++;
485 /* Add the trace point at tail */
486 STAILQ_INSERT_TAIL(&tp_list
, tp
, next
);
/* Make the new entry visible before any reader walks the list. */
487 __atomic_thread_fence(__ATOMIC_RELEASE
);
/* Failure path: remember the first registration errno for eal_trace_init. */
494 if (trace
.register_errno
== 0)
495 trace
.register_errno
= rte_errno
;