/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <fnmatch.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <regex.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>

#include "eal_trace.h"

RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
RTE_DEFINE_PER_LCORE(void *, trace_mem);
static RTE_DEFINE_PER_LCORE(char, ctf_field[TRACE_CTF_FIELD_SIZE]);
static RTE_DEFINE_PER_LCORE(int, ctf_count);

static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
static struct trace trace = { .args = STAILQ_HEAD_INITIALIZER(trace.args), };

struct trace *
trace_obj_get(void)
{
        return &trace;
}

struct trace_point_head *
trace_list_head_get(void)
{
        return &tp_list;
}

int
eal_trace_init(void)
{
        struct trace_arg *arg;

        /* Trace memory must be 8B aligned for natural alignment */
        RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header, mem) % 8) != 0);

        /* Bail out if any trace point registration failed */
        if (trace.register_errno) {
                rte_errno = trace.register_errno;
                goto fail;
        }

        if (!STAILQ_EMPTY(&trace.args))
                trace.status = true;

        if (!rte_trace_is_enabled())
                return 0;

        rte_spinlock_init(&trace.lock);

        /* Check for duplicate trace point names */
        if (trace_has_duplicate_entry())
                goto fail;

        /* Generate a version 4 UUID seeded with the total size of events
         * and the number of events
         */
        trace_uuid_generate();

        /* Apply buffer size configuration for trace output */
        trace_bufsz_args_apply();

        /* Generate CTF TDSL metadata */
        if (trace_metadata_create() < 0)
                goto fail;

        /* Create trace directory */
        if (trace_mkdir())
                goto free_meta;

        /* Save current epoch timestamp for future use; release the metadata
         * on failure as well, so it does not leak.
         */
        if (trace_epoch_time_save() < 0)
                goto free_meta;

        /* Apply global configurations */
        STAILQ_FOREACH(arg, &trace.args, next)
                trace_args_apply(arg->val);

        rte_trace_mode_set(trace.mode);

        return 0;

free_meta:
        trace_metadata_destroy();
fail:
        trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
        return -rte_errno;
}

void
eal_trace_fini(void)
{
        if (!rte_trace_is_enabled())
                return;
        trace_mem_per_thread_free();
        trace_metadata_destroy();
        eal_trace_args_free();
}

bool
rte_trace_is_enabled(void)
{
        return trace.status;
}

static void
trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
{
        if (mode == RTE_TRACE_MODE_OVERWRITE)
                __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
                        __ATOMIC_RELEASE);
        else
                __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
                        __ATOMIC_RELEASE);
}

void
rte_trace_mode_set(enum rte_trace_mode mode)
{
        struct trace_point *tp;

        if (!rte_trace_is_enabled())
                return;

        STAILQ_FOREACH(tp, &tp_list, next)
                trace_mode_set(tp->handle, mode);

        trace.mode = mode;
}

enum rte_trace_mode
rte_trace_mode_get(void)
{
        return trace.mode;
}

static bool
trace_point_is_invalid(rte_trace_point_t *t)
{
        return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
}

bool
rte_trace_point_is_enabled(rte_trace_point_t *trace)
{
        uint64_t val;

        if (trace_point_is_invalid(trace))
                return false;

        val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
        return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
}

int
rte_trace_point_enable(rte_trace_point_t *trace)
{
        if (trace_point_is_invalid(trace))
                return -ERANGE;

        __atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
                __ATOMIC_RELEASE);
        return 0;
}

int
rte_trace_point_disable(rte_trace_point_t *trace)
{
        if (trace_point_is_invalid(trace))
                return -ERANGE;

        __atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
                __ATOMIC_RELEASE);
        return 0;
}

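/*
 * Illustrative usage (not part of this file): a single trace point can be
 * toggled by looking up its handle by name (see rte_trace_point_lookup()
 * below) and passing it to the enable/disable helpers above. The trace
 * point name used here is only an example.
 *
 *	rte_trace_point_t *handle;
 *
 *	handle = rte_trace_point_lookup("lib.eal.generic.void");
 *	if (handle != NULL)
 *		rte_trace_point_enable(handle);
 */
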
int
rte_trace_pattern(const char *pattern, bool enable)
{
        struct trace_point *tp;
        int rc = 0, found = 0;

        STAILQ_FOREACH(tp, &tp_list, next) {
                if (fnmatch(pattern, tp->name, 0) == 0) {
                        if (enable)
                                rc = rte_trace_point_enable(tp->handle);
                        else
                                rc = rte_trace_point_disable(tp->handle);
                        found = 1;
                }
                if (rc < 0)
                        return rc;
        }

        return rc | found;
}

int
rte_trace_regexp(const char *regex, bool enable)
{
        struct trace_point *tp;
        int rc = 0, found = 0;
        regex_t r;

        if (regcomp(&r, regex, 0) != 0)
                return -EINVAL;

        STAILQ_FOREACH(tp, &tp_list, next) {
                if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
                        if (enable)
                                rc = rte_trace_point_enable(tp->handle);
                        else
                                rc = rte_trace_point_disable(tp->handle);
                        found = 1;
                }
                if (rc < 0) {
                        /* Release the compiled regex on the error path too */
                        regfree(&r);
                        return rc;
                }
        }
        regfree(&r);

        return rc | found;
}

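/*
 * Illustrative usage (not part of this file): trace points can be switched
 * in bulk with a glob pattern or a POSIX regular expression; the patterns
 * below are examples only. Both helpers return a negative errno on failure,
 * 1 when at least one trace point matched and 0 when nothing matched.
 *
 *	rte_trace_pattern("lib.ethdev.*", true);
 *	rte_trace_regexp("^lib\\.eal\\..*", false);
 */
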
rte_trace_point_t *
rte_trace_point_lookup(const char *name)
{
        struct trace_point *tp;

        if (name == NULL)
                return NULL;

        STAILQ_FOREACH(tp, &tp_list, next)
                if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
                        return tp->handle;

        return NULL;
}

static void
trace_point_dump(FILE *f, struct trace_point *tp)
{
        rte_trace_point_t *handle = tp->handle;

        fprintf(f, "\tid %d, %s, size is %d, %s\n",
                trace_id_get(handle), tp->name,
                (uint16_t)(*handle & __RTE_TRACE_FIELD_SIZE_MASK),
                rte_trace_point_is_enabled(handle) ? "enabled" : "disabled");
}

static void
trace_lcore_mem_dump(FILE *f)
{
        struct trace *trace = trace_obj_get();
        struct __rte_trace_header *header;
        uint32_t count;

        if (trace->nb_trace_mem_list == 0)
                return;

        rte_spinlock_lock(&trace->lock);
        fprintf(f, "nb_trace_mem_list = %d\n", trace->nb_trace_mem_list);
        fprintf(f, "\nTrace mem info\n--------------\n");
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                header = trace->lcore_meta[count].mem;
                fprintf(f, "\tid %d, mem=%p, area=%s, lcore_id=%d, name=%s\n",
                        count, header,
                        trace_area_to_string(trace->lcore_meta[count].area),
                        header->stream_header.lcore_id,
                        header->stream_header.thread_name);
        }
        rte_spinlock_unlock(&trace->lock);
}

void
rte_trace_dump(FILE *f)
{
        struct trace_point_head *tp_list = trace_list_head_get();
        struct trace *trace = trace_obj_get();
        struct trace_point *tp;

        fprintf(f, "\nGlobal info\n-----------\n");
        fprintf(f, "status = %s\n",
                rte_trace_is_enabled() ? "enabled" : "disabled");
        fprintf(f, "mode = %s\n",
                trace_mode_to_string(rte_trace_mode_get()));
        fprintf(f, "dir = %s\n", trace->dir);
        fprintf(f, "buffer len = %d\n", trace->buff_len);
        fprintf(f, "number of trace points = %d\n", trace->nb_trace_points);

        trace_lcore_mem_dump(f);
        fprintf(f, "\nTrace point info\n----------------\n");
        STAILQ_FOREACH(tp, tp_list, next)
                trace_point_dump(f, tp);
}

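/*
 * Illustrative usage (not part of this file): dump the current trace
 * configuration, per-thread buffer metadata and trace point states to a
 * stream, e.g. at the end of application initialization.
 *
 *	rte_trace_dump(stdout);
 */
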
void
__rte_trace_mem_per_thread_alloc(void)
{
        struct trace *trace = trace_obj_get();
        struct __rte_trace_header *header;
        void *new_meta;
        uint32_t count;

        if (!rte_trace_is_enabled())
                return;

        if (RTE_PER_LCORE(trace_mem))
                return;

        rte_spinlock_lock(&trace->lock);

        count = trace->nb_trace_mem_list;

        /* Allocate room for storing the thread trace mem meta; use a
         * temporary pointer so the existing array is kept on failure.
         */
        new_meta = realloc(trace->lcore_meta,
                sizeof(trace->lcore_meta[0]) * (count + 1));

        /* Provide dummy space for fast path to consume */
        if (new_meta == NULL) {
                trace_crit("trace mem meta memory realloc failed");
                header = NULL;
                goto fail;
        }
        trace->lcore_meta = new_meta;

        /* First attempt from huge page */
        header = eal_malloc_no_trace(NULL, trace_mem_sz(trace->buff_len), 8);
        if (header) {
                trace->lcore_meta[count].area = TRACE_AREA_HUGEPAGE;
                goto found;
        }

        /* Second attempt from heap */
        header = malloc(trace_mem_sz(trace->buff_len));
        if (header == NULL) {
                trace_crit("trace mem malloc attempt failed");
                goto fail;
        }

        /* Second attempt from heap succeeded */
        trace->lcore_meta[count].area = TRACE_AREA_HEAP;

        /* Initialize the trace header */
found:
        header->offset = 0;
        header->len = trace->buff_len;
        header->stream_header.magic = TRACE_CTF_MAGIC;
        rte_uuid_copy(header->stream_header.uuid, trace->uuid);
        header->stream_header.lcore_id = rte_lcore_id();

        /* Store the thread name */
        char *name = header->stream_header.thread_name;
        memset(name, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX);
        rte_thread_getname(pthread_self(), name,
                __RTE_TRACE_EMIT_STRING_LEN_MAX);

        trace->lcore_meta[count].mem = header;
        trace->nb_trace_mem_list++;
fail:
        RTE_PER_LCORE(trace_mem) = header;
        rte_spinlock_unlock(&trace->lock);
}

void
trace_mem_per_thread_free(void)
{
        struct trace *trace = trace_obj_get();
        uint32_t count;
        void *mem;

        if (!rte_trace_is_enabled())
                return;

        rte_spinlock_lock(&trace->lock);
        for (count = 0; count < trace->nb_trace_mem_list; count++) {
                mem = trace->lcore_meta[count].mem;
                if (trace->lcore_meta[count].area == TRACE_AREA_HUGEPAGE)
                        eal_free_no_trace(mem);
                else if (trace->lcore_meta[count].area == TRACE_AREA_HEAP)
                        free(mem);
        }
        rte_spinlock_unlock(&trace->lock);
}

void
__rte_trace_point_emit_field(size_t sz, const char *in, const char *datatype)
{
        char *field = RTE_PER_LCORE(ctf_field);
        int count = RTE_PER_LCORE(ctf_count);
        size_t size;
        int rc;

        size = RTE_MAX(0, TRACE_CTF_FIELD_SIZE - 1 - count);
        RTE_PER_LCORE(trace_point_sz) += sz;
        rc = snprintf(RTE_PTR_ADD(field, count), size, "%s %s;", datatype, in);
        if (rc <= 0 || (size_t)rc >= size) {
                RTE_PER_LCORE(trace_point_sz) = 0;
                trace_crit("CTF field is too long");
                return;
        }
        RTE_PER_LCORE(ctf_count) += rc;
}

int
__rte_trace_point_register(rte_trace_point_t *handle, const char *name,
                void (*register_fn)(void))
{
        char *field = RTE_PER_LCORE(ctf_field);
        struct trace_point *tp;
        uint16_t sz;

        /* Sanity checks of arguments */
        if (name == NULL || register_fn == NULL || handle == NULL) {
                trace_err("invalid arguments");
                rte_errno = EINVAL;
                goto fail;
        }

        /* Check the size of the trace point object */
        RTE_PER_LCORE(trace_point_sz) = 0;
        RTE_PER_LCORE(ctf_count) = 0;
        register_fn();
        if (RTE_PER_LCORE(trace_point_sz) == 0) {
                trace_err("missing rte_trace_emit_header() in register fn");
                rte_errno = EBADF;
                goto fail;
        }

        /* Check for size overflow */
        if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
                trace_err("trace point size overflowed");
                rte_errno = ENOSPC;
                goto fail;
        }

        /* Are we running out of space to store trace points? */
        if (trace.nb_trace_points > UINT16_MAX) {
                trace_err("trace point exceeds the max count");
                rte_errno = ENOSPC;
                goto fail;
        }

        /* Get the size of the trace point */
        sz = RTE_PER_LCORE(trace_point_sz);
        tp = calloc(1, sizeof(struct trace_point));
        if (tp == NULL) {
                trace_err("failed to allocate trace point memory");
                rte_errno = ENOMEM;
                goto fail;
        }

        /* Initialize the trace point */
        if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
                trace_err("name is too long");
                rte_errno = E2BIG;
                goto free;
        }

        /* Copy the field data for future use */
        if (rte_strscpy(tp->ctf_field, field, TRACE_CTF_FIELD_SIZE) < 0) {
                trace_err("CTF field is too long");
                rte_errno = E2BIG;
                goto free;
        }

        /* Clear field memory for the next event */
        memset(field, 0, TRACE_CTF_FIELD_SIZE);

        /* Form the trace handle: size in the low bits, id in the high bits */
        *handle = sz;
        *handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;

        trace.nb_trace_points++;
        tp->handle = handle;

        /* Add the trace point at tail */
        STAILQ_INSERT_TAIL(&tp_list, tp, next);
        __atomic_thread_fence(__ATOMIC_RELEASE);

        /* All good */
        return 0;
free:
        free(tp);
fail:
        if (trace.register_errno == 0)
                trace.register_errno = rte_errno;

        return -rte_errno;
}
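
/*
 * Illustrative usage (not part of this file): trace point providers do not
 * call __rte_trace_point_register() directly; they normally go through the
 * public macros from rte_trace_point_register.h, roughly as sketched below.
 * The trace point, argument and registration names are placeholders.
 *
 *	RTE_TRACE_POINT(
 *		app_trace_burst,
 *		RTE_TRACE_POINT_ARGS(uint16_t nb_pkts),
 *		rte_trace_point_emit_u16(nb_pkts);
 *	)
 *
 *	RTE_TRACE_POINT_REGISTER(app_trace_burst, app.trace.burst)
 */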