]> git.proxmox.com Git - mirror_qemu.git/blame - trace/simple.c
trace: use glib atomic int types
[mirror_qemu.git] / trace / simple.c
CommitLineData
26f7227b
SH
1/*
2 * Simple trace backend
3 *
4 * Copyright IBM, Corp. 2010
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2. See
7 * the COPYING file in the top-level directory.
8 *
9 */
10
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#ifndef _WIN32
#include <signal.h>
#include <pthread.h>
#endif
#include "qemu/timer.h"
#include "trace.h"
#include "trace/control.h"
26f7227b
SH
22
/** Trace file header event ID */
#define HEADER_EVENT_ID (~(uint64_t)0) /* avoids conflicting with TraceEventIDs */

/** Trace file magic number */
#define HEADER_MAGIC 0xf2b177cb0aa429b4ULL

/** Trace file version number, bump if format changes */
#define HEADER_VERSION 2

/** Records were dropped event ID */
#define DROPPED_EVENT_ID (~(uint64_t)0 - 1)

/** Trace record is valid */
#define TRACE_RECORD_VALID ((uint64_t)1 << 63)

/*
 * Trace records are written out by a dedicated thread. The thread waits for
 * records to become available, writes them out, and then waits again.
 */
static GStaticMutex trace_lock = G_STATIC_MUTEX_INIT;
/* Signalled when new records are ready for the writeout thread */
static GCond *trace_available_cond;
/* Signalled by the writeout thread when it has nothing left to do */
static GCond *trace_empty_cond;
/* Both flags are protected by trace_lock */
static bool trace_available;
static bool trace_writeout_enabled;

enum {
    TRACE_BUF_LEN = 4096 * 64,
    TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
};

/* Circular buffer shared between trace producers and the writeout thread */
uint8_t trace_buf[TRACE_BUF_LEN];
/* Next free offset for producers (monotonic; wrapped with % TRACE_BUF_LEN) */
static volatile gint trace_idx;
/* Offset up to which the writeout thread has consumed (monotonic) */
static unsigned int writeout_idx;
/* Number of records dropped because the buffer was full */
static volatile gint dropped_events;
static FILE *trace_fp;
static char *trace_file_name;

/** Trace buffer entry */
typedef struct {
    uint64_t event; /* TraceEventID */
    uint64_t timestamp_ns;
    uint32_t length; /* in bytes */
    uint32_t reserved; /* unused */
    uint64_t arguments[];
} TraceRecord;

/* On-disk file header; must match what log readers expect */
typedef struct {
    uint64_t header_event_id; /* HEADER_EVENT_ID */
    uint64_t header_magic; /* HEADER_MAGIC */
    uint64_t header_version; /* HEADER_VERSION */
} TraceLogHeader;


static void read_from_buffer(unsigned int idx, void *dataptr, size_t size);
static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size);
78
79static void clear_buffer_range(unsigned int idx, size_t len)
80{
81 uint32_t num = 0;
82 while (num < len) {
83 if (idx >= TRACE_BUF_LEN) {
84 idx = idx % TRACE_BUF_LEN;
85 }
86 trace_buf[idx++] = 0;
87 num++;
88 }
89}
/**
 * Read a trace record from the trace buffer
 *
 * @idx Trace buffer index
 * @record Trace record to fill
 *
 * Returns false if the record is not valid.
 *
 * On success *recordptr is a malloc'd copy of the record with the
 * valid flag cleared; the caller owns it and must release it with
 * free() (not g_free(), see below).  The consumed buffer range is
 * zeroed so stale bytes cannot be mistaken for a valid event later.
 */
static bool get_trace_record(unsigned int idx, TraceRecord **recordptr)
{
    uint64_t event_flag = 0;
    TraceRecord record;
    /* read the event flag to see if its a valid record */
    read_from_buffer(idx, &record, sizeof(event_flag));

    if (!(record.event & TRACE_RECORD_VALID)) {
        return false;
    }

    smp_rmb(); /* read memory barrier before accessing record */
    /* read the record header to know record length */
    read_from_buffer(idx, &record, sizeof(TraceRecord));
    /* NOTE(review): malloc result is not checked; a failure here would
     * dereference NULL below -- confirm whether OOM is acceptable to crash */
    *recordptr = malloc(record.length); /* dont use g_malloc, can deadlock when traced */
    /* make a copy of record to avoid being overwritten */
    read_from_buffer(idx, *recordptr, record.length);
    smp_rmb(); /* memory barrier before clearing valid flag */
    (*recordptr)->event &= ~TRACE_RECORD_VALID;
    /* clear the trace buffer range for consumed record otherwise any byte
     * with its MSB set may be considered as a valid event id when the writer
     * thread crosses this range of buffer again.
     */
    clear_buffer_range(idx, record.length);
    return true;
}
124
0b5538c3
SH
/**
 * Kick writeout thread
 *
 * @wait Whether to wait for writeout thread to complete
 *
 * Sets trace_available under trace_lock and signals the writeout
 * thread.  With @wait true, blocks on trace_empty_cond until the
 * writeout thread signals that it has drained the buffer.
 */
static void flush_trace_file(bool wait)
{
    g_static_mutex_lock(&trace_lock);
    trace_available = true;
    g_cond_signal(trace_available_cond);

    if (wait) {
        g_cond_wait(trace_empty_cond, g_static_mutex_get_mutex(&trace_lock));
    }

    g_static_mutex_unlock(&trace_lock);
}
142
/*
 * Block the writeout thread until records are available AND writeout is
 * enabled.  While waiting, signal trace_empty_cond so that a thread
 * blocked in flush_trace_file(true) can observe that the buffer has
 * been drained.  Consumes (clears) trace_available before returning.
 */
static void wait_for_trace_records_available(void)
{
    g_static_mutex_lock(&trace_lock);
    while (!(trace_available && trace_writeout_enabled)) {
        g_cond_signal(trace_empty_cond);
        g_cond_wait(trace_available_cond,
                    g_static_mutex_get_mutex(&trace_lock));
    }
    trace_available = false;
    g_static_mutex_unlock(&trace_lock);
}
154
/*
 * Dedicated writeout thread: waits for records, emits a synthetic
 * DROPPED_EVENT_ID record if any events were dropped, then writes out
 * all valid records and flushes the trace file.  Runs forever.
 */
static gpointer writeout_thread(gpointer opaque)
{
    TraceRecord *recordptr;
    /* Stack-allocated dropped-events record: header plus one u64 argument */
    union {
        TraceRecord rec;
        uint8_t bytes[sizeof(TraceRecord) + sizeof(uint64_t)];
    } dropped;
    unsigned int idx = 0;
    int dropped_count;
    size_t unused __attribute__ ((unused)); /* silence fwrite warn_unused_result */

    for (;;) {
        wait_for_trace_records_available();

        if (g_atomic_int_get(&dropped_events)) {
            /* NOTE(review): the trailing commas below are comma operators
             * joining assignments into one statement -- harmless but odd */
            dropped.rec.event = DROPPED_EVENT_ID,
            dropped.rec.timestamp_ns = get_clock();
            dropped.rec.length = sizeof(TraceRecord) + sizeof(uint64_t),
            dropped.rec.reserved = 0;
            /* Atomically fetch-and-reset the dropped counter via CAS loop */
            do {
                dropped_count = g_atomic_int_get(&dropped_events);
            } while (!g_atomic_int_compare_and_exchange(&dropped_events,
                                                        dropped_count, 0));
            dropped.rec.arguments[0] = dropped_count;
            unused = fwrite(&dropped.rec, dropped.rec.length, 1, trace_fp);
        }

        /* Drain every valid record currently in the circular buffer */
        while (get_trace_record(idx, &recordptr)) {
            unused = fwrite(recordptr, recordptr->length, 1, trace_fp);
            writeout_idx += recordptr->length;
            free(recordptr); /* dont use g_free, can deadlock when traced */
            idx = writeout_idx % TRACE_BUF_LEN;
        }

        fflush(trace_fp);
    }
    return NULL;
}
193
62bab732 194void trace_record_write_u64(TraceBufferRecord *rec, uint64_t val)
26f7227b 195{
62bab732 196 rec->rec_off = write_to_buffer(rec->rec_off, &val, sizeof(uint64_t));
26f7227b
SH
197}
198
62bab732 199void trace_record_write_str(TraceBufferRecord *rec, const char *s, uint32_t slen)
26f7227b 200{
62bab732
HPB
201 /* Write string length first */
202 rec->rec_off = write_to_buffer(rec->rec_off, &slen, sizeof(slen));
203 /* Write actual string now */
204 rec->rec_off = write_to_buffer(rec->rec_off, (void*)s, slen);
26f7227b
SH
205}
206
/*
 * Reserve space for a record in the circular buffer and write its header.
 *
 * @rec      Filled in with the reserved buffer index and payload offset
 * @event    Event ID stored in the record header
 * @datasize Payload size in bytes (arguments only, excluding header)
 *
 * Returns 0 on success, -ENOSPC if the buffer is full (the event is
 * counted in dropped_events).  Lock-free: space is claimed by advancing
 * trace_idx with a CAS loop; the record only becomes visible to the
 * writeout thread when trace_record_finish() sets TRACE_RECORD_VALID.
 */
int trace_record_start(TraceBufferRecord *rec, TraceEventID event, size_t datasize)
{
    unsigned int idx, rec_off, old_idx, new_idx;
    uint32_t rec_len = sizeof(TraceRecord) + datasize;
    uint64_t timestamp_ns = get_clock();

    do {
        old_idx = g_atomic_int_get(&trace_idx);
        smp_rmb();
        new_idx = old_idx + rec_len;

        /* Unwrapped indices: the distance to writeout_idx is the fill level */
        if (new_idx - writeout_idx > TRACE_BUF_LEN) {
            /* Trace Buffer Full, Event dropped ! */
            g_atomic_int_inc(&dropped_events);
            return -ENOSPC;
        }
    } while (!g_atomic_int_compare_and_exchange(&trace_idx, old_idx, new_idx));

    idx = old_idx % TRACE_BUF_LEN;

    /* Write the header in place; the valid flag is not set yet */
    rec_off = idx;
    rec_off = write_to_buffer(rec_off, &event, sizeof(event));
    rec_off = write_to_buffer(rec_off, &timestamp_ns, sizeof(timestamp_ns));
    rec_off = write_to_buffer(rec_off, &rec_len, sizeof(rec_len));

    rec->tbuf_idx = idx;
    rec->rec_off = (idx + sizeof(TraceRecord)) % TRACE_BUF_LEN;
    return 0;
}
236
62bab732 237static void read_from_buffer(unsigned int idx, void *dataptr, size_t size)
26f7227b 238{
62bab732
HPB
239 uint8_t *data_ptr = dataptr;
240 uint32_t x = 0;
241 while (x < size) {
242 if (idx >= TRACE_BUF_LEN) {
243 idx = idx % TRACE_BUF_LEN;
244 }
245 data_ptr[x++] = trace_buf[idx++];
246 }
26f7227b
SH
247}
248
62bab732 249static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size)
26f7227b 250{
62bab732
HPB
251 uint8_t *data_ptr = dataptr;
252 uint32_t x = 0;
253 while (x < size) {
254 if (idx >= TRACE_BUF_LEN) {
255 idx = idx % TRACE_BUF_LEN;
256 }
257 trace_buf[idx++] = data_ptr[x++];
258 }
259 return idx; /* most callers wants to know where to write next */
26f7227b
SH
260}
261
/*
 * Publish a completed record by setting TRACE_RECORD_VALID in its
 * header, making it visible to the writeout thread.  Kicks the
 * writeout thread (without waiting) once the buffer is more than
 * TRACE_BUF_FLUSH_THRESHOLD full.
 */
void trace_record_finish(TraceBufferRecord *rec)
{
    TraceRecord record;
    read_from_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));
    smp_wmb(); /* write barrier before marking as valid */
    record.event |= TRACE_RECORD_VALID;
    write_to_buffer(rec->tbuf_idx, &record, sizeof(TraceRecord));

    /* Unwrapped index difference gives the current buffer fill level */
    if (((unsigned int)g_atomic_int_get(&trace_idx) - writeout_idx)
        > TRACE_BUF_FLUSH_THRESHOLD) {
        flush_trace_file(false);
    }
}
275
0b5538c3
SH
/*
 * Open or close the trace output file.
 *
 * @enable true to open trace_file_name and start writeout, false to
 *         stop writeout and close the file.  No-op if already in the
 *         requested state.  On open failure, tracing stays disabled.
 */
void st_set_trace_file_enabled(bool enable)
{
    if (enable == !!trace_fp) {
        return; /* no change */
    }

    /* Halt trace writeout */
    flush_trace_file(true);
    trace_writeout_enabled = false;
    /* Second flush: wait until the writeout thread has observed the
     * disabled flag and drained whatever the first kick produced */
    flush_trace_file(true);

    if (enable) {
        static const TraceLogHeader header = {
            .header_event_id = HEADER_EVENT_ID,
            .header_magic = HEADER_MAGIC,
            /* Older log readers will check for version at next location */
            .header_version = HEADER_VERSION,
        };

        trace_fp = fopen(trace_file_name, "wb");
        if (!trace_fp) {
            return;
        }

        if (fwrite(&header, sizeof header, 1, trace_fp) != 1) {
            fclose(trace_fp);
            trace_fp = NULL;
            return;
        }

        /* Resume trace writeout */
        trace_writeout_enabled = true;
        flush_trace_file(false);
    } else {
        fclose(trace_fp);
        trace_fp = NULL;
    }
}
314
26f7227b 315/**
0b5538c3
SH
316 * Set the name of a trace file
317 *
318 * @file The trace file name or NULL for the default name-<pid> set at
319 * config time
26f7227b 320 */
0b5538c3 321bool st_set_trace_file(const char *file)
26f7227b 322{
0b5538c3
SH
323 st_set_trace_file_enabled(false);
324
4552e410 325 g_free(trace_file_name);
0b5538c3
SH
326
327 if (!file) {
4552e410 328 trace_file_name = g_strdup_printf(CONFIG_TRACE_FILE, getpid());
0b5538c3 329 } else {
4552e410 330 trace_file_name = g_strdup_printf("%s", file);
0b5538c3
SH
331 }
332
333 st_set_trace_file_enabled(true);
334 return true;
335}
336
337void st_print_trace_file_status(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...))
338{
339 stream_printf(stream, "Trace file \"%s\" %s.\n",
340 trace_file_name, trace_fp ? "on" : "off");
26f7227b 341}
22890ab5 342
fc764105
LV
/* Synchronously drain the trace buffer to disk (also used via atexit) */
void st_flush_trace_buffer(void)
{
    flush_trace_file(true);
}
347
348void trace_print_events(FILE *stream, fprintf_function stream_printf)
22890ab5
PS
349{
350 unsigned int i;
351
352 for (i = 0; i < NR_TRACE_EVENTS; i++) {
353 stream_printf(stream, "%s [Event ID %u] : state %u\n",
354 trace_list[i].tp_name, i, trace_list[i].state);
355 }
356}
357
fc764105 358bool trace_event_set_state(const char *name, bool state)
22890ab5
PS
359{
360 unsigned int i;
454e202d
MW
361 unsigned int len;
362 bool wildcard = false;
363 bool matched = false;
364
365 len = strlen(name);
366 if (len > 0 && name[len - 1] == '*') {
367 wildcard = true;
368 len -= 1;
369 }
22890ab5 370 for (i = 0; i < NR_TRACE_EVENTS; i++) {
454e202d
MW
371 if (wildcard) {
372 if (!strncmp(trace_list[i].tp_name, name, len)) {
373 trace_list[i].state = state;
374 matched = true;
375 }
376 continue;
377 }
0b5538c3 378 if (!strcmp(trace_list[i].tp_name, name)) {
fc764105 379 trace_list[i].state = state;
0b5538c3 380 return true;
22890ab5
PS
381 }
382 }
454e202d 383 return matched;
0b5538c3
SH
384}
385
85aff158
SH
/* Helper function to create a thread with signals blocked. Use glib's
 * portable threads since QEMU abstractions cannot be used due to reentrancy in
 * the tracer. Also note the signal masking on POSIX hosts so that the thread
 * does not steal signals when the rest of the program wants them blocked.
 *
 * Returns NULL on failure (g_thread_create's error is discarded).
 */
static GThread *trace_thread_create(GThreadFunc fn)
{
    GThread *thread;
#ifndef _WIN32
    sigset_t set, oldset;

    /* Block all signals around thread creation so the new thread
     * inherits a fully-blocked mask */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
#endif
    thread = g_thread_create(fn, NULL, FALSE, NULL);
#ifndef _WIN32
    /* Restore the creator's original signal mask */
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
#endif

    return thread;
}
407
/*
 * Initialize the simple trace backend: set up glib threading, create
 * the condition variables and the writeout thread, then apply the
 * initial event list and trace file name.
 *
 * @events Passed to trace_backend_init_events() (format defined there)
 * @file   Trace file name, or NULL for the default (see st_set_trace_file)
 *
 * Returns false if the writeout thread could not be created.
 */
bool trace_backend_init(const char *events, const char *file)
{
    GThread *thread;

    if (!g_thread_supported()) {
#if !GLIB_CHECK_VERSION(2, 31, 0)
        g_thread_init(NULL);
#else
        /* glib >= 2.31 initializes threading automatically; reaching
         * here means the glib threading system is broken */
        fprintf(stderr, "glib threading failed to initialize.\n");
        exit(1);
#endif
    }

    trace_available_cond = g_cond_new();
    trace_empty_cond = g_cond_new();

    thread = trace_thread_create(writeout_thread);
    if (!thread) {
        fprintf(stderr, "warning: unable to initialize simple trace backend\n");
        return false;
    }

    /* Flush any buffered records on normal program exit */
    atexit(st_flush_trace_buffer);
    trace_backend_init_events(events);
    st_set_trace_file(file);
    return true;
}