/*
 * Simple trace backend
 *
 * Copyright IBM, Corp. 2010
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#ifndef _WIN32
#include <signal.h>
#include <pthread.h>
#endif
#include "qemu-timer.h"
#include "trace.h"
#include "trace/control.h"

/** Trace file header event ID */
#define HEADER_EVENT_ID (~(uint64_t)0) /* avoids conflicting with TraceEventIDs */

/** Trace file magic number */
#define HEADER_MAGIC 0xf2b177cb0aa429b4ULL

/** Trace file version number, bump if format changes */
#define HEADER_VERSION 0

/** Records were dropped event ID */
#define DROPPED_EVENT_ID (~(uint64_t)0 - 1)

/** Trace record is valid */
#define TRACE_RECORD_VALID ((uint64_t)1 << 63)

/** Trace buffer entry */
typedef struct {
    uint64_t event;
    uint64_t timestamp_ns;
    uint64_t x1;
    uint64_t x2;
    uint64_t x3;
    uint64_t x4;
    uint64_t x5;
    uint64_t x6;
} TraceRecord;

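/*
 * TRACE_BUF_LEN is the number of records in the global ring buffer;
 * TRACE_BUF_FLUSH_THRESHOLD is how often the producer kicks the writeout
 * thread (once for every quarter of the buffer filled).
 */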
enum {
    TRACE_BUF_LEN = 4096,
    TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
};

/*
 * Trace records are written out by a dedicated thread.  The thread waits for
 * records to become available, writes them out, and then waits again.
 */
static GStaticMutex trace_lock = G_STATIC_MUTEX_INIT;
static GCond *trace_available_cond;
static GCond *trace_empty_cond;
static bool trace_available;
static bool trace_writeout_enabled;

static TraceRecord trace_buf[TRACE_BUF_LEN];
static unsigned int trace_idx;
static FILE *trace_fp;
static char *trace_file_name = NULL;

/**
 * Read a trace record from the trace buffer
 *
 * @idx Trace buffer index
 * @record Trace record to fill
 *
 * Returns false if the record is not valid.
 */
static bool get_trace_record(unsigned int idx, TraceRecord *record)
{
    if (!(trace_buf[idx].event & TRACE_RECORD_VALID)) {
        return false;
    }

    __sync_synchronize(); /* read memory barrier before accessing record */

    *record = trace_buf[idx];
    record->event &= ~TRACE_RECORD_VALID;
    return true;
}

/**
 * Kick writeout thread
 *
 * @wait Whether to wait for writeout thread to complete
 */
static void flush_trace_file(bool wait)
{
    g_static_mutex_lock(&trace_lock);
    trace_available = true;
    g_cond_signal(trace_available_cond);

    if (wait) {
        g_cond_wait(trace_empty_cond, g_static_mutex_get_mutex(&trace_lock));
    }

    g_static_mutex_unlock(&trace_lock);
}

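/*
 * Block until the producer has signalled that records are available and
 * writeout is enabled.  Each pass through the loop also signals
 * trace_empty_cond so that callers of flush_trace_file(true) waiting for a
 * drain can make progress.
 */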
static void wait_for_trace_records_available(void)
{
    g_static_mutex_lock(&trace_lock);
    while (!(trace_available && trace_writeout_enabled)) {
        g_cond_signal(trace_empty_cond);
        g_cond_wait(trace_available_cond,
                    g_static_mutex_get_mutex(&trace_lock));
    }
    trace_available = false;
    g_static_mutex_unlock(&trace_lock);
}

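/*
 * Writeout thread: drains valid records from the ring buffer and appends
 * them to trace_fp.  If the producer has lapped the consumer by more than
 * TRACE_BUF_LEN records, a DROPPED_EVENT_ID record is emitted first with
 * the backlog count in x1.
 */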
static gpointer writeout_thread(gpointer opaque)
{
    TraceRecord record;
    unsigned int writeout_idx = 0;
    unsigned int num_available, idx;
    size_t unused __attribute__ ((unused));

    for (;;) {
        wait_for_trace_records_available();

        num_available = trace_idx - writeout_idx;
        if (num_available > TRACE_BUF_LEN) {
            record = (TraceRecord){
                .event = DROPPED_EVENT_ID,
                .x1 = num_available,
            };
            unused = fwrite(&record, sizeof(record), 1, trace_fp);
            writeout_idx += num_available;
        }

        idx = writeout_idx % TRACE_BUF_LEN;
        while (get_trace_record(idx, &record)) {
            trace_buf[idx].event = 0; /* clear valid bit */
            unused = fwrite(&record, sizeof(record), 1, trace_fp);
            idx = ++writeout_idx % TRACE_BUF_LEN;
        }

        fflush(trace_fp);
    }
    return NULL;
}

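/*
 * Write one trace record into the ring buffer (producer side).  A slot is
 * claimed with an atomic fetch-and-add on trace_idx, the record is filled
 * in, and only then is the valid bit set behind a write barrier so the
 * writeout thread never sees a partially written record.  Every
 * TRACE_BUF_FLUSH_THRESHOLD records the writeout thread is kicked without
 * blocking.
 */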
static void trace(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3,
                  uint64_t x4, uint64_t x5, uint64_t x6)
{
    unsigned int idx;
    uint64_t timestamp;

    if (!trace_list[event].state) {
        return;
    }

    timestamp = get_clock();

    idx = g_atomic_int_exchange_and_add((gint *)&trace_idx, 1) % TRACE_BUF_LEN;
    trace_buf[idx] = (TraceRecord){
        .event = event,
        .timestamp_ns = timestamp,
        .x1 = x1,
        .x2 = x2,
        .x3 = x3,
        .x4 = x4,
        .x5 = x5,
        .x6 = x6,
    };
    __sync_synchronize(); /* write barrier before marking as valid */
    trace_buf[idx].event |= TRACE_RECORD_VALID;

    if ((idx + 1) % TRACE_BUF_FLUSH_THRESHOLD == 0) {
        flush_trace_file(false);
    }
}

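/*
 * Entry points for events with zero to six arguments; unused argument slots
 * are passed as zero.
 */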
void trace0(TraceEventID event)
{
    trace(event, 0, 0, 0, 0, 0, 0);
}

void trace1(TraceEventID event, uint64_t x1)
{
    trace(event, x1, 0, 0, 0, 0, 0);
}

void trace2(TraceEventID event, uint64_t x1, uint64_t x2)
{
    trace(event, x1, x2, 0, 0, 0, 0);
}

void trace3(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3)
{
    trace(event, x1, x2, x3, 0, 0, 0);
}

void trace4(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4)
{
    trace(event, x1, x2, x3, x4, 0, 0);
}

void trace5(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5)
{
    trace(event, x1, x2, x3, x4, x5, 0);
}

void trace6(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5, uint64_t x6)
{
    trace(event, x1, x2, x3, x4, x5, x6);
}

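/*
 * Enable or disable writing the trace buffer to the trace file.  Writeout is
 * halted and drained before switching state; on enable the file is truncated
 * and a header record (HEADER_EVENT_ID/HEADER_MAGIC/HEADER_VERSION) is
 * written before writeout resumes.
 */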
void st_set_trace_file_enabled(bool enable)
{
    if (enable == !!trace_fp) {
        return; /* no change */
    }

    /* Halt trace writeout */
    flush_trace_file(true);
    trace_writeout_enabled = false;
    flush_trace_file(true);

    if (enable) {
        static const TraceRecord header = {
            .event = HEADER_EVENT_ID,
            .timestamp_ns = HEADER_MAGIC,
            .x1 = HEADER_VERSION,
        };

        trace_fp = fopen(trace_file_name, "wb");
        if (!trace_fp) {
            return;
        }

        if (fwrite(&header, sizeof header, 1, trace_fp) != 1) {
            fclose(trace_fp);
            trace_fp = NULL;
            return;
        }

        /* Resume trace writeout */
        trace_writeout_enabled = true;
        flush_trace_file(false);
    } else {
        fclose(trace_fp);
        trace_fp = NULL;
    }
}

/**
 * Set the name of a trace file
 *
 * @file The trace file name or NULL for the default name-<pid> set at
 *       config time
 */
bool st_set_trace_file(const char *file)
{
    st_set_trace_file_enabled(false);

    free(trace_file_name);

    if (!file) {
        if (asprintf(&trace_file_name, CONFIG_TRACE_FILE, getpid()) < 0) {
            trace_file_name = NULL;
            return false;
        }
    } else {
        if (asprintf(&trace_file_name, "%s", file) < 0) {
            trace_file_name = NULL;
            return false;
        }
    }

    st_set_trace_file_enabled(true);
    return true;
}

void st_print_trace_file_status(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...))
{
    stream_printf(stream, "Trace file \"%s\" %s.\n",
                  trace_file_name, trace_fp ? "on" : "off");
}

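/* Print all valid records currently sitting in the trace buffer. */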
void st_print_trace(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < TRACE_BUF_LEN; i++) {
        TraceRecord record;

        if (!get_trace_record(i, &record)) {
            continue;
        }
        stream_printf(stream, "Event %" PRIu64 " : %" PRIx64 " %" PRIx64
                      " %" PRIx64 " %" PRIx64 " %" PRIx64 " %" PRIx64 "\n",
                      record.event, record.x1, record.x2,
                      record.x3, record.x4, record.x5,
                      record.x6);
    }
}

void st_flush_trace_buffer(void)
{
    flush_trace_file(true);
}

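/* List every known trace event with its ID and current enabled state. */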
void trace_print_events(FILE *stream, fprintf_function stream_printf)
{
    unsigned int i;

    for (i = 0; i < NR_TRACE_EVENTS; i++) {
        stream_printf(stream, "%s [Event ID %u] : state %u\n",
                      trace_list[i].tp_name, i, trace_list[i].state);
    }
}

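/*
 * Enable or disable a trace event by name.  A trailing '*' acts as a prefix
 * wildcard and may match several events; returns true if at least one event
 * was matched.
 */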
bool trace_event_set_state(const char *name, bool state)
{
    unsigned int i;
    unsigned int len;
    bool wildcard = false;
    bool matched = false;

    len = strlen(name);
    if (len > 0 && name[len - 1] == '*') {
        wildcard = true;
        len -= 1;
    }
    for (i = 0; i < NR_TRACE_EVENTS; i++) {
        if (wildcard) {
            if (!strncmp(trace_list[i].tp_name, name, len)) {
                trace_list[i].state = state;
                matched = true;
            }
            continue;
        }
        if (!strcmp(trace_list[i].tp_name, name)) {
            trace_list[i].state = state;
            return true;
        }
    }
    return matched;
}

/* Helper function to create a thread with signals blocked.  Use glib's
 * portable threads since QEMU abstractions cannot be used due to reentrancy in
 * the tracer.  Also note the signal masking on POSIX hosts so that the thread
 * does not steal signals when the rest of the program wants them blocked.
 */
static GThread *trace_thread_create(GThreadFunc fn)
{
    GThread *thread;
#ifndef _WIN32
    sigset_t set, oldset;

    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
#endif
    thread = g_thread_create(fn, NULL, FALSE, NULL);
#ifndef _WIN32
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);
#endif

    return thread;
}

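/*
 * Initialize the simple trace backend: set up the glib threading layer if
 * necessary, create the condition variables and the writeout thread, register
 * an atexit flush, then apply the initial event list and trace file name.
 */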
bool trace_backend_init(const char *events, const char *file)
{
    GThread *thread;

    if (!g_thread_supported()) {
#if !GLIB_CHECK_VERSION(2, 31, 0)
        g_thread_init(NULL);
#else
        fprintf(stderr, "glib threading failed to initialize.\n");
        exit(1);
#endif
    }

    trace_available_cond = g_cond_new();
    trace_empty_cond = g_cond_new();

    thread = trace_thread_create(writeout_thread);
    if (!thread) {
        fprintf(stderr, "warning: unable to initialize simple trace backend\n");
        return false;
    }

    atexit(st_flush_trace_buffer);
    trace_backend_init_events(events);
    st_set_trace_file(file);
    return true;
}