#!/usr/bin/env python
#
# Pretty-printer for simple trace backend binary trace files
#
# Copyright IBM, Corp. 2010
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# For help see docs/devel/tracing.txt
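#
# Example invocation (a usage sketch; the trace file name is a placeholder):
#
#   ./scripts/simpletrace.py trace-events-all trace-12345
#
# where trace-events-all lists the trace event definitions and trace-12345 is
# a binary trace file written by the "simple" trace backend.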

import struct
import re
import inspect
from tracetool import read_events, Event
from tracetool.backend.simple import is_string

header_event_id = 0xffffffffffffffff
header_magic = 0xf2b177cb0aa429b4
dropped_event_id = 0xfffffffffffffffe

record_type_mapping = 0
record_type_event = 1

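# Struct formats for the trace file header and the per-record header. As
# consumed below, a record header unpacks to (event id, timestamp in ns,
# <unused 32-bit field>, pid); this gloss describes how the fields are used
# by this script, not the on-disk format specification.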
log_header_fmt = '=QQQ'
rec_header_fmt = '=QQII'

def read_header(fobj, hfmt):
    '''Read a trace record header'''
    hlen = struct.calcsize(hfmt)
    hdr = fobj.read(hlen)
    if len(hdr) != hlen:
        return None
    return struct.unpack(hfmt, hdr)

def get_record(edict, idtoname, rechdr, fobj):
    """Deserialize a trace record from a file into a tuple
       (name, timestamp, pid, arg1, ..., arg6)."""
    if rechdr is None:
        return None
    if rechdr[0] != dropped_event_id:
        event_id = rechdr[0]
        name = idtoname[event_id]
        rec = (name, rechdr[1], rechdr[3])
        try:
            event = edict[name]
        except KeyError as e:
            import sys
            sys.stderr.write('%s event is logged but is not declared ' \
                             'in the trace events file, try using ' \
                             'trace-events-all instead.\n' % str(e))
            sys.exit(1)

        for type, name in event.args:
            if is_string(type):
                l = fobj.read(4)
                (len,) = struct.unpack('=L', l)
                s = fobj.read(len)
                rec = rec + (s,)
            else:
                (value,) = struct.unpack('=Q', fobj.read(8))
                rec = rec + (value,)
    else:
        rec = ("dropped", rechdr[1], rechdr[3])
        (value,) = struct.unpack('=Q', fobj.read(8))
        rec = rec + (value,)
    return rec

def get_mapping(fobj):
    (event_id, ) = struct.unpack('=Q', fobj.read(8))
    (len, ) = struct.unpack('=L', fobj.read(4))
    name = fobj.read(len)

    return (event_id, name)

def read_record(edict, idtoname, fobj):
    """Deserialize a trace record from a file into a tuple (name, timestamp, pid, arg1, ..., arg6)."""
    rechdr = read_header(fobj, rec_header_fmt)
    return get_record(edict, idtoname, rechdr, fobj)

def read_trace_header(fobj):
    """Read and verify trace file header"""
    header = read_header(fobj, log_header_fmt)
    if header is None:
        raise ValueError('Not a valid trace file!')
    if header[0] != header_event_id:
        raise ValueError('Not a valid trace file, header id %d != %d' %
                         (header[0], header_event_id))
    if header[1] != header_magic:
        raise ValueError('Not a valid trace file, header magic %d != %d' %
                         (header[1], header_magic))

    log_version = header[2]
    if log_version not in [0, 2, 3, 4]:
        raise ValueError('Unknown version of tracelog format!')
    if log_version != 4:
        raise ValueError('Log format %d not supported with this QEMU release!'
                         % log_version)

def read_trace_records(edict, fobj):
    """Deserialize trace records from a file, yielding record tuples (name, timestamp, pid, arg1, ..., arg6)."""
    idtoname = {
        dropped_event_id: "dropped"
    }
    while True:
        t = fobj.read(8)
        if len(t) == 0:
            break

        (rectype, ) = struct.unpack('=Q', t)
        if rectype == record_type_mapping:
            event_id, name = get_mapping(fobj)
            idtoname[event_id] = name
        else:
            rec = read_record(edict, idtoname, fobj)

            yield rec

class Analyzer(object):
    """A trace file analyzer which processes trace records.

    An analyzer can be passed to run() or process(). The begin() method is
    invoked, then each trace record is processed, and finally the end() method
    is invoked.

    If a method matching a trace event name exists, it is invoked to process
    that trace record. Otherwise the catchall() method is invoked.

    Example:
    The following method handles the runstate_set(int new_state) trace event::

      def runstate_set(self, new_state):
          ...

    The method can also take a timestamp argument before the trace event
    arguments::

      def runstate_set(self, timestamp, new_state):
          ...

    Timestamps have the uint64_t type and are in nanoseconds.

    The pid can be included in addition to the timestamp and is useful when
    dealing with traces from multiple processes::

      def runstate_set(self, timestamp, pid, new_state):
          ...
    """

    def begin(self):
        """Called at the start of the trace."""
        pass

    def catchall(self, event, rec):
        """Called if no specific method for processing a trace event has been found."""
        pass

    def end(self):
        """Called at the end of the trace."""
        pass

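# A minimal Analyzer subclass, included as an illustrative sketch only (it is
# not used by this script): it handles the runstate_set(int new_state) trace
# event described in the docstring above, taking the optional timestamp
# argument, and ignores all other events via catchall().
class ExampleRunstateAnalyzer(Analyzer):
    def runstate_set(self, timestamp, new_state):
        # timestamp is a uint64_t value in nanoseconds
        print 'runstate_set @ %d ns: new_state=%d' % (timestamp, new_state)

    def catchall(self, event, rec):
        # rec is the raw record tuple (name, timestamp, pid, arg1, ...)
        pass
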
def process(events, log, analyzer, read_header=True):
    """Invoke an analyzer on each event in a log."""
    if isinstance(events, str):
        events = read_events(open(events, 'r'))
    if isinstance(log, str):
        log = open(log, 'rb')

    if read_header:
        read_trace_header(log)

    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)")
    edict = {"dropped": dropped_event}

    for event in events:
        edict[event.name] = event

    def build_fn(analyzer, event):
        if isinstance(event, str):
            return analyzer.catchall

        fn = getattr(analyzer, event.name, None)
        if fn is None:
            return analyzer.catchall

        event_argcount = len(event.args)
        fn_argcount = len(inspect.getargspec(fn)[0]) - 1
        if fn_argcount == event_argcount + 1:
            # Include timestamp as first argument
            return lambda _, rec: fn(*(rec[1:2] + rec[3:3 + event_argcount]))
        elif fn_argcount == event_argcount + 2:
            # Include timestamp and pid
            return lambda _, rec: fn(*rec[1:3 + event_argcount])
        else:
            # Just arguments, no timestamp or pid
            return lambda _, rec: fn(*rec[3:3 + event_argcount])

    analyzer.begin()
    fn_cache = {}
    for rec in read_trace_records(edict, log):
        event_num = rec[0]
        event = edict[event_num]
        if event_num not in fn_cache:
            fn_cache[event_num] = build_fn(analyzer, event)
        fn_cache[event_num](event, rec)
    analyzer.end()

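# process() can also be driven directly, without run()'s command-line
# handling, by passing file names or file objects (an illustrative sketch;
# the file names and the analyzer class are placeholders):
#
#   process('trace-events-all', 'trace-12345', ExampleRunstateAnalyzer())
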
def run(analyzer):
    """Execute an analyzer on a trace file given on the command-line.

    This function is useful as a driver for simple analysis scripts. More
    advanced scripts will want to call process() instead."""
    import sys

    read_header = True
    if len(sys.argv) == 4 and sys.argv[1] == '--no-header':
        read_header = False
        del sys.argv[1]
    elif len(sys.argv) != 3:
        sys.stderr.write('usage: %s [--no-header] <trace-events> ' \
                         '<trace-file>\n' % sys.argv[0])
        sys.exit(1)

    events = read_events(open(sys.argv[1], 'r'))
    process(events, sys.argv[2], analyzer, read_header=read_header)

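# Typical use of run() from a standalone analysis script (an illustrative
# sketch; the script and class names are placeholders):
#
#   #!/usr/bin/env python
#   # usage: my_analysis.py <trace-events> <trace-file>
#   from simpletrace import Analyzer, run
#
#   class MyAnalyzer(Analyzer):
#       def catchall(self, event, rec):
#           print event.name
#
#   run(MyAnalyzer())
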
if __name__ == '__main__':
    class Formatter(Analyzer):
        def __init__(self):
            self.last_timestamp = None

        def catchall(self, event, rec):
            timestamp = rec[1]
            if self.last_timestamp is None:
                self.last_timestamp = timestamp
            delta_ns = timestamp - self.last_timestamp
            self.last_timestamp = timestamp

            fields = [event.name, '%0.3f' % (delta_ns / 1000.0),
                      'pid=%d' % rec[2]]
            i = 3
            for type, name in event.args:
                if is_string(type):
                    fields.append('%s=%s' % (name, rec[i]))
                else:
                    fields.append('%s=0x%x' % (name, rec[i]))
                i += 1
            print ' '.join(fields)

    run(Formatter())