# Mirrored from git.proxmox.com (mirror_qemu.git): scripts/simpletrace.py
# Commit subject: "simpletrace: changed naming of edict and idtoname to
# improve readability"
1 #!/usr/bin/env python3
2 #
3 # Pretty-printer for simple trace backend binary trace files
4 #
5 # Copyright IBM, Corp. 2010
6 #
7 # This work is licensed under the terms of the GNU GPL, version 2. See
8 # the COPYING file in the top-level directory.
9 #
10 # For help see docs/devel/tracing.rst
11
12 import sys
13 import struct
14 import inspect
15 from tracetool import read_events, Event
16 from tracetool.backend.simple import is_string
17
18 __all__ = ['Analyzer', 'process', 'run']
19
# This is the binary format that the QEMU "simple" trace backend
# emits. There is no specification documentation because the format is
# not guaranteed to be stable. Trace files must be parsed with the
# same trace-events-all file and the same simpletrace.py file that
# QEMU was built with.
header_event_id = 0xffffffffffffffff   # pseudo event ID marking the file header record
header_magic = 0xf2b177cb0aa429b4      # magic number identifying a simpletrace file
dropped_event_id = 0xfffffffffffffffe  # pseudo event ID for dropped-events records

# Type discriminators for the 8-byte prefix preceding each record.
record_type_mapping = 0  # record maps an event ID to an event name
record_type_event = 1    # record carries an actual trace event

log_header_fmt = '=QQQ'   # file header: event ID, magic, log version
rec_header_fmt = '=QQII'  # record header: event ID, timestamp (ns), length, pid
34
def read_header(fobj, hfmt):
    '''Read and unpack a header of struct format `hfmt` from `fobj`.

    Returns the unpacked tuple, or None on a short read (e.g. EOF).
    '''
    size = struct.calcsize(hfmt)
    data = fobj.read(size)
    return struct.unpack(hfmt, data) if len(data) == size else None
42
def get_record(event_mapping, event_id_to_name, rechdr, fobj):
    """Deserialize a trace record from a file into a tuple
    (name, timestamp, pid, arg1, ..., arg6).

    Args:
        event_mapping (str -> Event): events dict, indexed by name
        event_id_to_name (int -> str): event names dict, indexed by event ID
        rechdr (tuple): record header from read_header(), or None at EOF
        fobj (file): input file, positioned just past the record header

    Returns None when rechdr is None. Exits the process if the event is
    not declared in the trace events file.
    """
    if rechdr is None:
        return None
    if rechdr[0] != dropped_event_id:
        event_id = rechdr[0]
        name = event_id_to_name[event_id]
        rec = (name, rechdr[1], rechdr[3])
        try:
            event = event_mapping[name]
        except KeyError as e:
            sys.stderr.write('%s event is logged but is not declared ' \
                             'in the trace events file, try using ' \
                             'trace-events-all instead.\n' % str(e))
            sys.exit(1)

        # Arguments follow the header in declaration order: strings are
        # 32-bit-length-prefixed byte strings, everything else a 64-bit value.
        # (Renamed locals: the original shadowed builtins `len`/`type` and
        # rebound `name` inside the loop.)
        for arg_type, _arg_name in event.args:
            if is_string(arg_type):
                (length,) = struct.unpack('=L', fobj.read(4))
                s = fobj.read(length)
                rec = rec + (s,)
            else:
                (value,) = struct.unpack('=Q', fobj.read(8))
                rec = rec + (value,)
    else:
        # Dropped-events record: single 64-bit argument is the drop count.
        rec = ("dropped", rechdr[1], rechdr[3])
        (value,) = struct.unpack('=Q', fobj.read(8))
        rec = rec + (value,)
    return rec
74
def get_mapping(fobj):
    """Read the body of an ID->name mapping record from `fobj`.

    Layout: 64-bit event ID, 32-bit name length, then the name bytes.
    Returns (event_id, name) with `name` decoded to str.
    (Renamed local: the original shadowed the builtin `len`.)
    """
    (event_id, ) = struct.unpack('=Q', fobj.read(8))
    (name_length, ) = struct.unpack('=L', fobj.read(4))
    name = fobj.read(name_length).decode()

    return (event_id, name)
81
def read_record(event_mapping, event_id_to_name, fobj):
    """Read one record header from `fobj` and deserialize the full record.

    Returns a tuple (name, timestamp, pid, arg1, ..., arg6), or None at EOF.
    """
    return get_record(event_mapping, event_id_to_name,
                      read_header(fobj, rec_header_fmt), fobj)
86
def read_trace_header(fobj):
    """Read the trace file header from `fobj` and validate it.

    Raises ValueError if the header is missing, has the wrong pseudo
    event ID or magic number, or carries an unsupported log version.
    """
    header = read_header(fobj, log_header_fmt)
    if header is None:
        raise ValueError('Not a valid trace file!')

    event_id, magic, log_version = header
    if event_id != header_event_id:
        raise ValueError('Not a valid trace file, header id %d != %d' %
                         (event_id, header_event_id))
    if magic != header_magic:
        raise ValueError('Not a valid trace file, header magic %d != %d' %
                         (magic, header_magic))

    # Distinguish "never was a valid version" from "valid but unsupported".
    if log_version not in [0, 2, 3, 4]:
        raise ValueError('Unknown version of tracelog format!')
    if log_version != 4:
        raise ValueError('Log format %d not supported with this QEMU release!'
                         % log_version)
105
def read_trace_records(event_mapping, event_id_to_name, fobj):
    """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6).

    Note that `event_id_to_name` is modified if the file contains mapping records.

    Args:
        event_mapping (str -> Event): events dict, indexed by name
        event_id_to_name (int -> str): event names dict, indexed by event ID
        fobj (file): input file

    """
    while True:
        # Each record is preceded by an 8-byte type discriminator; a
        # zero-length read means clean EOF.
        t = fobj.read(8)
        if len(t) == 0:
            break

        (rectype, ) = struct.unpack('=Q', t)
        if rectype == record_type_mapping:
            # Mapping records update the ID->name table in place; they are
            # consumed here and never yielded to the caller.
            event_id, name = get_mapping(fobj)
            event_id_to_name[event_id] = name
        else:
            rec = read_record(event_mapping, event_id_to_name, fobj)

            yield rec
130
class Analyzer(object):
    """Base class for trace file analyzers.

    Pass an instance to run() or process(). The framework invokes begin()
    first, then one call per trace record, and finally end().

    Each record is dispatched to the method whose name matches the trace
    event's name; records with no matching method go to catchall().

    Example:
        To handle the runstate_set(int new_state) trace event::

            def runstate_set(self, new_state):
                ...

        An optional timestamp argument may precede the trace event
        arguments::

            def runstate_set(self, timestamp, new_state):
                ...

        Timestamps have the uint64_t type and are in nanoseconds.

        A pid argument may additionally follow the timestamp, which is
        useful when dealing with traces from multiple processes::

            def runstate_set(self, timestamp, pid, new_state):
                ...
    """

    def begin(self):
        """Called once before any trace record is processed."""

    def catchall(self, event, rec):
        """Called for trace records that have no dedicated handler method."""

    def end(self):
        """Called once after the last trace record."""
173
def process(events, log, analyzer, read_header=True):
    """Invoke an analyzer on each event in a log.

    Args:
        events (file-object or list or str): events list or file-like object or file path as str to read event data from
        log (file-object or str): file-like object or file path as str to read log data from
        analyzer (Analyzer): Instance of Analyzer to interpret the event data
        read_header (bool, optional): Whether to read header data from the log data. Defaults to True.
    """

    if isinstance(events, str):
        with open(events, 'r') as f:
            events_list = read_events(f, events)
    elif isinstance(events, list):
        # Treat as a list of events already produced by tracetool.read_events
        events_list = events
    else:
        # Treat as an already opened file-object
        events_list = read_events(events, events.name)

    close_log = False
    if isinstance(log, str):
        log = open(log, 'rb')
        close_log = True

    if read_header:
        read_trace_header(log)

    # Synthesize a pseudo event for dropped-events records so they are
    # dispatched like any other trace event.
    frameinfo = inspect.getframeinfo(inspect.currentframe())
    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)",
                                frameinfo.lineno + 1, frameinfo.filename)
    event_mapping = {"dropped": dropped_event}
    event_id_to_name = {dropped_event_id: "dropped"}

    for event in events_list:
        event_mapping[event.name] = event

    # If there is no header assume event ID mapping matches events list
    if not read_header:
        for event_id, event in enumerate(events_list):
            event_id_to_name[event_id] = event.name

    def build_fn(analyzer, event):
        """Resolve the analyzer method for `event` and wrap it so that the
        record fields (timestamp/pid/args) matching its arity are passed."""
        if isinstance(event, str):
            return analyzer.catchall

        fn = getattr(analyzer, event.name, None)
        if fn is None:
            return analyzer.catchall

        event_argcount = len(event.args)
        # inspect.getargspec() was removed in Python 3.11; getfullargspec()
        # has the same positional-args semantics (index 0 is the args list,
        # including the bound method's implicit `self`, hence the -1).
        fn_argcount = len(inspect.getfullargspec(fn)[0]) - 1
        if fn_argcount == event_argcount + 1:
            # Include timestamp as first argument
            return lambda _, rec: fn(*(rec[1:2] + rec[3:3 + event_argcount]))
        elif fn_argcount == event_argcount + 2:
            # Include timestamp and pid
            return lambda _, rec: fn(*rec[1:3 + event_argcount])
        else:
            # Just arguments, no timestamp or pid
            return lambda _, rec: fn(*rec[3:3 + event_argcount])

    analyzer.begin()
    fn_cache = {}
    for rec in read_trace_records(event_mapping, event_id_to_name, log):
        event_num = rec[0]
        event = event_mapping[event_num]
        # Cache the dispatch wrapper per event name; build_fn uses
        # introspection, which is too slow to repeat per record.
        if event_num not in fn_cache:
            fn_cache[event_num] = build_fn(analyzer, event)
        fn_cache[event_num](event, rec)
    analyzer.end()

    if close_log:
        log.close()
247
def run(analyzer):
    """Execute an analyzer on a trace file given on the command-line.

    This function is useful as a driver for simple analysis scripts. More
    advanced scripts will want to call process() instead."""

    # NOTE: See built-in `argparse` module for a more robust cli interface
    args = sys.argv[1:]
    if len(args) == 3 and args[0] == '--no-header':
        skip_header = True
        trace_event_path, trace_file_path = args[1], args[2]
    elif len(args) == 2:
        skip_header = False
        trace_event_path, trace_file_path = args[0], args[1]
    else:
        sys.stderr.write(f'usage: {sys.argv[0]} [--no-header] <trace-events> <trace-file>\n')
        sys.exit(1)

    with open(trace_event_path, 'r') as events_fobj, open(trace_file_path, 'rb') as log_fobj:
        process(events_fobj, log_fobj, analyzer, read_header=not skip_header)
264
if __name__ == '__main__':
    class Formatter(Analyzer):
        """Default analyzer: prints one line per record with the event name,
        the delta (in microseconds) since the previous record, the pid, and
        each argument as name=value."""

        def __init__(self):
            self.last_timestamp = None

        def catchall(self, event, rec):
            timestamp = rec[1]
            # First record has no predecessor, so its delta is zero.
            if self.last_timestamp is None:
                self.last_timestamp = timestamp
            delta_ns = timestamp - self.last_timestamp
            self.last_timestamp = timestamp

            fields = [event.name, '%0.3f' % (delta_ns / 1000.0),
                      'pid=%d' % rec[2]]
            # Record arguments start at index 3, in declaration order.
            for idx, (arg_type, arg_name) in enumerate(event.args, start=3):
                if is_string(arg_type):
                    fields.append('%s=%s' % (arg_name, rec[idx]))
                else:
                    fields.append('%s=0x%x' % (arg_name, rec[idx]))
            print(' '.join(fields))

    run(Formatter())