#!/usr/bin/env python3
#
# Pretty-printer for simple trace backend binary trace files
#
# Copyright IBM, Corp. 2010
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# For help see docs/devel/tracing.rst

import sys
import struct
import inspect
import warnings
from tracetool import read_events, Event
from tracetool.backend.simple import is_string

__all__ = ['Analyzer', 'Analyzer2', 'process', 'run']

# This is the binary format that the QEMU "simple" trace backend
# emits. There is no specification documentation because the format is
# not guaranteed to be stable. Trace files must be parsed with the
# same trace-events-all file and the same simpletrace.py file that
# QEMU was built with.
header_event_id = 0xffffffffffffffff
header_magic = 0xf2b177cb0aa429b4
dropped_event_id = 0xfffffffffffffffe

record_type_mapping = 0
record_type_event = 1

log_header_fmt = '=QQQ'
rec_header_fmt = '=QQII'
rec_header_fmt_len = struct.calcsize(rec_header_fmt)
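
# Rough sketch of the layout, as parsed by the functions below (informational
# only; the format is not guaranteed to be stable):
#
#   log header (=QQQ):    header_event_id, header_magic, log_version
#   then, per record, a leading =Q record type, followed by either
#     a mapping record:   =Q event id, =L name length, then the name bytes
#     an event record:    =QQII event id, timestamp_ns, record length, pid,
#                         then (record length - rec_header_fmt_len) bytes of
#                         argument payload; string arguments are a =L length
#                         plus the bytes, all other arguments are =Q values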

class SimpleException(Exception):
    pass

def read_header(fobj, hfmt):
    '''Read a trace record header'''
    hlen = struct.calcsize(hfmt)
    hdr = fobj.read(hlen)
    if len(hdr) != hlen:
        raise SimpleException('Error reading header. Wrong filetype provided?')
    return struct.unpack(hfmt, hdr)

def get_mapping(fobj):
    """Read an event-name mapping record and return (event_id, name)."""
    (event_id, ) = struct.unpack('=Q', fobj.read(8))
    (length, ) = struct.unpack('=L', fobj.read(4))
    name = fobj.read(length).decode()

    return (event_id, name)

def read_record(fobj):
    """Deserialize a trace record from a file into a tuple (event_id, timestamp_ns, pid, args)."""
    event_id, timestamp_ns, record_length, record_pid = read_header(fobj, rec_header_fmt)
    args_payload = fobj.read(record_length - rec_header_fmt_len)
    return (event_id, timestamp_ns, record_pid, args_payload)

def read_trace_header(fobj):
    """Read and verify trace file header"""
    _header_event_id, _header_magic, log_version = read_header(fobj, log_header_fmt)
    if _header_event_id != header_event_id:
        raise ValueError(f'Not a valid trace file, header id {_header_event_id} != {header_event_id}')
    if _header_magic != header_magic:
        raise ValueError(f'Not a valid trace file, header magic {_header_magic} != {header_magic}')

    if log_version not in [0, 2, 3, 4]:
        raise ValueError(f'Unknown version {log_version} of tracelog format!')
    if log_version != 4:
        raise ValueError(f'Log format {log_version} not supported with this QEMU release!')

def read_trace_records(events, fobj, read_header):
    """Deserialize trace records from a file, yielding record tuples (event, event_name, timestamp_ns, pid, arg1, ..., arg6).

    Args:
        events (list): list of Event objects, as produced by tracetool.read_events
        fobj (file): input file
        read_header (bool): whether headers were read from fobj

    """
    frameinfo = inspect.getframeinfo(inspect.currentframe())
    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)",
                                frameinfo.lineno + 1, frameinfo.filename)

    event_mapping = {e.name: e for e in events}
    event_mapping["dropped"] = dropped_event
    event_id_to_name = {dropped_event_id: "dropped"}

    # If there is no header assume event ID mapping matches events list
    if not read_header:
        for event_id, event in enumerate(events):
            event_id_to_name[event_id] = event.name

    while True:
        t = fobj.read(8)
        if len(t) == 0:
            break

        (rectype, ) = struct.unpack('=Q', t)
        if rectype == record_type_mapping:
            event_id, event_name = get_mapping(fobj)
            event_id_to_name[event_id] = event_name
        else:
            event_id, timestamp_ns, pid, args_payload = read_record(fobj)
            event_name = event_id_to_name[event_id]

            try:
                event = event_mapping[event_name]
            except KeyError as e:
                raise SimpleException(
                    f'{e} event is logged but is not declared in the trace events '
                    'file, try using trace-events-all instead.'
                )

            offset = 0
            args = []
            for type, _ in event.args:
                if is_string(type):
                    (length,) = struct.unpack_from('=L', args_payload, offset=offset)
                    offset += 4
                    s = args_payload[offset:offset+length]
                    offset += length
                    args.append(s)
                else:
                    (value,) = struct.unpack_from('=Q', args_payload, offset=offset)
                    offset += 8
                    args.append(value)

            yield (event_mapping[event_name], event_name, timestamp_ns, pid) + tuple(args)

class Analyzer:
    """[Deprecated. Refer to Analyzer2 instead.]

    A trace file analyzer which processes trace records.

    An analyzer can be passed to run() or process(). The begin() method is
    invoked, then each trace record is processed, and finally the end() method
    is invoked. When Analyzer is used as a context manager (using the `with`
    statement), begin() and end() are called automatically.

    If a method matching a trace event name exists, it is invoked to process
    that trace record. Otherwise the catchall() method is invoked.

    Example:
    The following method handles the runstate_set(int new_state) trace event::

      def runstate_set(self, new_state):
          ...

    The method can also take a timestamp argument before the trace event
    arguments::

      def runstate_set(self, timestamp, new_state):
          ...

    Timestamps have the uint64_t type and are in nanoseconds.

    The pid can be included in addition to the timestamp and is useful when
    dealing with traces from multiple processes::

      def runstate_set(self, timestamp, pid, new_state):
          ...
    """

    def begin(self):
        """Called at the start of the trace."""
        pass

    def catchall(self, event, rec):
        """Called if no specific method for processing a trace event has been found."""
        pass

    def _build_fn(self, event):
        fn = getattr(self, event.name, None)
        if fn is None:
            # Return early to avoid costly call to inspect.getfullargspec
            return self.catchall

        event_argcount = len(event.args)
        fn_argcount = len(inspect.getfullargspec(fn)[0]) - 1
        if fn_argcount == event_argcount + 1:
            # Include timestamp as first argument
            return lambda _, rec: fn(*(rec[1:2] + rec[3:3 + event_argcount]))
        elif fn_argcount == event_argcount + 2:
            # Include timestamp and pid
            return lambda _, rec: fn(*rec[1:3 + event_argcount])
        else:
            # Just arguments, no timestamp or pid
            return lambda _, rec: fn(*rec[3:3 + event_argcount])

    def _process_event(self, rec_args, *, event, event_id, timestamp_ns, pid, **kwargs):
        warnings.warn(
            "Use of deprecated Analyzer class. Refer to Analyzer2 instead.",
            DeprecationWarning,
        )

        if not hasattr(self, '_fn_cache'):
            # NOTE: Cannot depend on downstream subclasses to have
            # super().__init__() because of legacy.
            self._fn_cache = {}

        rec = (event_id, timestamp_ns, pid, *rec_args)
        if event_id not in self._fn_cache:
            self._fn_cache[event_id] = self._build_fn(event)
        self._fn_cache[event_id](event, rec)

    def end(self):
        """Called at the end of the trace."""
        pass

    def __enter__(self):
        self.begin()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            self.end()
        return False

class Analyzer2(Analyzer):
    """A trace file analyzer which processes trace records.

    An analyzer can be passed to run() or process(). The begin() method is
    invoked, then each trace record is processed, and finally the end() method
    is invoked. When the analyzer is used as a context manager (using the `with`
    statement), begin() and end() are called automatically.

    If a method matching a trace event name exists, it is invoked to process
    that trace record. Otherwise the catchall() method is invoked.

    The methods are called with a set of keyword-arguments. These can be ignored
    using `**kwargs` or declared like any other keyword-argument.

    The following keyword-arguments are available, but make sure to also accept
    `**kwargs` to allow for unmatched arguments in the future:
        event: Event object of current trace
        event_id: The id of the event in the current trace file
        timestamp_ns: The timestamp in nanoseconds of the trace
        pid: The process id recorded for the given trace

    Example:
    The following method handles the runstate_set(int new_state) trace event::

      def runstate_set(self, new_state, **kwargs):
          ...

    The method can also explicitly take a timestamp keyword-argument with the
    trace event arguments::

      def runstate_set(self, new_state, *, timestamp_ns, **kwargs):
          ...

    Timestamps have the uint64_t type and are in nanoseconds.

    The pid can be included in addition to the timestamp and is useful when
    dealing with traces from multiple processes::

      def runstate_set(self, new_state, *, timestamp_ns, pid, **kwargs):
          ...
    """

    def catchall(self, *rec_args, event, timestamp_ns, pid, event_id, **kwargs):
        """Called if no specific method for processing a trace event has been found."""
        pass

    def _process_event(self, rec_args, *, event, **kwargs):
        fn = getattr(self, event.name, self.catchall)
        fn(*rec_args, event=event, **kwargs)

def process(events, log, analyzer, read_header=True):
    """Invoke an analyzer on each event in a log.

    Args:
        events (file-object or list or str): events list, file-like object, or file path (str) to read event data from
        log (file-object or str): file-like object or file path (str) to read log data from
        analyzer (Analyzer): Instance of Analyzer to interpret the event data
        read_header (bool, optional): Whether to read header data from the log data. Defaults to True.
    """

    if isinstance(events, str):
        with open(events, 'r') as f:
            events_list = read_events(f, events)
    elif isinstance(events, list):
        # Treat as a list of events already produced by tracetool.read_events
        events_list = events
    else:
        # Treat as an already opened file-object
        events_list = read_events(events, events.name)

    if isinstance(log, str):
        with open(log, 'rb') as log_fobj:
            _process(events_list, log_fobj, analyzer, read_header)
    else:
        # Treat `log` as an already opened file-object. We will not close it,
        # as we do not own it.
        _process(events_list, log, analyzer, read_header)

def _process(events, log_fobj, analyzer, read_header=True):
    """Internal function for processing an opened trace log.

    Args:
        events (list): list of events already produced by tracetool.read_events
        log_fobj (file): file-object to read log data from
        analyzer (Analyzer): the Analyzer to interpret the event data
        read_header (bool, optional): Whether to read header data from the log data. Defaults to True.
    """

    if read_header:
        read_trace_header(log_fobj)

    with analyzer:
        for event, event_id, timestamp_ns, record_pid, *rec_args in read_trace_records(events, log_fobj, read_header):
            analyzer._process_event(
                rec_args,
                event=event,
                event_id=event_id,
                timestamp_ns=timestamp_ns,
                pid=record_pid,
            )

def run(analyzer):
    """Execute an analyzer on a trace file given on the command-line.

    This function is useful as a driver for simple analysis scripts. More
    advanced scripts will want to call process() instead."""
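
    # A minimal driver script might look like this (MyAnalyzer is a
    # hypothetical Analyzer2 subclass, shown for illustration only):
    #
    #   from simpletrace import Analyzer2, run
    #
    #   class MyAnalyzer(Analyzer2):
    #       def runstate_set(self, new_state, **kwargs):
    #           print(f'runstate_set new_state={new_state}')
    #
    #   if __name__ == '__main__':
    #       run(MyAnalyzer())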

    try:
        # NOTE: See built-in `argparse` module for a more robust cli interface
        *no_header, trace_event_path, trace_file_path = sys.argv[1:]
        assert no_header == [] or no_header == ['--no-header'], 'Invalid no-header argument'
    except (AssertionError, ValueError):
        raise SimpleException(f'usage: {sys.argv[0]} [--no-header] <trace-events> <trace-file>\n')

    with open(trace_event_path, 'r') as events_fobj, open(trace_file_path, 'rb') as log_fobj:
        process(events_fobj, log_fobj, analyzer, read_header=not no_header)

if __name__ == '__main__':
    class Formatter2(Analyzer2):
        def __init__(self):
            self.last_timestamp_ns = None

        def catchall(self, *rec_args, event, timestamp_ns, pid, event_id):
            if self.last_timestamp_ns is None:
                self.last_timestamp_ns = timestamp_ns
            delta_ns = timestamp_ns - self.last_timestamp_ns
            self.last_timestamp_ns = timestamp_ns

            fields = [
                f'{name}={r}' if is_string(type) else f'{name}=0x{r:x}'
                for r, (type, name) in zip(rec_args, event.args)
            ]
            print(f'{event.name} {delta_ns / 1000:0.3f} {pid=} ' + ' '.join(fields))

    try:
        run(Formatter2())
    except SimpleException as e:
        sys.stderr.write(str(e) + "\n")
        sys.exit(1)