]>
Commit | Line | Data |
---|---|---|
3d004a37 | 1 | #!/usr/bin/env python3 |
26f7227b SH |
2 | # |
3 | # Pretty-printer for simple trace backend binary trace files | |
4 | # | |
5 | # Copyright IBM, Corp. 2010 | |
6 | # | |
7 | # This work is licensed under the terms of the GNU GPL, version 2. See | |
8 | # the COPYING file in the top-level directory. | |
9 | # | |
d0fb9657 | 10 | # For help see docs/devel/tracing.rst |
26f7227b | 11 | |
f7bd4f02 | 12 | import sys |
26f7227b | 13 | import struct |
59da6684 | 14 | import inspect |
d1b97bce | 15 | from tracetool import read_events, Event |
90a147a2 | 16 | from tracetool.backend.simple import is_string |
26f7227b | 17 | |
# Public API of this module; everything else is an implementation detail.
__all__ = ['Analyzer', 'process', 'run']
# This is the binary format that the QEMU "simple" trace backend
# emits. There is no specification documentation because the format is
# not guaranteed to be stable. Trace files must be parsed with the
# same trace-events-all file and the same simpletrace.py file that
# QEMU was built with.
header_event_id = 0xffffffffffffffff   # sentinel event ID marking the file header
header_magic = 0xf2b177cb0aa429b4      # magic number identifying simpletrace files
dropped_event_id = 0xfffffffffffffffe  # pseudo event ID for dropped-event records

# Per-record type tags: an ID->name mapping record, or an actual event record.
record_type_mapping = 0
record_type_event = 1

# struct formats (native byte order, standard sizes) for the file header
# (event id, magic, version) and each record header (id, timestamp, length, pid).
log_header_fmt = '=QQQ'
rec_header_fmt = '=QQII'
rec_header_fmt_len = struct.calcsize(rec_header_fmt)
26f7227b | 35 | |
class SimpleException(Exception):
    """Raised when input does not parse as a simpletrace binary trace file."""
def read_header(fobj, hfmt):
    """Read and unpack one header from *fobj* using struct format *hfmt*.

    Returns the unpacked tuple; raises SimpleException on a short read
    (truncated input or a file that is not a simpletrace file at all).
    """
    expected = struct.calcsize(hfmt)
    raw = fobj.read(expected)
    if len(raw) != expected:
        raise SimpleException('Error reading header. Wrong filetype provided?')
    return struct.unpack(hfmt, raw)
26f7227b | 46 | |
def get_mapping(fobj):
    """Read one event-ID -> name mapping record, returning (event_id, name).

    Record layout: a u64 event ID, a u32 name length, then the event name
    bytes (decoded with the default UTF-8 codec).
    """
    (event_id, ) = struct.unpack('=Q', fobj.read(8))
    # Renamed from 'len' to avoid shadowing the builtin.
    (name_length, ) = struct.unpack('=L', fobj.read(4))
    name = fobj.read(name_length).decode()

    return (event_id, name)
def read_record(fobj):
    """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, args)."""
    event_id, timestamp_ns, length, pid = read_header(fobj, rec_header_fmt)
    # The argument payload is whatever remains of the record after the
    # fixed-size header; its total size is given by the header itself.
    payload = fobj.read(length - rec_header_fmt_len)
    return (event_id, timestamp_ns, pid, payload)
26f7227b | 59 | |
def read_trace_header(fobj):
    """Read and verify trace file header"""
    found_event_id, found_magic, log_version = read_header(fobj, log_header_fmt)
    # The header must carry the sentinel event ID and the magic number.
    if found_event_id != header_event_id:
        raise ValueError(f'Not a valid trace file, header id {found_event_id} != {header_event_id}')
    if found_magic != header_magic:
        raise ValueError(f'Not a valid trace file, header magic {found_magic} != {header_magic}')

    # Versions 0/2/3 are recognized legacy formats but no longer readable;
    # anything else entirely unknown. Only version 4 is supported here.
    if log_version not in [0, 2, 3, 4]:
        raise ValueError(f'Unknown version {log_version} of tracelog format!')
    if log_version != 4:
        raise ValueError(f'Log format {log_version} not supported with this QEMU release!')
26f7227b | 72 | |
def read_trace_records(events, fobj, read_header):
    """Deserialize trace records from a file, yielding record tuples (event, event_num, timestamp, pid, arg1, ..., arg6).

    Args:
        events (list of Event): events declared in the trace-events file
        fobj (file): input file
        read_header (bool): whether headers were read from fobj

    Raises:
        SimpleException: if a logged event is not declared in the events list.
    """
    # Build a synthetic Event for dropped-event records, attributed to this
    # source location so diagnostics point somewhere meaningful.
    frameinfo = inspect.getframeinfo(inspect.currentframe())
    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)",
                                frameinfo.lineno + 1, frameinfo.filename)

    event_mapping = {e.name: e for e in events}
    event_mapping["dropped"] = dropped_event
    event_id_to_name = {dropped_event_id: "dropped"}

    # If there is no header assume event ID mapping matches events list
    if not read_header:
        for event_id, event in enumerate(events):
            event_id_to_name[event_id] = event.name

    while True:
        t = fobj.read(8)
        if len(t) == 0:
            # Clean EOF between records ends iteration.
            break

        (rectype, ) = struct.unpack('=Q', t)
        if rectype == record_type_mapping:
            # Mapping records extend the ID->name table used by later records.
            event_id, event_name = get_mapping(fobj)
            event_id_to_name[event_id] = event_name
        else:
            event_id, timestamp_ns, pid, args_payload = read_record(fobj)
            event_name = event_id_to_name[event_id]

            try:
                event = event_mapping[event_name]
            except KeyError as e:
                # Fix: the adjacent literals used to concatenate to
                # "trace eventsfile" -- a space was missing.
                raise SimpleException(
                    f'{e} event is logged but is not declared in the trace events '
                    'file, try using trace-events-all instead.'
                ) from e

            # Decode the packed argument payload: strings are a u32 length
            # followed by the bytes; every other argument is a u64.
            offset = 0
            args = []
            for arg_type, _ in event.args:  # renamed from 'type' (builtin shadow)
                if is_string(arg_type):
                    (length,) = struct.unpack_from('=L', args_payload, offset=offset)
                    offset += 4
                    args.append(args_payload[offset:offset + length])
                    offset += length
                else:
                    (value,) = struct.unpack_from('=Q', args_payload, offset=offset)
                    offset += 8
                    args.append(value)

            yield (event_mapping[event_name], event_name, timestamp_ns, pid) + tuple(args)
26f7227b | 131 | |
class Analyzer:
    """A trace file analyzer which processes trace records.

    An analyzer can be passed to run() or process(). The begin() method is
    invoked, then each trace record is processed, and finally the end() method
    is invoked. When Analyzer is used as a context-manager (using the `with`
    statement), begin() and end() are called automatically.

    If a method matching a trace event name exists, it is invoked to process
    that trace record. Otherwise the catchall() method is invoked.

    Example:
        The following method handles the runstate_set(int new_state) trace event::

          def runstate_set(self, new_state):
              ...

        The method can also take a timestamp argument before the trace event
        arguments::

          def runstate_set(self, timestamp, new_state):
              ...

        Timestamps have the uint64_t type and are in nanoseconds.

        The pid can be included in addition to the timestamp and is useful when
        dealing with traces from multiple processes::

          def runstate_set(self, timestamp, pid, new_state):
              ...
    """

    def begin(self):
        """Called at the start of the trace."""

    def catchall(self, event, rec):
        """Called if no specific method for processing a trace event has been found."""

    def _build_fn(self, event):
        # Resolve the per-event handler once; _process_event caches the result.
        handler = getattr(self, event.name, None)
        if handler is None:
            # Return early to avoid costly call to inspect.getfullargspec
            return self.catchall

        n_args = len(event.args)
        # Number of handler parameters excluding 'self'.
        n_params = len(inspect.getfullargspec(handler)[0]) - 1
        if n_params == n_args + 1:
            # Handler wants the timestamp prepended to the event arguments.
            return lambda _, rec: handler(*(rec[1:2] + rec[3:3 + n_args]))
        if n_params == n_args + 2:
            # Handler wants timestamp and pid prepended.
            return lambda _, rec: handler(*rec[1:3 + n_args])
        # Handler takes just the event arguments.
        return lambda _, rec: handler(*rec[3:3 + n_args])

    def _process_event(self, rec_args, *, event, event_id, timestamp_ns, pid, **kwargs):
        if not hasattr(self, '_fn_cache'):
            # NOTE: Cannot depend on downstream subclasses to have
            # super().__init__() because of legacy.
            self._fn_cache = {}

        record = (event_id, timestamp_ns, pid, *rec_args)
        if event_id not in self._fn_cache:
            self._fn_cache[event_id] = self._build_fn(event)
        self._fn_cache[event_id](event, record)

    def end(self):
        """Called at the end of the trace."""

    def __enter__(self):
        self.begin()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Only a clean exit counts as the end of the trace; propagate errors.
        if exc_type is None:
            self.end()
        return False
213 | ||
def process(events, log, analyzer, read_header=True):
    """Invoke an analyzer on each event in a log.
    Args:
        events (file-object or list or str): events list or file-like object or file path as str to read event data from
        log (file-object or str): file-like object or file path as str to read log data from
        analyzer (Analyzer): Instance of Analyzer to interpret the event data
        read_header (bool, optional): Whether to read header data from the log data. Defaults to True.
    """

    if isinstance(events, list):
        # Already a list of events produced by tracetool.read_events
        events_list = events
    elif isinstance(events, str):
        # A path: open and parse the trace-events file ourselves.
        with open(events, 'r') as f:
            events_list = read_events(f, events)
    else:
        # An already opened file-object.
        events_list = read_events(events, events.name)

    if isinstance(log, str):
        with open(log, 'rb') as log_fobj:
            _process(events_list, log_fobj, analyzer, read_header)
    else:
        # `log` is an already opened file-object. We will not close it,
        # as we do not own it.
        _process(events_list, log, analyzer, read_header)
240 | ||
def _process(events, log_fobj, analyzer, read_header=True):
    """Internal function for processing

    Args:
        events (list): list of events already produced by tracetool.read_events
        log_fobj (file): file-object to read log data from
        analyzer (Analyzer): the Analyzer to interpret the event data
        read_header (bool, optional): Whether to read header data from the log data. Defaults to True.
    """

    if read_header:
        read_trace_header(log_fobj)

    # The context manager drives analyzer.begin()/end() around the record loop.
    with analyzer:
        for record in read_trace_records(events, log_fobj, read_header):
            event, event_id, timestamp_ns, record_pid, *rec_args = record
            analyzer._process_event(rec_args,
                                    event=event,
                                    event_id=event_id,
                                    timestamp_ns=timestamp_ns,
                                    pid=record_pid)
def run(analyzer):
    """Execute an analyzer on a trace file given on the command-line.

    This function is useful as a driver for simple analysis scripts. More
    advanced scripts will want to call process() instead."""

    try:
        # NOTE: See built-in `argparse` module for a more robust cli interface
        *flags, trace_event_path, trace_file_path = sys.argv[1:]
        assert flags in ([], ['--no-header']), 'Invalid no-header argument'
    except (AssertionError, ValueError):
        raise SimpleException(f'usage: {sys.argv[0]} [--no-header] <trace-events> <trace-file>\n')

    with open(trace_event_path, 'r') as events_fobj, open(trace_file_path, 'rb') as log_fobj:
        process(events_fobj, log_fobj, analyzer, read_header=not flags)
59da6684 SH |
279 | |
if __name__ == '__main__':
    class Formatter(Analyzer):
        def __init__(self):
            self.last_timestamp = None

        def catchall(self, event, rec):
            timestamp = rec[1]
            if self.last_timestamp is None:
                self.last_timestamp = timestamp
            delta_ns = timestamp - self.last_timestamp
            self.last_timestamp = timestamp

            # Event name, microsecond delta since previous record, pid, then
            # each argument rendered as name=value (hex for non-strings).
            fields = [event.name, '%0.3f' % (delta_ns / 1000.0),
                      'pid=%d' % rec[2]]
            for i, (arg_type, arg_name) in enumerate(event.args, 3):
                if is_string(arg_type):
                    fields.append('%s=%s' % (arg_name, rec[i]))
                else:
                    fields.append('%s=0x%x' % (arg_name, rec[i]))
            print(' '.join(fields))

    try:
        run(Formatter())
    except SimpleException as e:
        sys.stderr.write(str(e) + "\n")
        sys.exit(1)