]>
Commit | Line | Data |
---|---|---|
3d004a37 | 1 | #!/usr/bin/env python3 |
26f7227b SH |
2 | # |
3 | # Pretty-printer for simple trace backend binary trace files | |
4 | # | |
5 | # Copyright IBM, Corp. 2010 | |
6 | # | |
7 | # This work is licensed under the terms of the GNU GPL, version 2. See | |
8 | # the COPYING file in the top-level directory. | |
9 | # | |
d0fb9657 | 10 | # For help see docs/devel/tracing.rst |
26f7227b | 11 | |
f7bd4f02 | 12 | import sys |
26f7227b | 13 | import struct |
59da6684 | 14 | import inspect |
d1b97bce | 15 | from tracetool import read_events, Event |
90a147a2 | 16 | from tracetool.backend.simple import is_string |
26f7227b | 17 | |
2c109f21 MY |
__all__ = ['Analyzer', 'process', 'run']

# This is the binary format that the QEMU "simple" trace backend
# emits. There is no specification documentation because the format is
# not guaranteed to be stable. Trace files must be parsed with the
# same trace-events-all file and the same simpletrace.py file that
# QEMU was built with.

# Sentinel event ID stored in the file header record.
header_event_id = 0xffffffffffffffff
# Magic number identifying a simpletrace file (second header field).
header_magic = 0xf2b177cb0aa429b4
# Event ID used for records reporting dropped trace events.
dropped_event_id = 0xfffffffffffffffe

# Record type discriminator values (first 8 bytes of each record).
record_type_mapping = 0
record_type_event = 1

# struct format strings: native byte order, standard sizes.
# Log header: (event_id, magic, version) as three uint64.
log_header_fmt = '=QQQ'
# Record header: (event_id, timestamp_ns, record_length, record_pid).
rec_header_fmt = '=QQII'
26f7227b | 34 | |
def read_header(fobj, hfmt):
    '''Read and unpack one header from fobj using struct format hfmt.

    Returns the unpacked tuple, or None if the file ends before a full
    header could be read.'''
    size = struct.calcsize(hfmt)
    data = fobj.read(size)
    if len(data) != size:
        return None
    return struct.unpack(hfmt, data)
26f7227b | 42 | |
def get_record(event_mapping, event_id_to_name, rechdr, fobj):
    """Deserialize a trace record from a file into a tuple
    (name, timestamp, pid, arg1, ..., arg6).

    Args:
        event_mapping (str -> Event): events dict, indexed by name
        event_id_to_name (int -> str): event names dict, indexed by event ID
        rechdr (tuple or None): record header (event_id, timestamp_ns,
            record_length, record_pid), or None at end of file
        fobj (file): input file, positioned just past the record header

    Exits the process with an error message if the record's event is not
    present in event_mapping.
    """
    if rechdr is None:
        return None
    if rechdr[0] != dropped_event_id:
        event_id = rechdr[0]
        name = event_id_to_name[event_id]
        # rechdr[1] is the timestamp, rechdr[3] the pid; rechdr[2]
        # (record length) is not part of the returned tuple.
        rec = (name, rechdr[1], rechdr[3])
        try:
            event = event_mapping[name]
        except KeyError as e:
            sys.stderr.write('%s event is logged but is not declared ' \
                             'in the trace events file, try using ' \
                             'trace-events-all instead.\n' % str(e))
            sys.exit(1)

        # Renamed from 'type'/'name'/'len'/'l' to avoid shadowing builtins
        # and the event name bound above.
        for arg_type, _arg_name in event.args:
            if is_string(arg_type):
                # String arguments: 4-byte little-endian length, then bytes.
                (str_len,) = struct.unpack('=L', fobj.read(4))
                s = fobj.read(str_len)
                rec = rec + (s,)
            else:
                # All other argument types are stored as 64-bit values.
                (value,) = struct.unpack('=Q', fobj.read(8))
                rec = rec + (value,)
    else:
        # Dropped-event record: single uint64 argument counts dropped events.
        rec = ("dropped", rechdr[1], rechdr[3])
        (value,) = struct.unpack('=Q', fobj.read(8))
        rec = rec + (value,)
    return rec
74 | ||
def get_mapping(fobj):
    """Read one mapping record from fobj.

    A mapping record associates an event ID with an event name:
    a uint64 event ID, a 4-byte name length, then the name bytes.

    Returns:
        (event_id, name) tuple with name decoded to str.
    """
    (event_id, ) = struct.unpack('=Q', fobj.read(8))
    # Renamed from 'len' to avoid shadowing the builtin.
    (name_len, ) = struct.unpack('=L', fobj.read(4))
    name = fobj.read(name_len).decode()

    return (event_id, name)
81 | ||
def read_record(event_mapping, event_id_to_name, fobj):
    """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6)."""
    # Read the fixed-size header first, then let get_record consume the
    # variable-length argument payload.
    return get_record(event_mapping, event_id_to_name,
                      read_header(fobj, rec_header_fmt), fobj)
26f7227b | 86 | |
def read_trace_header(fobj):
    """Read the trace file header from fobj and validate it.

    Raises:
        ValueError: if the header is missing, malformed, or the log
            format version is not supported by this QEMU release.
    """
    hdr = read_header(fobj, log_header_fmt)
    if hdr is None:
        raise ValueError('Not a valid trace file!')

    event_id, magic, log_version = hdr
    if event_id != header_event_id:
        raise ValueError('Not a valid trace file, header id %d != %d' %
                         (event_id, header_event_id))
    if magic != header_magic:
        raise ValueError('Not a valid trace file, header magic %d != %d' %
                         (magic, header_magic))

    # Versions 0, 2 and 3 are recognized but only version 4 can be parsed
    # by this implementation.
    if log_version not in (0, 2, 3, 4):
        raise ValueError('Unknown version of tracelog format!')
    if log_version != 4:
        raise ValueError('Log format %d not supported with this QEMU release!'
                         % log_version)
26f7227b | 105 | |
def read_trace_records(event_mapping, event_id_to_name, fobj):
    """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6).

    Note that `event_id_to_name` is modified if the file contains mapping records.

    Args:
        event_mapping (str -> Event): events dict, indexed by name
        event_id_to_name (int -> str): event names dict, indexed by event ID
        fobj (file): input file

    """
    # Each record starts with a uint64 type discriminator; an empty read
    # means end of file.
    while raw_type := fobj.read(8):
        (rectype, ) = struct.unpack('=Q', raw_type)
        if rectype == record_type_mapping:
            # Mapping records extend the ID->name table in place and do
            # not produce an output record.
            event_id, name = get_mapping(fobj)
            event_id_to_name[event_id] = name
        else:
            yield read_record(event_mapping, event_id_to_name, fobj)
26f7227b | 130 | |
59da6684 SH |
131 | class Analyzer(object): |
132 | """A trace file analyzer which processes trace records. | |
133 | ||
134 | An analyzer can be passed to run() or process(). The begin() method is | |
135 | invoked, then each trace record is processed, and finally the end() method | |
136 | is invoked. | |
137 | ||
138 | If a method matching a trace event name exists, it is invoked to process | |
659370f7 SH |
139 | that trace record. Otherwise the catchall() method is invoked. |
140 | ||
141 | Example: | |
142 | The following method handles the runstate_set(int new_state) trace event:: | |
143 | ||
144 | def runstate_set(self, new_state): | |
145 | ... | |
146 | ||
147 | The method can also take a timestamp argument before the trace event | |
148 | arguments:: | |
149 | ||
150 | def runstate_set(self, timestamp, new_state): | |
151 | ... | |
152 | ||
153 | Timestamps have the uint64_t type and are in nanoseconds. | |
154 | ||
155 | The pid can be included in addition to the timestamp and is useful when | |
156 | dealing with traces from multiple processes:: | |
157 | ||
158 | def runstate_set(self, timestamp, pid, new_state): | |
159 | ... | |
160 | """ | |
59da6684 SH |
161 | |
162 | def begin(self): | |
163 | """Called at the start of the trace.""" | |
164 | pass | |
165 | ||
166 | def catchall(self, event, rec): | |
167 | """Called if no specific method for processing a trace event has been found.""" | |
168 | pass | |
169 | ||
170 | def end(self): | |
171 | """Called at the end of the trace.""" | |
172 | pass | |
173 | ||
def process(events, log, analyzer, read_header=True):
    """Invoke an analyzer on each event in a log.
    Args:
        events (file-object or list or str): events list or file-like object or file path as str to read event data from
        log (file-object or str): file-like object or file path as str to read log data from
        analyzer (Analyzer): Instance of Analyzer to interpret the event data
        read_header (bool, optional): Whether to read header data from the log data. Defaults to True.
    """

    if isinstance(events, str):
        with open(events, 'r') as f:
            events_list = read_events(f, events)
    elif isinstance(events, list):
        # Treat as a list of events already produced by tracetool.read_events
        events_list = events
    else:
        # Treat as an already opened file-object
        events_list = read_events(events, events.name)

    close_log = False
    if isinstance(log, str):
        log = open(log, 'rb')
        close_log = True

    if read_header:
        read_trace_header(log)

    # Declare a synthetic event for dropped-event records, attributed to
    # this source location for error reporting purposes.
    frameinfo = inspect.getframeinfo(inspect.currentframe())
    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)",
                                frameinfo.lineno + 1, frameinfo.filename)
    event_mapping = {"dropped": dropped_event}
    event_id_to_name = {dropped_event_id: "dropped"}

    for event in events_list:
        event_mapping[event.name] = event

    # If there is no header assume event ID mapping matches events list
    if not read_header:
        for event_id, event in enumerate(events_list):
            event_id_to_name[event_id] = event.name

    def build_fn(analyzer, event):
        """Return the analyzer callback for `event`, adapting its arity.

        The handler may accept the event arguments alone, or with a
        leading timestamp, or with leading timestamp and pid.
        """
        if isinstance(event, str):
            return analyzer.catchall

        fn = getattr(analyzer, event.name, None)
        if fn is None:
            return analyzer.catchall

        event_argcount = len(event.args)
        # inspect.getargspec() was removed in Python 3.11; use
        # getfullargspec() instead.  Subtract 1 for the bound `self`.
        fn_argcount = len(inspect.getfullargspec(fn).args) - 1
        if fn_argcount == event_argcount + 1:
            # Include timestamp as first argument
            return lambda _, rec: fn(*(rec[1:2] + rec[3:3 + event_argcount]))
        elif fn_argcount == event_argcount + 2:
            # Include timestamp and pid
            return lambda _, rec: fn(*rec[1:3 + event_argcount])
        else:
            # Just arguments, no timestamp or pid
            return lambda _, rec: fn(*rec[3:3 + event_argcount])

    analyzer.begin()
    fn_cache = {}
    for rec in read_trace_records(event_mapping, event_id_to_name, log):
        event_num = rec[0]
        event = event_mapping[event_num]
        if event_num not in fn_cache:
            fn_cache[event_num] = build_fn(analyzer, event)
        fn_cache[event_num](event, rec)
    analyzer.end()

    if close_log:
        log.close()
247 | ||
59da6684 SH |
248 | def run(analyzer): |
249 | """Execute an analyzer on a trace file given on the command-line. | |
250 | ||
251 | This function is useful as a driver for simple analysis scripts. More | |
252 | advanced scripts will want to call process() instead.""" | |
f7bd4f02 MY |
253 | |
254 | try: | |
255 | # NOTE: See built-in `argparse` module for a more robust cli interface | |
256 | *no_header, trace_event_path, trace_file_path = sys.argv[1:] | |
257 | assert no_header == [] or no_header == ['--no-header'], 'Invalid no-header argument' | |
258 | except (AssertionError, ValueError): | |
259 | sys.stderr.write(f'usage: {sys.argv[0]} [--no-header] <trace-events> <trace-file>\n') | |
59da6684 SH |
260 | sys.exit(1) |
261 | ||
f7bd4f02 MY |
262 | with open(trace_event_path, 'r') as events_fobj, open(trace_file_path, 'rb') as log_fobj: |
263 | process(events_fobj, log_fobj, analyzer, read_header=not no_header) | |
59da6684 SH |
264 | |
if __name__ == '__main__':
    class Formatter(Analyzer):
        """Print each trace record as one line with a time delta from the
        previous record."""

        def __init__(self):
            # Timestamp of the previously printed record; None until the
            # first record arrives.
            self.last_timestamp = None

        def catchall(self, event, rec):
            timestamp = rec[1]
            if self.last_timestamp is None:
                self.last_timestamp = timestamp
            delta_ns = timestamp - self.last_timestamp
            self.last_timestamp = timestamp

            fields = [event.name,
                      '%0.3f' % (delta_ns / 1000.0),
                      'pid=%d' % rec[2]]
            # Event arguments start at index 3 of the record tuple.
            for i, (arg_type, arg_name) in enumerate(event.args, start=3):
                if is_string(arg_type):
                    fields.append('%s=%s' % (arg_name, rec[i]))
                else:
                    fields.append('%s=0x%x' % (arg_name, rec[i]))
            print(' '.join(fields))

    run(Formatter())