#!/usr/bin/env python3
#
# Pretty-printer for simple trace backend binary trace files
#
# Copyright IBM, Corp. 2010
#
# This work is licensed under the terms of the GNU GPL, version 2.  See
# the COPYING file in the top-level directory.
#
# For help see docs/devel/tracing.rst
import inspect
import struct
import sys

from tracetool import read_events, Event
from tracetool.backend.simple import is_string
__all__ = ['Analyzer', 'process', 'run']

# This is the binary format that the QEMU "simple" trace backend
# emits. There is no specification documentation because the format is
# not guaranteed to be stable. Trace files must be parsed with the
# same trace-events-all file and the same simpletrace.py file that
# QEMU was built with.
header_event_id = 0xffffffffffffffff
header_magic    = 0xf2b177cb0aa429b4
dropped_event_id = 0xfffffffffffffffe

# Record type prefix preceding every payload in the log.
record_type_mapping = 0   # event-ID -> name mapping record
record_type_event = 1     # ordinary trace event record (implied by the
                          # non-mapping branch in read_trace_records)

log_header_fmt = '=QQQ'   # native-endian: (event id, magic, version)
rec_header_fmt = '=QQII'  # native-endian: (event id, timestamp ns, length, pid)
def read_header(fobj, hfmt):
    '''Read a trace record header.

    Args:
        fobj (file): input file positioned at a header
        hfmt (str): struct format string describing the header

    Returns:
        Tuple of unpacked header fields, or None on a short read
        (end of file / truncated trace).
    '''
    hlen = struct.calcsize(hfmt)
    hdr = fobj.read(hlen)
    if len(hdr) != hlen:
        # Short read: signal EOF / invalid header to the caller instead
        # of letting struct.unpack raise.
        return None
    return struct.unpack(hfmt, hdr)
def get_record(event_mapping, event_id_to_name, rechdr, fobj):
    """Deserialize a trace record from a file into a tuple
       (name, timestamp, pid, arg1, ..., arg6).

    Args:
        event_mapping (str -> Event): events dict, indexed by name
        event_id_to_name (int -> str): event names dict, indexed by event ID
        rechdr (tuple): record header from read_header(), or None at EOF
        fobj (file): input file positioned just past the record header
    """
    if rechdr is None:
        return None
    if rechdr[0] != dropped_event_id:
        event_id = rechdr[0]
        name = event_id_to_name[event_id]
        # rec_header_fmt is (event id, timestamp ns, length, pid).
        rec = (name, rechdr[1], rechdr[3])
        try:
            event = event_mapping[name]
        except KeyError as e:
            sys.stderr.write('%s event is logged but is not declared ' \
                             'in the trace events file, try using ' \
                             'trace-events-all instead.\n' % str(e))
            sys.exit(1)
        for type, name in event.args:
            if is_string(type):
                # String argument: uint32 length followed by the bytes.
                # (renamed from `len` to avoid shadowing the builtin)
                (length,) = struct.unpack('=L', fobj.read(4))
                rec = rec + (fobj.read(length),)
            else:
                # Integer argument: a single uint64.
                (value,) = struct.unpack('=Q', fobj.read(8))
                rec = rec + (value,)
    else:
        # Dropped-events pseudo record: one uint64 holding the count.
        rec = ("dropped", rechdr[1], rechdr[3])
        (value,) = struct.unpack('=Q', fobj.read(8))
        rec = rec + (value,)
    return rec
def get_mapping(fobj):
    """Read one event-ID mapping record.

    Layout: uint64 event ID, uint32 name length, then the name bytes.

    Returns:
        (event_id, name) tuple with the name decoded to str.
    """
    (event_id, ) = struct.unpack('=Q', fobj.read(8))
    # Renamed from `len` to avoid shadowing the builtin.
    (namelen, ) = struct.unpack('=L', fobj.read(4))
    name = fobj.read(namelen).decode()

    return (event_id, name)
def read_record(event_mapping, event_id_to_name, fobj):
    """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6).

    Args:
        event_mapping (str -> Event): events dict, indexed by name
        event_id_to_name (int -> str): event names dict, indexed by event ID
        fobj (file): input file positioned at a record header
    """
    rechdr = read_header(fobj, rec_header_fmt)
    return get_record(event_mapping, event_id_to_name, rechdr, fobj)
def read_trace_header(fobj):
    """Read and verify trace file header.

    Raises:
        ValueError: if the file is truncated, the header ID or magic does
            not match, or the log format version is not supported.
    """
    header = read_header(fobj, log_header_fmt)
    if header is None:
        raise ValueError('Not a valid trace file!')
    if header[0] != header_event_id:
        raise ValueError('Not a valid trace file, header id %d != %d' %
                         (header[0], header_event_id))
    if header[1] != header_magic:
        raise ValueError('Not a valid trace file, header magic %d != %d' %
                         (header[1], header_magic))

    log_version = header[2]
    if log_version not in [0, 2, 3, 4]:
        raise ValueError('Unknown version of tracelog format!')
    if log_version != 4:
        # Older known formats exist but this parser only handles version 4.
        raise ValueError('Log format %d not supported with this QEMU release!'
                         % log_version)
def read_trace_records(event_mapping, event_id_to_name, fobj):
    """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6).

    Note that `event_id_to_name` is modified if the file contains mapping records.

    Args:
        event_mapping (str -> Event): events dict, indexed by name
        event_id_to_name (int -> str): event names dict, indexed by event ID
        fobj (file): input file
    """
    while True:
        t = fobj.read(8)
        if len(t) == 0:
            # End of file.
            break

        (rectype, ) = struct.unpack('=Q', t)
        if rectype == record_type_mapping:
            # Mapping records update the ID->name table in place; they are
            # consumed here and not yielded to the caller.
            event_id, name = get_mapping(fobj)
            event_id_to_name[event_id] = name
        else:
            rec = read_record(event_mapping, event_id_to_name, fobj)

            yield rec
class Analyzer(object):
    """A trace file analyzer which processes trace records.

    An analyzer can be passed to run() or process().  The begin() method is
    invoked, then each trace record is processed, and finally the end() method
    is invoked.

    If a method matching a trace event name exists, it is invoked to process
    that trace record.  Otherwise the catchall() method is invoked.

    Example:
    The following method handles the runstate_set(int new_state) trace event::

      def runstate_set(self, new_state):
          ...

    The method can also take a timestamp argument before the trace event
    arguments::

      def runstate_set(self, timestamp, new_state):
          ...

    Timestamps have the uint64_t type and are in nanoseconds.

    The pid can be included in addition to the timestamp and is useful when
    dealing with traces from multiple processes::

      def runstate_set(self, timestamp, pid, new_state):
          ...
    """

    def begin(self):
        """Called at the start of the trace."""
        pass

    def catchall(self, event, rec):
        """Called if no specific method for processing a trace event has been found."""
        pass

    def end(self):
        """Called at the end of the trace."""
        pass
def process(events, log, analyzer, read_header=True):
    """Invoke an analyzer on each event in a log.

    Args:
        events (file-object or list or str): events list or file-like object or file path as str to read event data from
        log (file-object or str): file-like object or file path as str to read log data from
        analyzer (Analyzer): Instance of Analyzer to interpret the event data
        read_header (bool, optional): Whether to read header data from the log data. Defaults to True.
    """

    if isinstance(events, str):
        with open(events, 'r') as f:
            events_list = read_events(f, events)
    elif isinstance(events, list):
        # Treat as a list of events already produced by tracetool.read_events
        events_list = events
    else:
        # Treat as an already opened file-object
        events_list = read_events(events, events.name)

    # If we open the log ourselves, remember to close it on the way out
    # (a path passed in as str would otherwise leak the file object).
    close_log = False
    if isinstance(log, str):
        log = open(log, 'rb')
        close_log = True

    if read_header:
        read_trace_header(log)

    # Synthesize an event describing dropped-record markers; it is built
    # with this source location so tracetool errors point here.
    frameinfo = inspect.getframeinfo(inspect.currentframe())
    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)",
                                frameinfo.lineno + 1, frameinfo.filename)
    event_mapping = {"dropped": dropped_event}
    event_id_to_name = {dropped_event_id: "dropped"}

    for event in events_list:
        event_mapping[event.name] = event

    # If there is no header assume event ID mapping matches events list
    if not read_header:
        for event_id, event in enumerate(events_list):
            event_id_to_name[event_id] = event.name

    def build_fn(analyzer, event):
        # Select the analyzer method for `event` and adapt the record tuple
        # to the method's arity: args only, timestamp+args, or
        # timestamp+pid+args.
        if isinstance(event, str):
            return analyzer.catchall

        fn = getattr(analyzer, event.name, None)
        if fn is None:
            return analyzer.catchall

        event_argcount = len(event.args)
        # Number of parameters the bound method takes, excluding `self`.
        # inspect.getargspec was removed in Python 3.11; getfullargspec is
        # the compatible replacement.
        fn_argcount = len(inspect.getfullargspec(fn).args) - 1
        if fn_argcount == event_argcount + 1:
            # Include timestamp as first argument
            return lambda _, rec: fn(*(rec[1:2] + rec[3:3 + event_argcount]))
        elif fn_argcount == event_argcount + 2:
            # Include timestamp and pid
            return lambda _, rec: fn(*rec[1:3 + event_argcount])
        else:
            # Just arguments, no timestamp or pid
            return lambda _, rec: fn(*rec[3:3 + event_argcount])

    analyzer.begin()
    fn_cache = {}
    for rec in read_trace_records(event_mapping, event_id_to_name, log):
        event_num = rec[0]
        event = event_mapping[event_num]
        if event_num not in fn_cache:
            fn_cache[event_num] = build_fn(analyzer, event)
        fn_cache[event_num](event, rec)
    analyzer.end()

    if close_log:
        log.close()
def run(analyzer):
    """Execute an analyzer on a trace file given on the command-line.

    This function is useful as a driver for simple analysis scripts.  More
    advanced scripts will want to call process() instead."""

    try:
        # NOTE: See built-in `argparse` module for a more robust cli interface
        *no_header, trace_event_path, trace_file_path = sys.argv[1:]
        assert no_header == [] or no_header == ['--no-header'], 'Invalid no-header argument'
    except (AssertionError, ValueError):
        sys.stderr.write(f'usage: {sys.argv[0]} [--no-header] <trace-events> <trace-file>\n')
        sys.exit(1)

    with open(trace_event_path, 'r') as events_fobj, \
         open(trace_file_path, 'rb') as log_fobj:
        process(events_fobj, log_fobj, analyzer, read_header=not no_header)
if __name__ == '__main__':
    class Formatter(Analyzer):
        # Default pretty-printer: one line per record with the event name,
        # microsecond delta since the previous record, pid, and each
        # argument rendered as name=value (hex for integer arguments).

        def __init__(self):
            self.last_timestamp = None

        def catchall(self, event, rec):
            timestamp = rec[1]
            if self.last_timestamp is None:
                self.last_timestamp = timestamp
            delta_ns = timestamp - self.last_timestamp
            self.last_timestamp = timestamp

            fields = [event.name, '%0.3f' % (delta_ns / 1000.0),
                      'pid=%d' % rec[2]]
            i = 3
            for type, name in event.args:
                if is_string(type):
                    fields.append('%s=%s' % (name, rec[i]))
                else:
                    fields.append('%s=0x%x' % (name, rec[i]))
                i += 1
            print(' '.join(fields))

    run(Formatter())