#!/usr/bin/env python
#
# Pretty-printer for simple trace backend binary trace files
#
# Copyright IBM, Corp. 2010
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# For help see docs/tracing.txt
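#
# A typical invocation might look like this ("trace-events" is the trace
# event definition file and "trace-12345" is a placeholder name for a trace
# file produced by the "simple" trace backend):
#
#   ./simpletrace.py trace-events trace-12345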

import struct
import re
import inspect
from tracetool import read_events, Event
from tracetool.backend.simple import is_string

header_event_id = 0xffffffffffffffff
header_magic = 0xf2b177cb0aa429b4
dropped_event_id = 0xfffffffffffffffe

record_type_mapping = 0
record_type_event = 1

log_header_fmt = '=QQQ'
rec_header_fmt = '=QQII'
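
# Layout implied by the struct formats above and by how the fields are
# consumed below:
#   log header:    three little-endian uint64s -- the reserved header event
#                  id, the magic number, and the log version
#   record header: two uint64s and two uint32s -- the event id (or record
#                  type), the timestamp in nanoseconds, a length field that
#                  this script reads but does not otherwise use, and the pid
#                  of the writing process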

def read_header(fobj, hfmt):
    '''Read a trace record header'''
    hlen = struct.calcsize(hfmt)
    hdr = fobj.read(hlen)
    if len(hdr) != hlen:
        return None
    return struct.unpack(hfmt, hdr)

def get_record(edict, idtoname, rechdr, fobj):
    """Deserialize a trace record from a file into a tuple
       (name, timestamp, pid, arg1, ..., arg6)."""
    if rechdr is None:
        return None
    if rechdr[0] != dropped_event_id:
        event_id = rechdr[0]
        name = idtoname[event_id]
        rec = (name, rechdr[1], rechdr[3])
        event = edict[name]
        for type, name in event.args:
            if is_string(type):
                (length,) = struct.unpack('=L', fobj.read(4))
                s = fobj.read(length)
                rec = rec + (s,)
            else:
                (value,) = struct.unpack('=Q', fobj.read(8))
                rec = rec + (value,)
    else:
        rec = ("dropped", rechdr[1], rechdr[3])
        (value,) = struct.unpack('=Q', fobj.read(8))
        rec = rec + (value,)
    return rec

def get_mapping(fobj):
    (event_id, ) = struct.unpack('=Q', fobj.read(8))
    (length, ) = struct.unpack('=L', fobj.read(4))
    name = fobj.read(length)

    return (event_id, name)

def read_record(edict, idtoname, fobj):
    """Deserialize a trace record from a file into a tuple (event_num, timestamp, pid, arg1, ..., arg6)."""
    rechdr = read_header(fobj, rec_header_fmt)
    return get_record(edict, idtoname, rechdr, fobj)

def read_trace_header(fobj):
    """Read and verify trace file header"""
    header = read_header(fobj, log_header_fmt)
    if header is None:
        raise ValueError('Not a valid trace file!')
    if header[0] != header_event_id:
        raise ValueError('Not a valid trace file, header id %d != %d' %
                         (header[0], header_event_id))
    if header[1] != header_magic:
        raise ValueError('Not a valid trace file, header magic %d != %d' %
                         (header[1], header_magic))

    log_version = header[2]
    if log_version not in [0, 2, 3, 4]:
        raise ValueError('Unknown version of tracelog format!')
    if log_version != 4:
        raise ValueError('Log format %d not supported with this QEMU release!'
                         % log_version)

def read_trace_records(edict, fobj):
    """Deserialize trace records from a file, yielding record tuples (event_num, timestamp, pid, arg1, ..., arg6)."""
    idtoname = {
        dropped_event_id: "dropped"
    }
    while True:
        t = fobj.read(8)
        if len(t) == 0:
            break

        (rectype, ) = struct.unpack('=Q', t)
        if rectype == record_type_mapping:
            event_id, name = get_mapping(fobj)
            idtoname[event_id] = name
        else:
            rec = read_record(edict, idtoname, fobj)

            yield rec

class Analyzer(object):
    """A trace file analyzer which processes trace records.

    An analyzer can be passed to run() or process(). The begin() method is
    invoked, then each trace record is processed, and finally the end() method
    is invoked.

    If a method matching a trace event name exists, it is invoked to process
    that trace record. Otherwise the catchall() method is invoked."""

    def begin(self):
        """Called at the start of the trace."""
        pass

    def catchall(self, event, rec):
        """Called if no specific method for processing a trace event has been found."""
        pass

    def end(self):
        """Called at the end of the trace."""
        pass
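
# A minimal example of a custom analyzer, as a sketch only ("qemu_vmalloc" is
# an illustrative event name; substitute an event from your trace-events
# file).  A method named after a trace event receives that event's arguments;
# build_fn() inside process() below also allows the timestamp, or the
# timestamp and pid, to be accepted as extra leading arguments.
#
#   class VmallocAnalyzer(Analyzer):
#       def qemu_vmalloc(self, size, ptr):
#           print 'allocated %d bytes at 0x%x' % (size, ptr)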

def process(events, log, analyzer, read_header=True):
    """Invoke an analyzer on each event in a log."""
    if isinstance(events, str):
        events = read_events(open(events, 'r'))
    if isinstance(log, str):
        log = open(log, 'rb')

    if read_header:
        read_trace_header(log)

    dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)")
    edict = {"dropped": dropped_event}

    for event in events:
        edict[event.name] = event

    def build_fn(analyzer, event):
        if isinstance(event, str):
            return analyzer.catchall

        fn = getattr(analyzer, event.name, None)
        if fn is None:
            return analyzer.catchall

        event_argcount = len(event.args)
        fn_argcount = len(inspect.getargspec(fn)[0]) - 1
        if fn_argcount == event_argcount + 1:
            # Include timestamp as first argument
            return lambda _, rec: fn(*((rec[1:2],) + rec[3:3 + event_argcount]))
        elif fn_argcount == event_argcount + 2:
            # Include timestamp and pid
            return lambda _, rec: fn(*rec[1:3 + event_argcount])
        else:
            # Just arguments, no timestamp or pid
            return lambda _, rec: fn(*rec[3:3 + event_argcount])

    analyzer.begin()
    fn_cache = {}
    for rec in read_trace_records(edict, log):
        event_num = rec[0]
        event = edict[event_num]
        if event_num not in fn_cache:
            fn_cache[event_num] = build_fn(analyzer, event)
        fn_cache[event_num](event, rec)
    analyzer.end()
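
# Scripts that need more control than run() provides can call process()
# directly.  A sketch, reusing the illustrative analyzer above (file names
# are placeholders):
#
#   process('trace-events', 'trace-12345', VmallocAnalyzer())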

def run(analyzer):
    """Execute an analyzer on a trace file given on the command-line.

    This function is useful as a driver for simple analysis scripts. More
    advanced scripts will want to call process() instead."""
    import sys

    read_header = True
    if len(sys.argv) == 4 and sys.argv[1] == '--no-header':
        read_header = False
        del sys.argv[1]
    elif len(sys.argv) != 3:
        sys.stderr.write('usage: %s [--no-header] <trace-events> ' \
                         '<trace-file>\n' % sys.argv[0])
        sys.exit(1)

    events = read_events(open(sys.argv[1], 'r'))
    process(events, sys.argv[2], analyzer, read_header=read_header)

if __name__ == '__main__':
    class Formatter(Analyzer):
        def __init__(self):
            self.last_timestamp = None

        def catchall(self, event, rec):
            timestamp = rec[1]
            if self.last_timestamp is None:
                self.last_timestamp = timestamp
            delta_ns = timestamp - self.last_timestamp
            self.last_timestamp = timestamp

            fields = [event.name, '%0.3f' % (delta_ns / 1000.0),
                      'pid=%d' % rec[2]]
            i = 3
            for type, name in event.args:
                if is_string(type):
                    fields.append('%s=%s' % (name, rec[i]))
                else:
                    fields.append('%s=0x%x' % (name, rec[i]))
                i += 1
            print ' '.join(fields)

    run(Formatter())