#!/usr/bin/env python3
#
# Migration Stream Analyzer
#
# Copyright (c) 2015 Alexander Graf <agraf@suse.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.

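"""Dump the contents of a QEMU migration stream as JSON.

The stream is decoded with the help of the JSON VM state description
(VMSD) that QEMU stores at the end of the stream, after the EOF marker.

Typical invocations (the dump file name is just an example):

    analyze-migration.py -f migration.dump -d desc
    analyze-migration.py -f migration.dump -d state -m
    analyze-migration.py -f migration.dump -x
"""
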
import json
import os
import argparse
import collections
import struct
import sys


def mkdir_p(path):
    try:
        os.makedirs(path)
    except OSError:
        pass


class MigrationFile(object):
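    """Thin wrapper around the migration stream file.

    Integers are read as big-endian values; variable-length fields are
    prefixed with a single length byte unless an explicit size is given.
    """
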
    def __init__(self, filename):
        self.filename = filename
        self.file = open(self.filename, "rb")

    def read64(self):
        return int.from_bytes(self.file.read(8), byteorder='big', signed=False)

    def read32(self):
        return int.from_bytes(self.file.read(4), byteorder='big', signed=False)

    def read16(self):
        return int.from_bytes(self.file.read(2), byteorder='big', signed=False)

    def read8(self):
        return int.from_bytes(self.file.read(1), byteorder='big', signed=True)

    def readstr(self, len = None):
        return self.readvar(len).decode('utf-8')

    def readvar(self, size = None):
        if size is None:
            size = self.read8()
        if size == 0:
            # Return bytes, not str, so that readstr() can decode() it
            return b""
        value = self.file.read(size)
        if len(value) != size:
            raise Exception("Unexpected end of %s at 0x%x" % (self.filename, self.file.tell()))
        return value

    def tell(self):
        return self.file.tell()

    # The VMSD description is at the end of the file, after EOF. Look for
    # the last NULL byte, then for the beginning brace of JSON.
    def read_migration_debug_json(self):
        QEMU_VM_VMDESCRIPTION = 0x06

        # Remember the offset in the file when we started
        entrypos = self.file.tell()

        # Read the last 10MB
        self.file.seek(0, os.SEEK_END)
        endpos = self.file.tell()
        self.file.seek(max(-endpos, -10 * 1024 * 1024), os.SEEK_END)
        datapos = self.file.tell()
        data = self.file.read()
        # The read() above left the position at EOF; reopen to get a fresh
        # file object before seeking back into it
        self.file = open(self.filename, "rb")

        # Find the last NULL byte, then the first brace after that. This should
        # be the beginning of our JSON data.
        nulpos = data.rfind(b'\0')
        jsonpos = data.find(b'{', nulpos)

        # Check backwards from there and see whether we guessed right
        self.file.seek(datapos + jsonpos - 5, 0)
        if self.read8() != QEMU_VM_VMDESCRIPTION:
            raise Exception("No Debug Migration device found")

        jsonlen = self.read32()

        # Seek back to where we were at the beginning
        self.file.seek(entrypos, 0)

        # explicit decode() needed for Python 3.5 compatibility
        return data[jsonpos:jsonpos + jsonlen].decode("utf-8")

    def close(self):
        self.file.close()

class RamSection(object):
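    """Reader for the 'ram' section of the stream.

    Each chunk starts with a 64-bit word that combines the page address
    (aligned to the target page size) with the RAM_SAVE_FLAG_* bits in
    the low-order bits.
    """
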
    RAM_SAVE_FLAG_COMPRESS = 0x02
    RAM_SAVE_FLAG_MEM_SIZE = 0x04
    RAM_SAVE_FLAG_PAGE = 0x08
    RAM_SAVE_FLAG_EOS = 0x10
    RAM_SAVE_FLAG_CONTINUE = 0x20
    RAM_SAVE_FLAG_XBZRLE = 0x40
    RAM_SAVE_FLAG_HOOK = 0x80
    RAM_SAVE_FLAG_COMPRESS_PAGE = 0x100
    RAM_SAVE_FLAG_MULTIFD_FLUSH = 0x200

    def __init__(self, file, version_id, ramargs, section_key):
        if version_id != 4:
            raise Exception("Unknown RAM version %d" % version_id)

        self.file = file
        self.section_key = section_key
        self.TARGET_PAGE_SIZE = ramargs['page_size']
        self.dump_memory = ramargs['dump_memory']
        self.write_memory = ramargs['write_memory']
        self.ignore_shared = ramargs['ignore_shared']
        self.sizeinfo = collections.OrderedDict()
        self.data = collections.OrderedDict()
        self.data['section sizes'] = self.sizeinfo
        self.name = ''
        if self.write_memory:
            self.files = { }
        if self.dump_memory:
            self.memory = collections.OrderedDict()
            self.data['memory'] = self.memory

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def getDict(self):
        return self.data

    def read(self):
        # Read all RAM sections
        while True:
            addr = self.file.read64()
            flags = addr & (self.TARGET_PAGE_SIZE - 1)
            addr &= ~(self.TARGET_PAGE_SIZE - 1)

            if flags & self.RAM_SAVE_FLAG_MEM_SIZE:
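                # A MEM_SIZE chunk lists every RAM block: a length-prefixed
                # name followed by the block size (plus an extra address
                # when the x-ignore-shared capability is in use)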
                while True:
                    namelen = self.file.read8()
                    # We assume that no RAM chunk is big enough to ever
                    # hit the first byte of the address, so when we see
                    # a zero here we know it has to be an address, not the
                    # length of the next block.
                    if namelen == 0:
                        self.file.file.seek(-1, 1)
                        break
                    self.name = self.file.readstr(len = namelen)
                    len = self.file.read64()
                    self.sizeinfo[self.name] = '0x%016x' % len
                    if self.write_memory:
                        print(self.name)
                        mkdir_p('./' + os.path.dirname(self.name))
                        f = open('./' + self.name, "wb")
                        f.truncate(0)
                        f.truncate(len)
                        self.files[self.name] = f
                    if self.ignore_shared:
                        mr_addr = self.file.read64()
                flags &= ~self.RAM_SAVE_FLAG_MEM_SIZE

            if flags & self.RAM_SAVE_FLAG_COMPRESS:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    self.name = self.file.readstr()
                fill_char = self.file.read8()
                # The page in question is filled with fill_char now
                if self.write_memory and fill_char != 0:
                    self.files[self.name].seek(addr, os.SEEK_SET)
                    # Write bytes, not str, to the binary file; fill_char may
                    # be negative because read8() is signed, so mask it
                    self.files[self.name].write(bytes([fill_char & 0xff]) * self.TARGET_PAGE_SIZE)
                if self.dump_memory:
                    self.memory['%s (0x%016x)' % (self.name, addr)] = 'Filled with 0x%02x' % fill_char
                flags &= ~self.RAM_SAVE_FLAG_COMPRESS
            elif flags & self.RAM_SAVE_FLAG_PAGE:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    self.name = self.file.readstr()

                if self.write_memory or self.dump_memory:
                    data = self.file.readvar(size = self.TARGET_PAGE_SIZE)
                else: # Just skip RAM data
                    self.file.file.seek(self.TARGET_PAGE_SIZE, 1)

                if self.write_memory:
                    self.files[self.name].seek(addr, os.SEEK_SET)
                    self.files[self.name].write(data)
                if self.dump_memory:
                    # iterating bytes yields ints in Python 3, no ord() needed
                    hexdata = " ".join("{0:02x}".format(c) for c in data)
                    self.memory['%s (0x%016x)' % (self.name, addr)] = hexdata

                flags &= ~self.RAM_SAVE_FLAG_PAGE
            elif flags & self.RAM_SAVE_FLAG_XBZRLE:
                raise Exception("XBZRLE RAM compression is not supported yet")
            elif flags & self.RAM_SAVE_FLAG_HOOK:
                raise Exception("RAM hooks don't make sense with files")
            if flags & self.RAM_SAVE_FLAG_MULTIFD_FLUSH:
                continue

            # End of RAM section
            if flags & self.RAM_SAVE_FLAG_EOS:
                break

            if flags != 0:
                raise Exception("Unknown RAM flags: %x" % flags)

    def __del__(self):
        if self.write_memory:
            for key in self.files:
                self.files[key].close()


class HTABSection(object):
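    """Reader for the sPAPR hash page table ('spapr/htab') section.

    The entries are consumed only to advance the stream position; nothing
    is kept for the JSON dump.
    """
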
    HASH_PTE_SIZE_64 = 16

    def __init__(self, file, version_id, device, section_key):
        if version_id != 1:
            raise Exception("Unknown HTAB version %d" % version_id)

        self.file = file
        self.section_key = section_key

    def read(self):

        header = self.file.read32()

        if (header == 0xffffffff):
            # "no HPT" encoding (a -1 written as an unsigned 32-bit value;
            # read32() is unsigned and never returns -1 itself)
            return

        if (header > 0):
            # First section, just the hash shift
            return

        # Read until end marker
        while True:
            index = self.file.read32()
            n_valid = self.file.read16()
            n_invalid = self.file.read16()

            if index == 0 and n_valid == 0 and n_invalid == 0:
                break

            self.file.readvar(n_valid * self.HASH_PTE_SIZE_64)

    def getDict(self):
        return ""


class ConfigurationSection(object):
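    """Reader for the QEMU_VM_CONFIGURATION section.

    When the VMSD description contains a 'configuration' entry, the section
    is parsed as a VMSDSection and the migration capabilities are extracted;
    otherwise just the length-prefixed name is consumed (older streams).
    """
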
    def __init__(self, file, desc):
        self.file = file
        self.desc = desc
        self.caps = []

    def parse_capabilities(self, vmsd_caps):
        if not vmsd_caps:
            return

        ncaps = vmsd_caps.data['caps_count'].data
        self.caps = vmsd_caps.data['capabilities']

        if type(self.caps) != list:
            self.caps = [self.caps]

        if len(self.caps) != ncaps:
            raise Exception("Number of capabilities doesn't match "
                            "caps_count field")

    def has_capability(self, cap):
        return any([str(c) == cap for c in self.caps])

    def read(self):
        if self.desc:
            version_id = self.desc['version']
            section = VMSDSection(self.file, version_id, self.desc,
                                  'configuration')
            section.read()
            self.parse_capabilities(
                section.data.get("configuration/capabilities"))
        else:
            # backward compatibility for older streams that don't have
            # the configuration section in the json
            name_len = self.file.read32()
            name = self.file.readstr(len = name_len)

class VMSDFieldGeneric(object):
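    """Fallback field reader: fetches desc['size'] bytes and renders them
    as a hex string.  Most of the typed readers below subclass it."""
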
    def __init__(self, desc, file):
        self.file = file
        self.desc = desc
        self.data = ""

    def __repr__(self):
        return str(self.__str__())

    def __str__(self):
        return " ".join("{0:02x}".format(c) for c in self.data)

    def getDict(self):
        return self.__str__()

    def read(self):
        size = int(self.desc['size'])
        self.data = self.file.readvar(size)
        return self.data

class VMSDFieldCap(object):
    def __init__(self, desc, file):
        self.file = file
        self.desc = desc
        self.data = ""

    def __repr__(self):
        return self.data

    def __str__(self):
        return self.data

    def read(self):
        len = self.file.read8()
        self.data = self.file.readstr(len)


class VMSDFieldInt(VMSDFieldGeneric):
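    """Big-endian integer field.

    Both the signed and the unsigned interpretation of the raw bytes are
    kept (sdata/udata); the signed value ends up in self.data.
    """
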
    def __init__(self, desc, file):
        super(VMSDFieldInt, self).__init__(desc, file)
        self.size = int(desc['size'])
        self.format = '0x%%0%dx' % (self.size * 2)
        self.sdtype = '>i%d' % self.size
        self.udtype = '>u%d' % self.size

    def __repr__(self):
        if self.data < 0:
            return ('%s (%d)' % ((self.format % self.udata), self.data))
        else:
            return self.format % self.data

    def __str__(self):
        return self.__repr__()

    def getDict(self):
        return self.__str__()

    def read(self):
        super(VMSDFieldInt, self).read()
        self.sdata = int.from_bytes(self.data, byteorder='big', signed=True)
        self.udata = int.from_bytes(self.data, byteorder='big', signed=False)
        self.data = self.sdata
        return self.data

class VMSDFieldUInt(VMSDFieldInt):
    def __init__(self, desc, file):
        super(VMSDFieldUInt, self).__init__(desc, file)

    def read(self):
        super(VMSDFieldUInt, self).read()
        self.data = self.udata
        return self.data

class VMSDFieldIntLE(VMSDFieldInt):
    def __init__(self, desc, file):
        super(VMSDFieldIntLE, self).__init__(desc, file)
        self.dtype = '<i%d' % self.size

class VMSDFieldBool(VMSDFieldGeneric):
    def __init__(self, desc, file):
        super(VMSDFieldBool, self).__init__(desc, file)

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def getDict(self):
        return self.data

    def read(self):
        super(VMSDFieldBool, self).read()
        if self.data[0] == 0:
            self.data = False
        else:
            self.data = True
        return self.data

class VMSDFieldStruct(VMSDFieldGeneric):
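    """Nested structure field.

    Reads every field listed in desc['struct']['fields'] in order, expanding
    compressed array descriptions into one entry per element, and then reads
    any declared subsections.
    """
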
    QEMU_VM_SUBSECTION = 0x05

    def __init__(self, desc, file):
        super(VMSDFieldStruct, self).__init__(desc, file)
        self.data = collections.OrderedDict()

        # When we see compressed array elements, unfold them here
        new_fields = []
        for field in self.desc['struct']['fields']:
            if not 'array_len' in field:
                new_fields.append(field)
                continue
            array_len = field.pop('array_len')
            field['index'] = 0
            new_fields.append(field)
            for i in range(1, array_len):
                c = field.copy()
                c['index'] = i
                new_fields.append(c)

        self.desc['struct']['fields'] = new_fields

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def read(self):
        for field in self.desc['struct']['fields']:
            try:
                reader = vmsd_field_readers[field['type']]
            except KeyError:
                reader = VMSDFieldGeneric

            field['data'] = reader(field, self.file)
            field['data'].read()

            if 'index' in field:
                if field['name'] not in self.data:
                    self.data[field['name']] = []
                a = self.data[field['name']]
                if len(a) != int(field['index']):
                    raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
                a.append(field['data'])
            else:
                self.data[field['name']] = field['data']

        if 'subsections' in self.desc['struct']:
            for subsection in self.desc['struct']['subsections']:
                if self.file.read8() != self.QEMU_VM_SUBSECTION:
                    raise Exception("Subsection %s not found at offset %x" % ( subsection['vmsd_name'], self.file.tell()))
                name = self.file.readstr()
                version_id = self.file.read32()
                self.data[name] = VMSDSection(self.file, version_id, subsection, (name, 0))
                self.data[name].read()

    def getDictItem(self, value):
        # Strings would fall into the array category, treat
        # them specially
        if value.__class__ is ''.__class__:
            return value

        try:
            return self.getDictOrderedDict(value)
        except:
            try:
                return self.getDictArray(value)
            except:
                try:
                    return value.getDict()
                except:
                    return value

    def getDictArray(self, array):
        r = []
        for value in array:
            r.append(self.getDictItem(value))
        return r

    def getDictOrderedDict(self, dict):
        r = collections.OrderedDict()
        for (key, value) in dict.items():
            r[key] = self.getDictItem(value)
        return r

    def getDict(self):
        return self.getDictOrderedDict(self.data)

vmsd_field_readers = {
    "bool" : VMSDFieldBool,
    "int8" : VMSDFieldInt,
    "int16" : VMSDFieldInt,
    "int32" : VMSDFieldInt,
    "int32 equal" : VMSDFieldInt,
    "int32 le" : VMSDFieldIntLE,
    "int64" : VMSDFieldInt,
    "uint8" : VMSDFieldUInt,
    "uint16" : VMSDFieldUInt,
    "uint32" : VMSDFieldUInt,
    "uint32 equal" : VMSDFieldUInt,
    "uint64" : VMSDFieldUInt,
    "int64 equal" : VMSDFieldInt,
    "uint8 equal" : VMSDFieldInt,
    "uint16 equal" : VMSDFieldInt,
    "float64" : VMSDFieldGeneric,
    "timer" : VMSDFieldGeneric,
    "buffer" : VMSDFieldGeneric,
    "unused_buffer" : VMSDFieldGeneric,
    "bitmap" : VMSDFieldGeneric,
    "struct" : VMSDFieldStruct,
    "capability": VMSDFieldCap,
    "unknown" : VMSDFieldGeneric,
}

class VMSDSection(VMSDFieldStruct):
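    """A device section whose layout is described by the VMSD JSON.

    A section is read exactly like a field struct (see the comment in
    __init__), so all of the work happens in VMSDFieldStruct.
    """
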
    def __init__(self, file, version_id, device, section_key):
        self.file = file
        self.data = ""
        self.vmsd_name = ""
        self.section_key = section_key
        desc = device
        if 'vmsd_name' in device:
            self.vmsd_name = device['vmsd_name']

        # A section really is nothing but a FieldStruct :)
        super(VMSDSection, self).__init__({ 'struct' : desc }, file)

###############################################################################

class MigrationDump(object):
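    """Top-level parser for a migration stream.

    Verifies the file magic and version, loads the VMSD JSON description
    and then dispatches each section in the stream to the matching reader
    class (RamSection, HTABSection or a generic VMSDSection).
    """
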
    QEMU_VM_FILE_MAGIC = 0x5145564d
    QEMU_VM_FILE_VERSION = 0x00000003
    QEMU_VM_EOF = 0x00
    QEMU_VM_SECTION_START = 0x01
    QEMU_VM_SECTION_PART = 0x02
    QEMU_VM_SECTION_END = 0x03
    QEMU_VM_SECTION_FULL = 0x04
    QEMU_VM_SUBSECTION = 0x05
    QEMU_VM_VMDESCRIPTION = 0x06
    QEMU_VM_CONFIGURATION = 0x07
    QEMU_VM_SECTION_FOOTER = 0x7e

    def __init__(self, filename):
        self.section_classes = { ( 'ram', 0 ) : [ RamSection, None ],
                                 ( 'spapr/htab', 0) : ( HTABSection, None ) }
        self.filename = filename
        self.vmsd_desc = None

    def read(self, desc_only = False, dump_memory = False, write_memory = False):
        # Read in the whole file
        file = MigrationFile(self.filename)

        # File magic
        data = file.read32()
        if data != self.QEMU_VM_FILE_MAGIC:
            raise Exception("Invalid file magic %x" % data)

        # Version (has to be v3)
        data = file.read32()
        if data != self.QEMU_VM_FILE_VERSION:
            raise Exception("Invalid version number %d" % data)

        self.load_vmsd_json(file)

        # Read sections
        self.sections = collections.OrderedDict()

        if desc_only:
            return

        ramargs = {}
        ramargs['page_size'] = self.vmsd_desc['page_size']
        ramargs['dump_memory'] = dump_memory
        ramargs['write_memory'] = write_memory
        ramargs['ignore_shared'] = False
        self.section_classes[('ram',0)][1] = ramargs

        while True:
            section_type = file.read8()
            if section_type == self.QEMU_VM_EOF:
                break
            elif section_type == self.QEMU_VM_CONFIGURATION:
                config_desc = self.vmsd_desc.get('configuration')
                section = ConfigurationSection(file, config_desc)
                section.read()
                ramargs['ignore_shared'] = section.has_capability('x-ignore-shared')
            elif section_type == self.QEMU_VM_SECTION_START or section_type == self.QEMU_VM_SECTION_FULL:
                section_id = file.read32()
                name = file.readstr()
                instance_id = file.read32()
                version_id = file.read32()
                section_key = (name, instance_id)
                classdesc = self.section_classes[section_key]
                section = classdesc[0](file, version_id, classdesc[1], section_key)
                self.sections[section_id] = section
                section.read()
            elif section_type == self.QEMU_VM_SECTION_PART or section_type == self.QEMU_VM_SECTION_END:
                section_id = file.read32()
                self.sections[section_id].read()
            elif section_type == self.QEMU_VM_SECTION_FOOTER:
                read_section_id = file.read32()
                if read_section_id != section_id:
                    raise Exception("Mismatched section footer: %x vs %x" % (read_section_id, section_id))
            else:
                raise Exception("Unknown section type: %d" % section_type)
        file.close()

    def load_vmsd_json(self, file):
        vmsd_json = file.read_migration_debug_json()
        self.vmsd_desc = json.loads(vmsd_json, object_pairs_hook=collections.OrderedDict)
        for device in self.vmsd_desc['devices']:
            key = (device['name'], device['instance_id'])
            value = ( VMSDSection, device )
            self.section_classes[key] = value

    def getDict(self):
        r = collections.OrderedDict()
        for (key, value) in self.sections.items():
            key = "%s (%d)" % ( value.section_key[0], key )
            r[key] = value.getDict()
        return r

###############################################################################

class JSONEncoder(json.JSONEncoder):
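    """json.JSONEncoder that falls back to the string representation of
    VMSD field objects instead of failing on them."""
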
    def default(self, o):
        if isinstance(o, VMSDFieldGeneric):
            return str(o)
        return json.JSONEncoder.default(self, o)

parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help='migration dump to read from', required=True)
parser.add_argument("-m", "--memory", help='dump RAM contents as well', action='store_true')
parser.add_argument("-d", "--dump", help='what to dump ("state" or "desc")', default='state')
parser.add_argument("-x", "--extract", help='extract contents into individual files', action='store_true')
args = parser.parse_args()

jsonenc = JSONEncoder(indent=4, separators=(',', ': '))

if args.extract:
    dump = MigrationDump(args.file)

    dump.read(desc_only = True)
    print("desc.json")
    f = open("desc.json", "w")
    f.truncate()
    f.write(jsonenc.encode(dump.vmsd_desc))
    f.close()

    dump.read(write_memory = True)
    dict = dump.getDict()
    print("state.json")
    f = open("state.json", "w")
    f.truncate()
    f.write(jsonenc.encode(dict))
    f.close()
elif args.dump == "state":
    dump = MigrationDump(args.file)
    dump.read(dump_memory = args.memory)
    dict = dump.getDict()
    print(jsonenc.encode(dict))
elif args.dump == "desc":
    dump = MigrationDump(args.file)
    dump.read(desc_only = True)
    print(jsonenc.encode(dump.vmsd_desc))
else:
    raise Exception("Please specify either -x, -d state or -d desc")