[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH 3/3] Add migration stream analysis script
From: |
Alexander Graf |
Subject: |
[Qemu-devel] [PATCH 3/3] Add migration stream analysis script |
Date: |
Wed, 23 Oct 2013 15:11:24 +0200 |
This patch adds a python tool to the scripts directory that can read
a dumped migration stream which contains the debug-migration device
and construct a human-readable JSON stream out of it.
It's very simple to use:
$ qemu-system-x86_64 -device debug-migration
(qemu) migrate "exec:cat > mig"
$ ./scripts/analyze-migration.py -f mig
Signed-off-by: Alexander Graf <address@hidden>
---
scripts/analyze-migration.py | 483 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 483 insertions(+)
create mode 100755 scripts/analyze-migration.py
diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
new file mode 100755
index 0000000..bf70749
--- /dev/null
+++ b/scripts/analyze-migration.py
@@ -0,0 +1,483 @@
+#!/usr/bin/env python
+#
+# Migration Stream Analyzer
+#
+# Copyright (c) 2013 Alexander Graf <address@hidden>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+
import argparse
import collections
import json
import os
import pprint
import struct

import numpy as np
+
class MigrationFile(object):
    """Binary reader for a QEMU migration stream dump.

    All fixed-width integers in the stream are big-endian. The stdlib
    struct module is used for decoding; the previous numpy-based
    implementation relied on np.asscalar/np.fromfile, which are
    deprecated or removed in current numpy releases.
    """

    def __init__(self, filename):
        self.filename = filename
        self.file = open(self.filename, "rb")

    def read64(self):
        """Read a big-endian signed 64 bit integer."""
        return struct.unpack('>q', self.file.read(8))[0]

    def read32(self):
        """Read a big-endian signed 32 bit integer."""
        return struct.unpack('>i', self.file.read(4))[0]

    def read8(self):
        """Read a signed 8 bit integer."""
        return struct.unpack('>b', self.file.read(1))[0]

    def readstr(self, len = None):
        """Read a string; if len is None it is preceded by an 8 bit length.

        The parameter keeps its historical name `len` (shadowing the
        builtin) for keyword-argument compatibility with existing callers.
        """
        return self.readvar(len).decode('utf-8')

    def readvar(self, size = None):
        """Read a binary blob; if size is None it is preceded by an 8 bit
        length. Raises on a short read (truncated stream)."""
        if size is None:
            size = self.read8()
        if size == 0:
            return b""
        value = self.file.read(size)
        if len(value) != size:
            raise Exception("Unexpected end of %s at 0x%x" % (self.filename,
                                                              self.file.tell()))
        return value

    # Search the current file from the current position onwards for a JSON
    # migration descriptor. Returns the JSON string blob.
    def read_migration_debug_json(self):
        pos = self.file.tell()
        # Scan the remainder of the file for the device's magic marker.
        data = self.file.read()
        dbgpos = data.find(b"Debug Migration")
        if dbgpos == -1:
            raise Exception("No Debug Migration device found")

        # read() does not close the file; just restore the old position.
        self.file.seek(pos, 0)

        # We assume that our JSON blob starts after the "Debug Migration"
        # magic (15 chars + NUL) and is itself null terminated.
        return data[(dbgpos + 16):].split(b'\0', 1)[0].decode('utf-8')

    def close(self):
        self.file.close()
+
+
class RamSection(object):
    """Decoder for the hardcoded "ram" section of a migration stream.

    Walks the RAM records (block list, compressed pages, raw pages) and
    skips the page payloads; only the block name/length list is retained.
    """

    RAM_SAVE_FLAG_COMPRESS = 0x02
    RAM_SAVE_FLAG_MEM_SIZE = 0x04
    RAM_SAVE_FLAG_PAGE     = 0x08
    RAM_SAVE_FLAG_EOS      = 0x10
    RAM_SAVE_FLAG_CONTINUE = 0x20
    RAM_SAVE_FLAG_XBZRLE   = 0x40
    RAM_SAVE_FLAG_HOOK     = 0x80
    # This can be dynamic, but all targets we care about have 4k pages
    TARGET_PAGE_SIZE = 0x1000

    def __init__(self, file, version_id, device, section_key):
        if version_id != 4:
            raise Exception("Unknown RAM version %d" % version_id)

        self.file = file
        self.section_key = section_key
        # (name, length) per RAM block. Must be instance state: the old
        # class-level `blocks = []` was shared between all instances.
        self.blocks = []

    def read(self):
        """Consume RAM records until the end-of-section flag is seen."""
        while True:
            addr = self.file.read64()
            # The low 12 bits of every address carry the record flags.
            flags = addr & 0xfff
            addr &= 0xfffffffffffff000

            if flags & self.RAM_SAVE_FLAG_MEM_SIZE:
                # Header record: the list of (name, length) RAM blocks.
                while True:
                    namelen = self.file.read8()
                    # We assume that no RAM chunk is big enough to ever
                    # hit the first byte of the address, so when we see
                    # a zero here we know it has to be an address, not the
                    # length of the next block.
                    if namelen == 0:
                        self.file.file.seek(-1, 1)
                        break
                    name = self.file.readstr(len = namelen)
                    # Don't shadow the len() builtin with the block length.
                    block_len = self.file.read64()
                    self.blocks.append((name, block_len))
                flags &= ~self.RAM_SAVE_FLAG_MEM_SIZE

            if flags & self.RAM_SAVE_FLAG_COMPRESS:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    name = self.file.readstr()
                fill_char = self.file.read8()
                # The page in question would be filled with fill_char now
                flags &= ~self.RAM_SAVE_FLAG_COMPRESS
            elif flags & self.RAM_SAVE_FLAG_PAGE:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    name = self.file.readstr()
                # Just skip RAM data for now
                self.file.file.seek(self.TARGET_PAGE_SIZE, 1)
                flags &= ~self.RAM_SAVE_FLAG_PAGE
            elif flags & self.RAM_SAVE_FLAG_XBZRLE:
                raise Exception("XBZRLE RAM compression is not supported yet")
            elif flags & self.RAM_SAVE_FLAG_HOOK:
                raise Exception("RAM hooks don't make sense with files")

            # End of RAM section
            if flags & self.RAM_SAVE_FLAG_EOS:
                break

            if flags != 0:
                raise Exception("Unknown RAM flags: %x" % flags)

    def getDict(self):
        # RAM contents are skipped, so there is nothing to report.
        return ""
+
class VMSDFieldGeneric(object):
    """Fallback decoder for any VMSD field type.

    Reads the field's raw bytes (the size comes from the VMSD
    description) and renders them as a space-separated hex dump.
    """

    def __init__(self, desc, file):
        self.file = file
        self.desc = desc
        self.data = ""

    def __repr__(self):
        return str(self.__str__())

    def __str__(self):
        # Iterating bytes yields ints on Python 3 but one-char strings on
        # Python 2; the old unconditional ord(c) broke on Python 3.
        return " ".join("{0:02x}".format(c if isinstance(c, int) else ord(c))
                        for c in self.data)

    def getDict(self):
        return self.__str__()

    def read(self):
        """Read the raw payload; returns the bytes and stores them in
        self.data."""
        size = int(self.desc['size'])
        self.data = self.file.readvar(size)
        return self.data
+
class VMSDFieldInt(VMSDFieldGeneric):
    """Big-endian signed integer field of 1, 2, 4 or 8 bytes.

    Decodes both the signed (sdata) and unsigned (udata) interpretation
    of the raw bytes; self.data ends up holding the signed value.
    """

    def __init__(self, desc, file):
        super(VMSDFieldInt, self).__init__(desc, file)
        self.size = int(desc['size'])
        # e.g. size 4 -> '0x%08x': zero-padded hex at the field's width.
        self.format = '0x%%0%dx' % (self.size * 2)
        self.sdtype = '>i%d' % self.size
        self.udtype = '>u%d' % self.size

    def __repr__(self):
        # Negative values show the raw (unsigned) hex pattern plus the
        # signed decimal in parentheses.
        if self.data < 0:
            return ('%s (%d)' % ((self.format % self.udata), self.data))
        else:
            return self.format % self.data

    def __str__(self):
        return self.__repr__()

    def getDict(self):
        return self.__str__()

    def read(self):
        super(VMSDFieldInt, self).read()
        # np.frombuffer replaces np.fromstring, which is deprecated and
        # has been removed from current numpy releases.
        self.sdata = np.frombuffer(self.data, count=1, dtype=(self.sdtype))[0]
        self.udata = np.frombuffer(self.data, count=1, dtype=(self.udtype))[0]
        self.data = self.sdata
        return self.data
+
class VMSDFieldUInt(VMSDFieldInt):
    """Unsigned integer field: same wire format as VMSDFieldInt, but the
    value exposed is the unsigned interpretation of the bytes."""

    def __init__(self, desc, file):
        super(VMSDFieldUInt, self).__init__(desc, file)

    def read(self):
        # The parent decodes both the signed and unsigned views; publish
        # the unsigned one as this field's value.
        VMSDFieldInt.read(self)
        self.data = self.udata
        return self.data
+
class VMSDFieldIntLE(VMSDFieldInt):
    """Little-endian integer field (VMSD type "int32_le").

    The parent class decodes big-endian via self.sdtype/self.udtype, so
    those are what must be overridden. The previous code stored an
    unused self.dtype and therefore still decoded the bytes big-endian,
    byte-swapping every little-endian value.
    """

    def __init__(self, desc, file):
        super(VMSDFieldIntLE, self).__init__(desc, file)
        self.sdtype = '<i%d' % self.size
        self.udtype = '<u%d' % self.size
+
class VMSDFieldBool(VMSDFieldGeneric):
    """Single-byte boolean field: a zero byte is False, anything else True."""

    def __init__(self, desc, file):
        super(VMSDFieldBool, self).__init__(desc, file)

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def getDict(self):
        return self.data

    def read(self):
        super(VMSDFieldBool, self).read()
        # self.data[0] is an int on Python 3 bytes but a one-char string
        # on Python 2; the old `== 0` test never matched a Python 2 str,
        # so every bool decoded as True there. Accept both forms.
        first = self.data[0]
        if first == 0 or first == '\x00':
            self.data = False
        else:
            self.data = True
        return self.data
+
class VMSDFieldStruct(VMSDFieldGeneric):
    """Struct-typed field: recursively decodes the nested fields listed
    in desc['struct'], plus any trailing subsections."""

    # Stream marker introducing a subsection (mirrors MigrationDump's).
    QEMU_VM_SUBSECTION = 0x05

    def __init__(self, desc, file):
        super(VMSDFieldStruct, self).__init__(desc, file)
        # Field name -> decoded field object (or list, for array fields).
        self.data = collections.OrderedDict()

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def read(self):
        """Decode every field of the struct, then any subsections."""
        for field in self.desc['struct']['fields']:
            # Look up the reader class for this field's wire type and
            # let it consume its bytes from the stream.
            field['data'] = vmsd_field_readers[field['type']](field, self.file)
            field['data'].read()

            if 'index' in field:
                # Array element: entries arrive in index order and are
                # collected into a list under the field's name.
                if field['name'] not in self.data:
                    self.data[field['name']] = []
                a = self.data[field['name']]
                if len(a) != int(field['index']):
                    raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
                a.append(field['data'])
            else:
                self.data[field['name']] = field['data']

        if 'subsections' in self.desc['struct']:
            # Every subsection listed in the description is expected to
            # be present in the stream, in order.
            for subsection in self.desc['struct']['subsections']:
                if self.file.read8() != self.QEMU_VM_SUBSECTION:
                    raise Exception("Subsection %s not found" % subsection['vmsd_name'])
                name = self.file.readstr()
                version_id = self.file.read32()
                self.data[name] = VMSDSection(self.file, version_id, subsection, (name, 0))
                self.data[name].read()

    def getDictItem(self, value):
        # Strings would fall into the array category, treat
        # them specially
        if value.__class__ is ''.__class__:
            return value

        # EAFP cascade: try dict, then iterable, then a field object
        # with getDict(), finally fall back to the raw value.
        try:
            return self.getDictOrderedDict(value)
        except:
            try:
                return self.getDictArray(value)
            except:
                try:
                    return value.getDict()
                except:
                    return value

    def getDictArray(self, array):
        # Convert each element recursively; preserves order.
        r = []
        for value in array:
            r.append(self.getDictItem(value))
        return r

    def getDictOrderedDict(self, dict):
        # Convert each value recursively; preserves key order.
        r = collections.OrderedDict()
        for (key, value) in dict.items():
            r[key] = self.getDictItem(value)
        return r

    def getDict(self):
        """Return the decoded struct as plain (JSON-encodable) data."""
        return self.getDictOrderedDict(self.data)
+
# Map from the "type" string used in the VMSD description JSON to the
# class that decodes a field of that type. Types without a dedicated
# decoder fall back to a raw hex dump (VMSDFieldGeneric).
vmsd_field_readers = {
    "bool" : VMSDFieldBool,
    "int8" : VMSDFieldInt,
    "int16" : VMSDFieldInt,
    "int32" : VMSDFieldInt,
    "int32_equal" : VMSDFieldInt,
    "int32_le" : VMSDFieldIntLE,
    "int64" : VMSDFieldInt,
    "uint8" : VMSDFieldUInt,
    "uint16" : VMSDFieldUInt,
    "uint32" : VMSDFieldUInt,
    "uint64" : VMSDFieldUInt,
    "float64" : VMSDFieldGeneric,
    "timer" : VMSDFieldGeneric,
    "buffer" : VMSDFieldGeneric,
    "unused_buffer" : VMSDFieldGeneric,
    "bitmap" : VMSDFieldGeneric,
    "struct" : VMSDFieldStruct,
    "unknown" : VMSDFieldGeneric,
}
+
class VMSDSection(VMSDFieldStruct):
    """A device section of the stream; structurally it is nothing but a
    VMSD struct field wrapped in a one-entry description."""

    def __init__(self, file, version_id, device, section_key):
        self.file = file
        self.data = ""
        self.section_key = section_key
        has_versions = 'versions' in device
        if has_versions:
            # Normal VMSD description: one entry per version number.
            desc = device['versions'][str(version_id)]
            self.vmsd_name = device['vmsd_name']
        else:
            # A legacy non-VMSD section without detailed information.
            desc = device
            self.vmsd_name = ""

        # A section really is nothing but a FieldStruct :)
        super(VMSDSection, self).__init__({ 'struct' : desc }, file)
+
class DebugMigrationSection(VMSDSection):
    """Section emitted by the debug-migration device; its payload is the
    machine's own VMSD description JSON."""

    def __init__(self, file, version_id, device, section_key):
        super(DebugMigrationSection, self).__init__(file, version_id, device,
                                                    section_key)

    # A custom reader is needed: the buffer size of the VMSD description
    # is very unlikely to be identical between different migration files,
    # and this also lets us override the "data" field with something far
    # less convoluted than the raw VMSD description JSON.
    def read(self):
        size = self.file.read32()
        self.data['size'] = size
        self.data['magic'] = str(self.file.readstr(len = 16))
        self.data['data'] = self.file.readstr(len = int(size))

        # Don't include our VMSD description in the output
        self.data['data'] = 'Omitted for the sake of readability'
+
+
+###############################################################################
+
class MigrationDump(object):
    """Parser for a complete migration stream file.

    The RAM section has a hardcoded decoder; decoders for every other
    section are built from the VMSD description JSON embedded by the
    debug-migration device.
    """

    QEMU_VM_FILE_MAGIC    = 0x5145564d
    QEMU_VM_FILE_VERSION  = 0x00000003
    QEMU_VM_EOF           = 0x00
    QEMU_VM_SECTION_START = 0x01
    QEMU_VM_SECTION_PART  = 0x02
    QEMU_VM_SECTION_END   = 0x03
    QEMU_VM_SECTION_FULL  = 0x04
    QEMU_VM_SUBSECTION    = 0x05

    def __init__(self, filename):
        self.section_classes = { ( 'ram', 0 ) : ( RamSection, None ) }
        self.filename = filename
        self.vmsd_desc = None

    def read(self, desc_only = False):
        """Parse the stream; with desc_only=True, stop as soon as the
        VMSD description JSON has been loaded.

        Raises an Exception on bad magic/version or an unknown section
        type, and a KeyError when a section still has no decoder after
        the VMSD description was loaded.
        """
        # `f`, not `file`: don't shadow the builtin.
        f = MigrationFile(self.filename)

        # File magic
        data = f.read32()
        if data != self.QEMU_VM_FILE_MAGIC:
            raise Exception("Invalid file magic %x" % data)

        # Version (has to be v3)
        data = f.read32()
        if data != self.QEMU_VM_FILE_VERSION:
            raise Exception("Invalid version number %d" % data)

        # Read sections
        self.sections = collections.OrderedDict()

        while True:
            section_type = f.read8()
            if section_type == self.QEMU_VM_EOF:
                break
            elif section_type in (self.QEMU_VM_SECTION_START,
                                  self.QEMU_VM_SECTION_FULL):
                section_id = f.read32()
                name = f.readstr()
                instance_id = f.read32()
                version_id = f.read32()
                section_key = (name, instance_id)
                # Only the lookup is guarded, and only against KeyError;
                # the old bare `except:` also swallowed genuine decoder
                # errors raised during section construction.
                try:
                    classdesc = self.section_classes[section_key]
                except KeyError:
                    if self.vmsd_desc is not None:
                        # This is a genuinely unknown section
                        raise
                    # Try to find the migration debug device and extract
                    # parsers from there
                    self.load_vmsd_json(f)
                    # We only care about the vmsd description json, so
                    # drop out now
                    if desc_only:
                        return
                    classdesc = self.section_classes[section_key]
                section = classdesc[0](f, version_id, classdesc[1],
                                       section_key)
                self.sections[section_id] = section
                section.read()
            elif section_type in (self.QEMU_VM_SECTION_PART,
                                  self.QEMU_VM_SECTION_END):
                # Continuation of an already-announced section.
                section_id = f.read32()
                self.sections[section_id].read()
            else:
                raise Exception("Unknown section type: %d" % section_type)
        f.close()

    def load_vmsd_json(self, file):
        """Locate the debug-migration device's JSON blob in *file* and
        register a section decoder for every device it describes."""
        vmsd_json = file.read_migration_debug_json()
        self.vmsd_desc = json.loads(vmsd_json,
                                    object_pairs_hook=collections.OrderedDict)
        for device in self.vmsd_desc['devices']:
            key = (device['name'], device['instance_id'])
            value = ( VMSDSection, device )
            if device['name'] == 'debug-migration':
                # The debug device itself gets a dedicated decoder.
                value = ( DebugMigrationSection, device )
            self.section_classes[key] = value

    def getDict(self):
        """Return an OrderedDict mapping "name (section_id)" to each
        section's decoded contents."""
        r = collections.OrderedDict()
        for (key, value) in self.sections.items():
            key = "%s (%d)" % ( value.section_key[0], key )
            r[key] = value.getDict()
        return r
+
+###############################################################################
+
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that renders VMSD field objects via their hex-dump
    string form; everything else uses the standard encoding."""

    def default(self, o):
        # Raw field objects know how to print themselves; anything else
        # falls through to the base implementation (which raises).
        if isinstance(o, VMSDFieldGeneric):
            return str(o)
        return super(JSONEncoder, self).default(o)
+
# Command line handling: -f names the stream to decode, -s optionally a
# second stream to borrow the vmstate description from, -d selects what
# to print ("state" or "desc").
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help='migration dump to read from',
                    required=True)
parser.add_argument("-s", "--descriptionfile",
                    help='migration dump to read vmstate description from')
parser.add_argument("-d", "--dump", help='what to dump ("state" or "desc")',
                    default='state')
args = parser.parse_args()

jsonenc = JSONEncoder(indent=4, separators=(',', ': '))

if args.dump == "state":
    dump = MigrationDump(args.file)
    if args.descriptionfile:
        # Fetch the vmstate description from the file passed through -s
        desc_dump = MigrationDump(args.descriptionfile)
        desc_dump.read(desc_only = True)
        # and override all section readers and vmsd description in our
        # data migration file with the ones from the vmstate description
        # migration file
        dump.vmsd_desc = desc_dump.vmsd_desc
        dump.section_classes = desc_dump.section_classes
    dump.read()
    # Don't shadow the dict builtin with the decoded state.
    state = dump.getDict()
    # print() call syntax is valid on both Python 2 and Python 3; the
    # old py2-only print statement was a syntax error on Python 3.
    print(jsonenc.encode(state))
elif args.dump == "desc":
    if args.descriptionfile:
        dump = MigrationDump(args.descriptionfile)
    else:
        dump = MigrationDump(args.file)
    dump.read(desc_only = True)
    print(jsonenc.encode(dump.vmsd_desc))
else:
    raise Exception("Unknown dump type \"%s\", available: \"state\", \"desc\""
                    % args.dump)
--
1.7.12.4