#!/usr/bin/env python
#
# Migration Stream Analyzer
#
# Copyright (c) 2015 Alexander Graf <agraf@suse.de>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.

from __future__ import print_function
import numpy as np
import json
import os
import argparse
import collections


def mkdir_p(path):
    try:
        os.makedirs(path)
    except OSError:
        pass


class MigrationFile(object):
    def __init__(self, filename):
        self.filename = filename
        self.file = open(self.filename, "rb")

    def read64(self):
        return np.asscalar(np.fromfile(self.file, count=1, dtype='>i8')[0])

    def read32(self):
        return np.asscalar(np.fromfile(self.file, count=1, dtype='>i4')[0])

    def read16(self):
        return np.asscalar(np.fromfile(self.file, count=1, dtype='>i2')[0])

    def read8(self):
        return np.asscalar(np.fromfile(self.file, count=1, dtype='>i1')[0])
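
    # Note: the integer helpers above all use '>'-prefixed numpy dtypes, so
    # every value is read big-endian, matching the byte order of the QEMU
    # migration stream.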
    def readstr(self, len = None):
        if len is None:
            len = self.read8()
        if len == 0:
            return ""
        return np.fromfile(self.file, count=1, dtype=('S%d' % len))[0]

    def readvar(self, size = None):
        if size is None:
            size = self.read8()
        if size == 0:
            return ""
        value = self.file.read(size)
        if len(value) != size:
            raise Exception("Unexpected end of %s at 0x%x" % (self.filename, self.file.tell()))
        return value

    def tell(self):
        return self.file.tell()

    # The VMSD description is at the end of the file, after EOF. Look for
    # the last NULL byte, then for the beginning brace of JSON.
    def read_migration_debug_json(self):
        QEMU_VM_VMDESCRIPTION = 0x06

        # Remember the offset in the file when we started
        entrypos = self.file.tell()

        # Read the last 10MB
        self.file.seek(0, os.SEEK_END)
        endpos = self.file.tell()
        self.file.seek(max(-endpos, -10 * 1024 * 1024), os.SEEK_END)
        datapos = self.file.tell()
        data = self.file.read()
        # The full file read closed the file as well, reopen it
        self.file = open(self.filename, "rb")

        # Find the last NULL byte, then the first brace after that. This should
        # be the beginning of our JSON data.
        nulpos = data.rfind("\0")
        jsonpos = data.find("{", nulpos)

        # Check backwards from there and see whether we guessed right
        self.file.seek(datapos + jsonpos - 5, 0)
        if self.read8() != QEMU_VM_VMDESCRIPTION:
            raise Exception("No Debug Migration device found")
        jsonlen = self.read32()

        # Seek back to where we were at the beginning
        self.file.seek(entrypos, 0)

        return data[jsonpos:jsonpos + jsonlen]

    def close(self):
        self.file.close()


class RamSection(object):
    RAM_SAVE_FLAG_COMPRESS = 0x02
    RAM_SAVE_FLAG_MEM_SIZE = 0x04
    RAM_SAVE_FLAG_PAGE = 0x08
    RAM_SAVE_FLAG_EOS = 0x10
    RAM_SAVE_FLAG_CONTINUE = 0x20
    RAM_SAVE_FLAG_XBZRLE = 0x40
    RAM_SAVE_FLAG_HOOK = 0x80
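
    # Each RAM page record starts with a 64-bit value: the bits below
    # TARGET_PAGE_SIZE carry the RAM_SAVE_FLAG_* flags, and the page-aligned
    # remainder is the guest address. read() below splits the two apart.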
    def __init__(self, file, version_id, ramargs, section_key):
        if version_id != 4:
            raise Exception("Unknown RAM version %d" % version_id)

        self.file = file
        self.section_key = section_key
        self.TARGET_PAGE_SIZE = ramargs['page_size']
        self.dump_memory = ramargs['dump_memory']
        self.write_memory = ramargs['write_memory']
        self.sizeinfo = collections.OrderedDict()
        self.data = collections.OrderedDict()
        self.data['section sizes'] = self.sizeinfo
        self.name = ''
        if self.write_memory:
            self.files = { }
        if self.dump_memory:
            self.memory = collections.OrderedDict()
            self.data['memory'] = self.memory

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def getDict(self):
        return self.data

    def read(self):
        # Read all RAM sections
        while True:
            addr = self.file.read64()
            flags = addr & (self.TARGET_PAGE_SIZE - 1)
            addr &= ~(self.TARGET_PAGE_SIZE - 1)

            if flags & self.RAM_SAVE_FLAG_MEM_SIZE:
                while True:
                    namelen = self.file.read8()
                    # We assume that no RAM chunk is big enough to ever
                    # hit the first byte of the address, so when we see
                    # a zero here we know it has to be an address, not the
                    # length of the next block.
                    if namelen == 0:
                        self.file.file.seek(-1, 1)
                        break
                    self.name = self.file.readstr(len = namelen)
                    len = self.file.read64()
                    self.sizeinfo[self.name] = '0x%016x' % len
                    if self.write_memory:
                        print(self.name)
                        mkdir_p('./' + os.path.dirname(self.name))
                        f = open('./' + self.name, "wb")
                        f.truncate(0)
                        f.truncate(len)
                        self.files[self.name] = f
                flags &= ~self.RAM_SAVE_FLAG_MEM_SIZE

            if flags & self.RAM_SAVE_FLAG_COMPRESS:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    self.name = self.file.readstr()
                fill_char = self.file.read8()
                # The page in question is filled with fill_char now
                if self.write_memory and fill_char != 0:
                    self.files[self.name].seek(addr, os.SEEK_SET)
                    self.files[self.name].write(chr(fill_char) * self.TARGET_PAGE_SIZE)
                if self.dump_memory:
                    self.memory['%s (0x%016x)' % (self.name, addr)] = 'Filled with 0x%02x' % fill_char
                flags &= ~self.RAM_SAVE_FLAG_COMPRESS
            elif flags & self.RAM_SAVE_FLAG_PAGE:
                if flags & self.RAM_SAVE_FLAG_CONTINUE:
                    flags &= ~self.RAM_SAVE_FLAG_CONTINUE
                else:
                    self.name = self.file.readstr()

                if self.write_memory or self.dump_memory:
                    data = self.file.readvar(size = self.TARGET_PAGE_SIZE)
                else: # Just skip RAM data
                    self.file.file.seek(self.TARGET_PAGE_SIZE, 1)

                if self.write_memory:
                    self.files[self.name].seek(addr, os.SEEK_SET)
                    self.files[self.name].write(data)
                if self.dump_memory:
                    hexdata = " ".join("{0:02x}".format(ord(c)) for c in data)
                    self.memory['%s (0x%016x)' % (self.name, addr)] = hexdata

                flags &= ~self.RAM_SAVE_FLAG_PAGE
            elif flags & self.RAM_SAVE_FLAG_XBZRLE:
                raise Exception("XBZRLE RAM compression is not supported yet")
            elif flags & self.RAM_SAVE_FLAG_HOOK:
                raise Exception("RAM hooks don't make sense with files")

            # End of RAM section
            if flags & self.RAM_SAVE_FLAG_EOS:
                break

            if flags != 0:
                raise Exception("Unknown RAM flags: %x" % flags)

    def __del__(self):
        if self.write_memory:
            for key in self.files:
                self.files[key].close()


class HTABSection(object):
    HASH_PTE_SIZE_64 = 16

    def __init__(self, file, version_id, device, section_key):
        if version_id != 1:
            raise Exception("Unknown HTAB version %d" % version_id)

        self.file = file
        self.section_key = section_key

    def read(self):
        header = self.file.read32()

        if (header == -1):
            # "no HPT" encoding
            return

        if (header > 0):
            # First section, just the hash shift
            return

        # Read until end marker
        while True:
            index = self.file.read32()
            n_valid = self.file.read16()
            n_invalid = self.file.read16()

            if index == 0 and n_valid == 0 and n_invalid == 0:
                break

            self.file.readvar(n_valid * self.HASH_PTE_SIZE_64)

    def getDict(self):
        return ""


class ConfigurationSection(object):
    def __init__(self, file):
        self.file = file

    def read(self):
        name_len = self.file.read32()
        name = self.file.readstr(len = name_len)
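        # The configuration section only carries the machine type name; it is
        # read here to keep the stream position in sync and is otherwise
        # ignored.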
class VMSDFieldGeneric(object):
    def __init__(self, desc, file):
        self.file = file
        self.desc = desc
        self.data = ""

    def __repr__(self):
        return str(self.__str__())

    def __str__(self):
        return " ".join("{0:02x}".format(ord(c)) for c in self.data)

    def getDict(self):
        return self.__str__()

    def read(self):
        size = int(self.desc['size'])
        self.data = self.file.readvar(size)
        return self.data


class VMSDFieldInt(VMSDFieldGeneric):
    def __init__(self, desc, file):
        super(VMSDFieldInt, self).__init__(desc, file)
        self.size = int(desc['size'])
        self.format = '0x%%0%dx' % (self.size * 2)
        self.sdtype = '>i%d' % self.size
        self.udtype = '>u%d' % self.size

    def __repr__(self):
        if self.data < 0:
            return ('%s (%d)' % ((self.format % self.udata), self.data))
        else:
            return self.format % self.data

    def __str__(self):
        return self.__repr__()

    def getDict(self):
        return self.__str__()

    def read(self):
        super(VMSDFieldInt, self).read()
        self.sdata = np.fromstring(self.data, count=1, dtype=(self.sdtype))[0]
        self.udata = np.fromstring(self.data, count=1, dtype=(self.udtype))[0]
        self.data = self.sdata
        return self.data


class VMSDFieldUInt(VMSDFieldInt):
    def __init__(self, desc, file):
        super(VMSDFieldUInt, self).__init__(desc, file)

    def read(self):
        super(VMSDFieldUInt, self).read()
        self.data = self.udata
        return self.data


class VMSDFieldIntLE(VMSDFieldInt):
    def __init__(self, desc, file):
        super(VMSDFieldIntLE, self).__init__(desc, file)
        self.dtype = '<i%d' % self.size


class VMSDFieldBool(VMSDFieldGeneric):
    def __init__(self, desc, file):
        super(VMSDFieldBool, self).__init__(desc, file)

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def getDict(self):
        return self.data

    def read(self):
        super(VMSDFieldBool, self).read()
        if self.data[0] == 0:
            self.data = False
        else:
            self.data = True
        return self.data


class VMSDFieldStruct(VMSDFieldGeneric):
    QEMU_VM_SUBSECTION = 0x05

    def __init__(self, desc, file):
        super(VMSDFieldStruct, self).__init__(desc, file)
        self.data = collections.OrderedDict()

        # When we see compressed array elements, unfold them here
        new_fields = []
        for field in self.desc['struct']['fields']:
            if not 'array_len' in field:
                new_fields.append(field)
                continue
            array_len = field.pop('array_len')
            field['index'] = 0
            new_fields.append(field)
            for i in xrange(1, array_len):
                c = field.copy()
                c['index'] = i
                new_fields.append(c)

        self.desc['struct']['fields'] = new_fields

    def __repr__(self):
        return self.data.__repr__()

    def __str__(self):
        return self.data.__str__()

    def read(self):
        for field in self.desc['struct']['fields']:
            try:
                reader = vmsd_field_readers[field['type']]
            except:
                reader = VMSDFieldGeneric

            field['data'] = reader(field, self.file)
            field['data'].read()

            if 'index' in field:
                if field['name'] not in self.data:
                    self.data[field['name']] = []
                a = self.data[field['name']]
                if len(a) != int(field['index']):
                    raise Exception("internal index of data field unmatched (%d/%d)" % (len(a), int(field['index'])))
                a.append(field['data'])
            else:
                self.data[field['name']] = field['data']

        if 'subsections' in self.desc['struct']:
            for subsection in self.desc['struct']['subsections']:
                if self.file.read8() != self.QEMU_VM_SUBSECTION:
                    raise Exception("Subsection %s not found at offset %x" % ( subsection['vmsd_name'], self.file.tell()))
                name = self.file.readstr()
                version_id = self.file.read32()
                self.data[name] = VMSDSection(self.file, version_id, subsection, (name, 0))
                self.data[name].read()

    def getDictItem(self, value):
        # Strings would fall into the array category, treat
        # them specially
        if value.__class__ is ''.__class__:
            return value

        try:
            return self.getDictOrderedDict(value)
        except:
            try:
                return self.getDictArray(value)
            except:
                try:
                    return value.getDict()
                except:
                    return value

    def getDictArray(self, array):
        r = []
        for value in array:
            r.append(self.getDictItem(value))
        return r

    def getDictOrderedDict(self, dict):
        r = collections.OrderedDict()
        for (key, value) in dict.items():
            r[key] = self.getDictItem(value)
        return r

    def getDict(self):
        return self.getDictOrderedDict(self.data)


vmsd_field_readers = {
    "bool" : VMSDFieldBool,
    "int8" : VMSDFieldInt,
    "int16" : VMSDFieldInt,
    "int32" : VMSDFieldInt,
    "int32 equal" : VMSDFieldInt,
    "int32 le" : VMSDFieldIntLE,
    "int64" : VMSDFieldInt,
    "uint8" : VMSDFieldUInt,
    "uint16" : VMSDFieldUInt,
    "uint32" : VMSDFieldUInt,
    "uint32 equal" : VMSDFieldUInt,
    "uint64" : VMSDFieldUInt,
    "int64 equal" : VMSDFieldInt,
    "uint8 equal" : VMSDFieldInt,
    "uint16 equal" : VMSDFieldInt,
    "float64" : VMSDFieldGeneric,
    "timer" : VMSDFieldGeneric,
    "buffer" : VMSDFieldGeneric,
    "unused_buffer" : VMSDFieldGeneric,
    "bitmap" : VMSDFieldGeneric,
    "struct" : VMSDFieldStruct,
    "unknown" : VMSDFieldGeneric,
}
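
# Field types missing from the table above fall back to VMSDFieldGeneric via
# the try/except in VMSDFieldStruct.read(), so their raw bytes still end up in
# the dump as a hex string.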
class VMSDSection(VMSDFieldStruct):
    def __init__(self, file, version_id, device, section_key):
        self.file = file
        self.data = ""
        self.vmsd_name = ""
        self.section_key = section_key
        desc = device
        if 'vmsd_name' in device:
            self.vmsd_name = device['vmsd_name']

        # A section really is nothing but a FieldStruct :)
        super(VMSDSection, self).__init__({ 'struct' : desc }, file)


###############################################################################

class MigrationDump(object):
    QEMU_VM_FILE_MAGIC = 0x5145564d
    QEMU_VM_FILE_VERSION = 0x00000003
    QEMU_VM_EOF = 0x00
    QEMU_VM_SECTION_START = 0x01
    QEMU_VM_SECTION_PART = 0x02
    QEMU_VM_SECTION_END = 0x03
    QEMU_VM_SECTION_FULL = 0x04
    QEMU_VM_SUBSECTION = 0x05
    QEMU_VM_VMDESCRIPTION = 0x06
    QEMU_VM_CONFIGURATION = 0x07
    QEMU_VM_SECTION_FOOTER = 0x7e
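
    # Record layout parsed by read() below, keyed by the leading type byte:
    #   SECTION_START/FULL: u32 section_id, name (u8 length + bytes),
    #                       u32 instance_id, u32 version_id, section payload
    #   SECTION_PART/END:   u32 section_id, further payload for that section
    #   SECTION_FOOTER:     u32 section_id, which must match the section that
    #                       was just read
    #   CONFIGURATION:      u32 name length, machine type name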
    def __init__(self, filename):
        self.section_classes = { ( 'ram', 0 ) : [ RamSection, None ],
                                 ( 'spapr/htab', 0) : ( HTABSection, None ) }
        self.filename = filename
        self.vmsd_desc = None

    def read(self, desc_only = False, dump_memory = False, write_memory = False):
        # Read in the whole file
        file = MigrationFile(self.filename)

        # File magic
        data = file.read32()
        if data != self.QEMU_VM_FILE_MAGIC:
            raise Exception("Invalid file magic %x" % data)

        # Version (has to be v3)
        data = file.read32()
        if data != self.QEMU_VM_FILE_VERSION:
            raise Exception("Invalid version number %d" % data)

        self.load_vmsd_json(file)

        # Read sections
        self.sections = collections.OrderedDict()

        if desc_only:
            return

        ramargs = {}
        ramargs['page_size'] = self.vmsd_desc['page_size']
        ramargs['dump_memory'] = dump_memory
        ramargs['write_memory'] = write_memory
        self.section_classes[('ram', 0)][1] = ramargs

        while True:
            section_type = file.read8()
            if section_type == self.QEMU_VM_EOF:
                break
            elif section_type == self.QEMU_VM_CONFIGURATION:
                section = ConfigurationSection(file)
                section.read()
            elif section_type == self.QEMU_VM_SECTION_START or section_type == self.QEMU_VM_SECTION_FULL:
                section_id = file.read32()
                name = file.readstr()
                instance_id = file.read32()
                version_id = file.read32()
                section_key = (name, instance_id)
                classdesc = self.section_classes[section_key]
                section = classdesc[0](file, version_id, classdesc[1], section_key)
                self.sections[section_id] = section
                section.read()
            elif section_type == self.QEMU_VM_SECTION_PART or section_type == self.QEMU_VM_SECTION_END:
                section_id = file.read32()
                self.sections[section_id].read()
            elif section_type == self.QEMU_VM_SECTION_FOOTER:
                read_section_id = file.read32()
                if read_section_id != section_id:
                    raise Exception("Mismatched section footer: %x vs %x" % (read_section_id, section_id))
            else:
                raise Exception("Unknown section type: %d" % section_type)
        file.close()

    def load_vmsd_json(self, file):
        vmsd_json = file.read_migration_debug_json()
        self.vmsd_desc = json.loads(vmsd_json, object_pairs_hook=collections.OrderedDict)

        for device in self.vmsd_desc['devices']:
            key = (device['name'], device['instance_id'])
            value = ( VMSDSection, device )
            self.section_classes[key] = value

    def getDict(self):
        r = collections.OrderedDict()
        for (key, value) in self.sections.items():
            key = "%s (%d)" % ( value.section_key[0], key )
            r[key] = value.getDict()
        return r


###############################################################################

class JSONEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, VMSDFieldGeneric):
            return str(o)
        return json.JSONEncoder.default(self, o)


parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", help='migration dump to read from', required=True)
parser.add_argument("-m", "--memory", help='dump RAM contents as well', action='store_true')
parser.add_argument("-d", "--dump", help='what to dump ("state" or "desc")', default='state')
parser.add_argument("-x", "--extract", help='extract contents into individual files', action='store_true')
args = parser.parse_args()

jsonenc = JSONEncoder(indent=4, separators=(',', ': '))

if args.extract:
    dump = MigrationDump(args.file)

    dump.read(desc_only = True)
    print("desc.json")
    f = open("desc.json", "wb")
    f.truncate()
    f.write(jsonenc.encode(dump.vmsd_desc))
    f.close()

    dump.read(write_memory = True)
    dict = dump.getDict()
    print("state.json")
    f = open("state.json", "wb")
    f.truncate()
    f.write(jsonenc.encode(dict))
    f.close()
elif args.dump == "state":
    dump = MigrationDump(args.file)
    dump.read(dump_memory = args.memory)
    dict = dump.getDict()
    print(jsonenc.encode(dict))
elif args.dump == "desc":
    dump = MigrationDump(args.file)
    dump.read(desc_only = True)
    print(jsonenc.encode(dump.vmsd_desc))
else:
    raise Exception("Please specify either -x, -d state or -d desc")
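
# Example usage (illustrative; how the migration dump is produced depends on
# your QEMU setup, for instance the monitor command
#   migrate "exec:cat > mig"
# writes the migration stream to a file named 'mig'):
#
#   ./analyze-migration.py -f mig -d desc       # print the VMSD description
#   ./analyze-migration.py -f mig -d state      # print the device state
#   ./analyze-migration.py -f mig -m -d state   # include RAM page contents
#   ./analyze-migration.py -f mig -x            # extract desc.json, state.json
#                                               # and the RAM block contents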