dump.c

/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "elf.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/sysemu.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/cpus.h"
#include "qapi/error.h"
#include "qmp-commands.h"

#include <zlib.h>
#ifdef CONFIG_LZO
#include <lzo/lzo1x.h>
#endif
#ifdef CONFIG_SNAPPY
#include <snappy-c.h>
#endif

#ifndef ELF_MACHINE_UNAME
#define ELF_MACHINE_UNAME "Unknown"
#endif

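/*
 * Helpers that convert a host-endian value into the guest's byte order, as
 * recorded in s->dump_info.d_endian (ELFDATA2LSB or ELFDATA2MSB). Every
 * multi-byte field written into the vmcore headers below goes through one of
 * these, e.g.:
 *
 *     elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
 */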
static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

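/*
 * All state for one dump operation: the guest-physical memory blocks and
 * memory mappings to be written, the architecture info used to lay out the
 * ELF or kdump headers, and the offsets computed while writing the vmcore.
 */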
typedef struct DumpState {
    GuestPhysBlockList guest_phys_blocks;
    ArchDumpInfo dump_info;
    MemoryMappingList list;
    uint16_t phdr_num;
    uint32_t sh_info;
    bool have_section;
    bool resume;
    ssize_t note_size;
    hwaddr memory_offset;
    int fd;

    GuestPhysBlock *next_block;
    ram_addr_t start;
    bool has_filter;
    int64_t begin;
    int64_t length;
    Error **errp;

    uint8_t *note_buf;          /* buffer for notes */
    size_t note_buf_offset;     /* the writing place in note_buf */
    uint32_t nr_cpus;           /* number of guest CPUs */
    size_t page_size;           /* guest's page size */
    uint32_t page_shift;        /* guest's page shift */
    uint64_t max_mapnr;         /* highest page frame number of the guest's
                                   physical memory */
    size_t len_dump_bitmap;     /* the size of the place used to store
                                   dump_bitmap in vmcore */
    off_t offset_dump_bitmap;   /* offset of dump_bitmap part in vmcore */
    off_t offset_page;          /* offset of page part in vmcore */
    size_t num_dumpable;        /* number of pages that can be dumped */
    uint32_t flag_compress;     /* indicate the compression format */
} DumpState;

static int dump_cleanup(DumpState *s)
{
    int ret = 0;

    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    if (s->fd != -1) {
        close(s->fd);
    }
    if (s->resume) {
        vm_start();
    }

    return ret;
}

static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}

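/*
 * fd_write_vmcore() matches the WriteCoreDumpFunction callback signature, so
 * the note-writing code can target either the output fd (this function) or
 * an in-memory buffer (buf_write_note() further below).
 */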
static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

static int write_elf64_header(DumpState *s)
{
    Elf64_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_header(DumpState *s)
{
    Elf32_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset,
                            hwaddr filesz)
{
    Elf64_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target64(offset, endian);
    phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
    phdr.p_filesz = cpu_convert_to_target64(filesz, endian);
    phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset,
                            hwaddr filesz)
{
    Elf32_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target32(offset, endian);
    phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
    phdr.p_filesz = cpu_convert_to_target32(filesz, endian);
    phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf64_note(DumpState *s)
{
    Elf64_Phdr phdr;
    int endian = s->dump_info.d_endian;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target64(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

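/* the per-CPU notes identify CPUs by a 1-based index, hence the "+ 1" */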
static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

static int write_elf64_notes(WriteCoreDumpFunction f, DumpState *s)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

static int write_elf32_note(DumpState *s)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int endian = s->dump_info.d_endian;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target32(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_notes(WriteCoreDumpFunction f, DumpState *s)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

static int write_elf_section(DumpState *s, int type)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int endian = s->dump_info.d_endian;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write section header table.\n");
        return -1;
    }

    return 0;
}

static int write_data(DumpState *s, void *buf, int length)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to save memory.\n");
        return -1;
    }

    return 0;
}

/* write the memory to vmcore. 1 page per I/O. */
static int write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                        int64_t size)
{
    int64_t i;
    int ret;

    for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
        ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
                         TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    if ((size % TARGET_PAGE_SIZE) != 0) {
        ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
                         size % TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}

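/*
 * write one PT_LOAD program header per memory mapping. phdr_index starts at
 * 1 because index 0 is the PT_NOTE header written separately.
 */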
static int write_elf_loads(DumpState *s)
{
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    int ret;
    uint32_t max_index;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (s->dump_info.d_class == ELFCLASS64) {
            ret = write_elf64_load(s, memory_mapping, phdr_index++, offset,
                                   filesz);
        } else {
            ret = write_elf32_load(s, memory_mapping, phdr_index++, offset,
                                   filesz);
        }

        if (ret < 0) {
            return -1;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }

    return 0;
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static int dump_begin(DumpState *s)
{
    int ret;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        ret = write_elf64_header(s);
    } else {
        ret = write_elf32_header(s);
    }
    if (ret < 0) {
        return -1;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        if (write_elf64_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 1) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf64_notes(fd_write_vmcore, s) < 0) {
            return -1;
        }
    } else {
        /* write PT_NOTE to vmcore */
        if (write_elf32_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 0) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf32_notes(fd_write_vmcore, s) < 0) {
            return -1;
        }
    }

    return 0;
}

/* the dump has completed: clean up and resume the VM if it was running */
static int dump_completed(DumpState *s)
{
    dump_cleanup(s);
    return 0;
}

static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t size;
    int ret;

    while (1) {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        ret = write_memory(s, block, s->start, size);
        if (ret == -1) {
            return ret;
        }

        ret = get_next_block(s, block);
        if (ret == 1) {
            dump_completed(s);
            return 0;
        }
    }
}

static int create_vmcore(DumpState *s)
{
    int ret;

    ret = dump_begin(s);
    if (ret < 0) {
        return -1;
    }

    ret = dump_iterate(s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

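/*
 * The kdump-compressed output is wrapped in makedumpfile's "flat" format:
 * a start header, then a sequence of (offset, size, data) records emitted by
 * write_buffer(), terminated by an end marker. This allows writing to a
 * non-seekable fd; the receiver (e.g. "makedumpfile -R") can rearrange the
 * records into a normal kdump file.
 */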
static int write_start_flat_header(int fd)
{
    uint8_t *buf;
    MakedumpfileHeader mh;
    size_t written_size;
    int ret = 0;

    memset(&mh, 0, sizeof(mh));
    strncpy(mh.signature, MAKEDUMPFILE_SIGNATURE,
            strlen(MAKEDUMPFILE_SIGNATURE));

    mh.type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh.version = cpu_to_be64(VERSION_FLAT_HEADER);

    buf = g_malloc0(MAX_SIZE_MDF_HEADER);
    memcpy(buf, &mh, sizeof(mh));

    written_size = qemu_write_full(fd, buf, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(buf);
    return ret;
}

static int write_end_flat_header(int fd)
{
    MakedumpfileDataHeader mdh;
    size_t written_size;

    mdh.offset = END_FLAG_FLAT_HEADER;
    mdh.buf_size = END_FLAG_FLAT_HEADER;

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    return 0;
}

static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

static int buf_write_note(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;

    /* note_buf is not enough */
    if (s->note_buf_offset + size > s->note_size) {
        return -1;
    }

    memcpy(s->note_buf + s->note_buf_offset, buf, size);

    s->note_buf_offset += size;

    return 0;
}

/* write common header, sub header and elf note to vmcore */
static int create_header32(DumpState *s)
{
    int ret = 0;
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    int endian = s->dump_info.d_endian;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;

    /* write common header, the version of the kdump-compressed format is 6 */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_convert_to_target32(6, endian);
    block_size = s->page_size;
    dh->block_size = cpu_convert_to_target32(block_size, endian);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_convert_to_target32(sub_hdr_size, endian);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_convert_to_target32(MIN(s->max_mapnr, UINT_MAX),
                                            endian);
    dh->nr_cpus = cpu_convert_to_target32(s->nr_cpus, endian);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_convert_to_target32(bitmap_blocks, endian);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME,
            sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_convert_to_target32(status, endian);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        dump_error(s, "dump: failed to write disk dump header.\n");
        ret = -1;
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_convert_to_target64(s->max_mapnr, endian);
    kh->phys_base = cpu_convert_to_target32(PHYS_BASE, endian);
    kh->dump_level = cpu_convert_to_target32(DUMP_LEVEL, endian);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    kh->offset_note = cpu_convert_to_target64(offset_note, endian);
    kh->note_size = cpu_convert_to_target32(s->note_size, endian);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        dump_error(s, "dump: failed to write kdump sub header.\n");
        ret = -1;
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    if (write_elf32_notes(buf_write_note, s) < 0) {
        ret = -1;
        goto out;
    }

    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        dump_error(s, "dump: failed to write notes.\n");
        ret = -1;
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);

    return ret;
}

/* write common header, sub header and elf note to vmcore */
static int create_header64(DumpState *s)
{
    int ret = 0;
    DiskDumpHeader64 *dh = NULL;
    KdumpSubHeader64 *kh = NULL;
    size_t size;
    int endian = s->dump_info.d_endian;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;

    /* write common header, the version of the kdump-compressed format is 6 */
    size = sizeof(DiskDumpHeader64);
    dh = g_malloc0(size);

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_convert_to_target32(6, endian);
    block_size = s->page_size;
    dh->block_size = cpu_convert_to_target32(block_size, endian);
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_convert_to_target32(sub_hdr_size, endian);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_convert_to_target32(MIN(s->max_mapnr, UINT_MAX),
                                            endian);
    dh->nr_cpus = cpu_convert_to_target32(s->nr_cpus, endian);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_convert_to_target32(bitmap_blocks, endian);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME,
            sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_convert_to_target32(status, endian);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        dump_error(s, "dump: failed to write disk dump header.\n");
        ret = -1;
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader64);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_convert_to_target64(s->max_mapnr, endian);
    kh->phys_base = cpu_convert_to_target64(PHYS_BASE, endian);
    kh->dump_level = cpu_convert_to_target32(DUMP_LEVEL, endian);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    kh->offset_note = cpu_convert_to_target64(offset_note, endian);
    kh->note_size = cpu_convert_to_target64(s->note_size, endian);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        dump_error(s, "dump: failed to write kdump sub header.\n");
        ret = -1;
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    if (write_elf64_notes(buf_write_note, s) < 0) {
        ret = -1;
        goto out;
    }

    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        dump_error(s, "dump: failed to write notes.\n");
        ret = -1;
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);

    return ret;
}

static int write_dump_header(DumpState *s)
{
    if (s->dump_info.d_machine == EM_386) {
        return create_header32(s);
    } else {
        return create_header64(s);
    }
}

/*
 * Set the bits in dump_bitmap sequentially. Bits before last_pfn must not be
 * rewritten, so to set the first bit, set both last_pfn and pfn to 0.
 * set_dump_bitmap always leaves the most recently set bit un-synced; setting
 * bit (last_pfn + PFN_BUFBITMAP) to 0 flushes the content of buf into the
 * vmcore, i.e. it synchronizes the un-synced bits into the vmcore.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;

    /* should not set the previous place */
    assert(last_pfn <= pfn);

    /*
     * if the bit needed to be set is not cached in buf, flush the data in buf
     * to vmcore first.
     * making new_offset bigger than old_offset also syncs remaining data
     * into vmcore.
     */
    old_offset = BUFSIZE_BITMAP * (last_pfn / PFN_BUFBITMAP);
    new_offset = BUFSIZE_BITMAP * (pfn / PFN_BUFBITMAP);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         BUFSIZE_BITMAP) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         BUFSIZE_BITMAP) < 0) {
            return -1;
        }

        memset(buf, 0, BUFSIZE_BITMAP);
        old_offset += BUFSIZE_BITMAP;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % PFN_BUFBITMAP) / CHAR_BIT;
    bit = (pfn % PFN_BUFBITMAP) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}

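/*
 * A worked example of the arithmetic in set_dump_bitmap() (the numbers are
 * illustrative only, assuming PFN_BUFBITMAP == 32768, i.e. a 4 KiB
 * BUFSIZE_BITMAP): setting pfn 0x12345 touches buffer chunk
 * 0x12345 / 32768 == 2; within the cached chunk, the bit lives at byte
 * (0x12345 % 32768) / 8, bit position (0x12345 % 32768) % 8.
 */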
/*
 * Examine every page and return the page frame number and the address of the
 * page. bufptr can be NULL. note: the blocks here are supposed to reflect
 * guest-phys blocks, so block->target_start and block->target_end should be
 * integral multiples of the target page size.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr;
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        assert(block->target_start % s->page_size == 0);
        assert(block->target_end % s->page_size == 0);
        *pfnptr = paddr_to_pfn(block->target_start, s->page_shift);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
        return true;
    }

    *pfnptr = *pfnptr + 1;
    addr = pfn_to_paddr(*pfnptr, s->page_shift);

    if ((addr >= block->target_start) &&
        (addr + s->page_size <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        assert(block->target_start % s->page_size == 0);
        assert(block->target_end % s->page_size == 0);
        *pfnptr = paddr_to_pfn(block->target_start, s->page_shift);
        buf = block->host_addr;
    }

    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}

static int write_dump_bitmap(DumpState *s)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(BUFSIZE_BITMAP);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * examine memory page by page, and set the bit in dump_bitmap
     * corresponding to each existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to set dump_bitmap.\n");
            ret = -1;
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap will always leave the recently set bit un-synced. Here
     * we set bit (last_pfn + PFN_BUFBITMAP) to 0, so the set but un-synced
     * bits are synchronized into the vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + PFN_BUFBITMAP, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to sync dump_bitmap.\n");
            ret = -1;
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);

    return ret;
}

static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = BUFSIZE_DATA_CACHE;
    data_cache->buf = g_malloc0(BUFSIZE_DATA_CACHE);
    data_cache->offset = offset;
}

static int write_cache(DataCache *dc, const void *buf, size_t size,
                       bool flag_sync)
{
    /*
     * dc->buf_size must not be less than size, otherwise dc will never have
     * enough room
     */
    assert(size <= dc->buf_size);

    /*
     * if flag_sync is set, synchronize data in dc->buf into vmcore.
     * otherwise check if the space is enough for caching data in buf, if not,
     * write the data in dc->buf to dc->fd and reset dc->buf
     */
    if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
        (flag_sync && dc->data_size > 0)) {
        if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
            return -1;
        }

        dc->offset += dc->data_size;
        dc->data_size = 0;
    }

    if (!flag_sync) {
        memcpy(dc->buf + dc->data_size, buf, size);
        dc->data_size += size;
    }

    return 0;
}

static void free_data_cache(DataCache *data_cache)
{
    g_free(data_cache->buf);
}

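/*
 * Size the shared output buffer for the worst case across every enabled
 * compressor, so a single allocation can be reused no matter which method
 * each page ends up using.
 */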
static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    size_t len_buf_out_zlib, len_buf_out_lzo, len_buf_out_snappy;
    size_t len_buf_out;

    /* init buf_out */
    len_buf_out_zlib = len_buf_out_lzo = len_buf_out_snappy = 0;

    /* buf size for zlib */
    len_buf_out_zlib = compressBound(page_size);

    /* buf size for lzo */
#ifdef CONFIG_LZO
    if (flag_compress & DUMP_DH_COMPRESSED_LZO) {
        if (lzo_init() != LZO_E_OK) {
            /* return 0 to indicate lzo is unavailable */
            return 0;
        }
    }

    /*
     * LZO will expand incompressible data by a little amount. please check
     * the following URL to see the expansion calculation:
     * http://www.oberhumer.com/opensource/lzo/lzofaq.php
     */
    len_buf_out_lzo = page_size + page_size / 16 + 64 + 3;
#endif

#ifdef CONFIG_SNAPPY
    /* buf size for snappy */
    len_buf_out_snappy = snappy_max_compressed_length(page_size);
#endif

    /* get the biggest that can store all kinds of compressed page */
    len_buf_out = MAX(len_buf_out_zlib,
                      MAX(len_buf_out_lzo, len_buf_out_snappy));

    return len_buf_out;
}

/*
 * check if the page is all 0
 */
static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
{
    return buffer_is_zero(buf, page_size);
}

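/*
 * Page-section layout: a fixed array of PageDescriptor entries (one per
 * dumpable page) followed by the variable-size page data. A single zero
 * page is written first, and every all-zero guest page shares it through
 * pd_zero, so zero pages cost one descriptor each and no extra data.
 */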
static int write_dump_pages(DumpState *s)
{
    int ret = 0;
    DataCache page_desc, page_data;
    size_t len_buf_out, size_out;
#ifdef CONFIG_LZO
    lzo_bytep wrkmem = NULL;
#endif
    uint8_t *buf_out = NULL;
    off_t offset_desc, offset_data;
    PageDescriptor pd, pd_zero;
    uint8_t *buf;
    int endian = s->dump_info.d_endian;
    GuestPhysBlock *block_iter = NULL;
    uint64_t pfn_iter;

    /* get offset of page_desc and page_data in dump file */
    offset_desc = s->offset_page;
    offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;

    prepare_data_cache(&page_desc, s, offset_desc);
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(s->page_size, s->flag_compress);
    if (len_buf_out == 0) {
        dump_error(s, "dump: failed to get length of output buffer.\n");
        ret = -1;
        goto out;
    }

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
#endif

    buf_out = g_malloc(len_buf_out);

    /*
     * init zero page's page_desc and page_data, because every zero page
     * uses the same page_data
     */
    pd_zero.size = cpu_convert_to_target32(s->page_size, endian);
    pd_zero.flags = cpu_convert_to_target32(0, endian);
    pd_zero.offset = cpu_convert_to_target64(offset_data, endian);
    pd_zero.page_flags = cpu_convert_to_target64(0, endian);
    buf = g_malloc0(s->page_size);
    ret = write_cache(&page_data, buf, s->page_size, false);
    g_free(buf);
    if (ret < 0) {
        dump_error(s, "dump: failed to write page data (zero page).\n");
        goto out;
    }

    offset_data += s->page_size;

    /*
     * dump memory to vmcore page by page. all zero pages are mapped to the
     * single zero page stored as the first page of the page section
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (is_zero_page(buf, s->page_size)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
                dump_error(s, "dump: failed to write page desc.\n");
                goto out;
            }
        } else {
            /*
             * not zero page, then:
             * 1. compress the page
             * 2. write the compressed page into the cache of page_data
             * 3. get page desc of the compressed page and write it into the
             *    cache of page_desc
             *
             * only one compression format will be used here, for
             * s->flag_compress is set. But when compression fails to work,
             * we fall back to save in plaintext.
             */
            size_out = len_buf_out;
            if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                (compress2(buf_out, (uLongf *)&size_out, buf, s->page_size,
                           Z_BEST_SPEED) == Z_OK) &&
                (size_out < s->page_size)) {
                pd.flags = cpu_convert_to_target32(DUMP_DH_COMPRESSED_ZLIB,
                                                   endian);
                pd.size = cpu_convert_to_target32(size_out, endian);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    dump_error(s, "dump: failed to write page data.\n");
                    goto out;
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                       (lzo1x_1_compress(buf, s->page_size, buf_out,
                           (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                       (size_out < s->page_size)) {
                pd.flags = cpu_convert_to_target32(DUMP_DH_COMPRESSED_LZO,
                                                   endian);
                pd.size = cpu_convert_to_target32(size_out, endian);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    dump_error(s, "dump: failed to write page data.\n");
                    goto out;
                }
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                       (snappy_compress((char *)buf, s->page_size,
                           (char *)buf_out, &size_out) == SNAPPY_OK) &&
                       (size_out < s->page_size)) {
                pd.flags = cpu_convert_to_target32(DUMP_DH_COMPRESSED_SNAPPY,
                                                   endian);
                pd.size = cpu_convert_to_target32(size_out, endian);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    dump_error(s, "dump: failed to write page data.\n");
                    goto out;
                }
#endif
            } else {
                /*
                 * fall back to save in plaintext, size_out should be
                 * assigned to s->page_size
                 */
                pd.flags = cpu_convert_to_target32(0, endian);
                size_out = s->page_size;
                pd.size = cpu_convert_to_target32(size_out, endian);

                ret = write_cache(&page_data, buf, s->page_size, false);
                if (ret < 0) {
                    dump_error(s, "dump: failed to write page data.\n");
                    goto out;
                }
            }

            /* get and write page desc here */
            pd.page_flags = cpu_convert_to_target64(0, endian);
            pd.offset = cpu_convert_to_target64(offset_data, endian);
            offset_data += size_out;

            ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
            if (ret < 0) {
                dump_error(s, "dump: failed to write page desc.\n");
                goto out;
            }
        }
    }

    ret = write_cache(&page_desc, NULL, 0, true);
    if (ret < 0) {
        dump_error(s, "dump: failed to sync cache for page_desc.\n");
        goto out;
    }
    ret = write_cache(&page_data, NULL, 0, true);
    if (ret < 0) {
        dump_error(s, "dump: failed to sync cache for page_data.\n");
        goto out;
    }

out:
    free_data_cache(&page_desc);
    free_data_cache(&page_data);
#ifdef CONFIG_LZO
    g_free(wrkmem);
#endif
    g_free(buf_out);

    return ret;
}

static int create_kdump_vmcore(DumpState *s)
{
    int ret;

    /*
     * the kdump-compressed format is:
     *                                               File offset
     *  +------------------------------------------+ 0x0
     *  |    main header (struct disk_dump_header) |
     *  |------------------------------------------+ block 1
     *  |    sub header (struct kdump_sub_header)  |
     *  |------------------------------------------+ block 2
     *  |            1st-dump_bitmap               |
     *  |------------------------------------------+ block 2 + X blocks
     *  |            2nd-dump_bitmap               | (aligned by block)
     *  |------------------------------------------+ block 2 + 2 * X blocks
     *  |  page desc for pfn 0 (struct page_desc)  | (aligned by block)
     *  |  page desc for pfn 1 (struct page_desc)  |
     *  |                    :                     |
     *  |------------------------------------------| (not aligned by block)
     *  |         page data (pfn 0)                |
     *  |         page data (pfn 1)                |
     *  |                    :                     |
     *  +------------------------------------------+
     */

    ret = write_start_flat_header(s->fd);
    if (ret < 0) {
        dump_error(s, "dump: failed to write start flat header.\n");
        return -1;
    }

    ret = write_dump_header(s);
    if (ret < 0) {
        return -1;
    }

    ret = write_dump_bitmap(s);
    if (ret < 0) {
        return -1;
    }

    ret = write_dump_pages(s);
    if (ret < 0) {
        return -1;
    }

    ret = write_end_flat_header(s->fd);
    if (ret < 0) {
        dump_error(s, "dump: failed to write end flat header.\n");
        return -1;
    }

    dump_completed(s);

    return 0;
}

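/*
 * Pick the first block to dump and the offset to start at within it,
 * honouring the [begin, begin + length) filter when one is set; returns -1
 * when no block intersects the filter range.
 */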
static ram_addr_t get_start_block(DumpState *s)
{
    GuestPhysBlock *block;

    if (!s->has_filter) {
        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        return 0;
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (block->target_start >= s->begin + s->length ||
            block->target_end <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->next_block = block;
        if (s->begin > block->target_start) {
            s->start = s->begin - block->target_start;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

static void get_max_mapnr(DumpState *s)
{
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
    s->max_mapnr = paddr_to_pfn(last_block->target_end, s->page_shift);
}

static int dump_init(DumpState *s, int fd, bool has_format,
                     DumpGuestMemoryFormat format, bool paging,
                     bool has_filter, int64_t begin, int64_t length,
                     Error **errp)
{
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    int ret;

    /* kdump-compressed conflicts with paging and filter */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        assert(!paging && !has_filter);
    }

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    /* If we use KVM, we should synchronize the registers before we get dump
     * info or physmap info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    CPU_FOREACH(cpu) {
        nr_cpus++;
    }

    s->errp = errp;
    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;

    guest_phys_blocks_init(&s->guest_phys_blocks);
    guest_phys_blocks_append(&s->guest_phys_blocks);

    s->start = get_start_block(s);
    if (s->start == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /* get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     */
    ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /* get memory mapping */
    memory_mapping_list_init(&s->list);
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            goto cleanup;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
    }

    s->nr_cpus = nr_cpus;
    s->page_size = TARGET_PAGE_SIZE;
    s->page_shift = ffs(s->page_size) - 1;

    get_max_mapnr(s);

    uint64_t tmp;
    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), s->page_size);
    s->len_dump_bitmap = tmp * s->page_size;

    /* init for kdump-compressed format */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        switch (format) {
        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
            s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
            s->flag_compress = DUMP_DH_COMPRESSED_LZO;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
            s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
            break;

        default:
            s->flag_compress = 0;
        }

        return 0;
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return 0;

cleanup:
    guest_phys_blocks_free(&s->guest_phys_blocks);
    if (s->resume) {
        vm_start();
    }

    return -1;
}

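/*
 * QMP entry point. A typical invocation (the file path is only an example)
 * looks like:
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false, "protocol": "file:/tmp/vmcore" } }
 */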
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length,
                           int64_t length, bool has_format,
                           DumpGuestMemoryFormat format, Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    int ret;

    /*
     * kdump-compressed format needs the whole memory dumped, so paging or
     * filter is not supported here.
     */
    if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
        (paging || has_begin || has_length)) {
        error_setg(errp, "kdump-compressed format doesn't support paging or "
                         "filter");
        return;
    }
    if (has_begin && !has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

    /* check whether lzo/snappy is supported */
#ifndef CONFIG_LZO
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
        error_setg(errp, "kdump-lzo is not available now");
        return;
    }
#endif

#ifndef CONFIG_SNAPPY
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
        error_setg(errp, "kdump-snappy is not available now");
        return;
    }
#endif

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    if (fd == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc0(sizeof(DumpState));

    ret = dump_init(s, fd, has_format, format, paging, has_begin,
                    begin, length, errp);
    if (ret < 0) {
        g_free(s);
        return;
    }

    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        if (create_kdump_vmcore(s) < 0 && !error_is_set(s->errp)) {
            error_set(errp, QERR_IO_ERROR);
        }
    } else {
        if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
            error_set(errp, QERR_IO_ERROR);
        }
    }

    g_free(s);
}

DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
{
    DumpGuestMemoryFormatList *item;
    DumpGuestMemoryCapability *cap =
                                  g_malloc0(sizeof(DumpGuestMemoryCapability));

    /* elf is always available */
    item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    cap->formats = item;
    item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;

    /* kdump-zlib is always available */
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;

    /* add new item if kdump-lzo is available */
#ifdef CONFIG_LZO
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
#endif

    /* add new item if kdump-snappy is available */
#ifdef CONFIG_SNAPPY
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
#endif

    return cap;
}