dump.c

  1. /*
  2. * QEMU dump
  3. *
  4. * Copyright Fujitsu, Corp. 2011, 2012
  5. *
  6. * Authors:
  7. * Wen Congyang <wency@cn.fujitsu.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2 or later.
  10. * See the COPYING file in the top-level directory.
  11. *
  12. */
  13. #include "qemu-common.h"
  14. #include "elf.h"
  15. #include "cpu.h"
  16. #include "exec/cpu-all.h"
  17. #include "exec/hwaddr.h"
  18. #include "monitor/monitor.h"
  19. #include "sysemu/kvm.h"
  20. #include "sysemu/dump.h"
  21. #include "sysemu/sysemu.h"
  22. #include "sysemu/memory_mapping.h"
  23. #include "sysemu/cpus.h"
  24. #include "qapi/error.h"
  25. #include "qmp-commands.h"
  26. #include <zlib.h>
  27. #ifdef CONFIG_LZO
  28. #include <lzo/lzo1x.h>
  29. #endif
  30. #ifdef CONFIG_SNAPPY
  31. #include <snappy-c.h>
  32. #endif
  33. #ifndef ELF_MACHINE_UNAME
  34. #define ELF_MACHINE_UNAME "Unknown"
  35. #endif
  36. uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
  37. {
  38. if (s->dump_info.d_endian == ELFDATA2LSB) {
  39. val = cpu_to_le16(val);
  40. } else {
  41. val = cpu_to_be16(val);
  42. }
  43. return val;
  44. }
  45. uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
  46. {
  47. if (s->dump_info.d_endian == ELFDATA2LSB) {
  48. val = cpu_to_le32(val);
  49. } else {
  50. val = cpu_to_be32(val);
  51. }
  52. return val;
  53. }
  54. uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
  55. {
  56. if (s->dump_info.d_endian == ELFDATA2LSB) {
  57. val = cpu_to_le64(val);
  58. } else {
  59. val = cpu_to_be64(val);
  60. }
  61. return val;
  62. }
  63. static int dump_cleanup(DumpState *s)
  64. {
  65. int ret = 0;
  66. guest_phys_blocks_free(&s->guest_phys_blocks);
  67. memory_mapping_list_free(&s->list);
  68. if (s->fd != -1) {
  69. close(s->fd);
  70. }
  71. if (s->resume) {
  72. vm_start();
  73. }
  74. return ret;
  75. }
  76. static void dump_error(DumpState *s, const char *reason)
  77. {
  78. dump_cleanup(s);
  79. }
  80. static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
  81. {
  82. DumpState *s = opaque;
  83. size_t written_size;
  84. written_size = qemu_write_full(s->fd, buf, size);
  85. if (written_size != size) {
  86. return -1;
  87. }
  88. return 0;
  89. }
  90. static int write_elf64_header(DumpState *s)
  91. {
  92. Elf64_Ehdr elf_header;
  93. int ret;
  94. memset(&elf_header, 0, sizeof(Elf64_Ehdr));
  95. memcpy(&elf_header, ELFMAG, SELFMAG);
  96. elf_header.e_ident[EI_CLASS] = ELFCLASS64;
  97. elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
  98. elf_header.e_ident[EI_VERSION] = EV_CURRENT;
  99. elf_header.e_type = cpu_to_dump16(s, ET_CORE);
  100. elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
  101. elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
  102. elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
  103. elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
  104. elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
  105. elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
  106. if (s->have_section) {
  107. uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;
  108. elf_header.e_shoff = cpu_to_dump64(s, shoff);
  109. elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
  110. elf_header.e_shnum = cpu_to_dump16(s, 1);
  111. }
  112. ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
  113. if (ret < 0) {
  114. dump_error(s, "dump: failed to write elf header.\n");
  115. return -1;
  116. }
  117. return 0;
  118. }
  119. static int write_elf32_header(DumpState *s)
  120. {
  121. Elf32_Ehdr elf_header;
  122. int ret;
  123. memset(&elf_header, 0, sizeof(Elf32_Ehdr));
  124. memcpy(&elf_header, ELFMAG, SELFMAG);
  125. elf_header.e_ident[EI_CLASS] = ELFCLASS32;
  126. elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
  127. elf_header.e_ident[EI_VERSION] = EV_CURRENT;
  128. elf_header.e_type = cpu_to_dump16(s, ET_CORE);
  129. elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
  130. elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
  131. elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
  132. elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
  133. elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
  134. elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
  135. if (s->have_section) {
  136. uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;
  137. elf_header.e_shoff = cpu_to_dump32(s, shoff);
  138. elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
  139. elf_header.e_shnum = cpu_to_dump16(s, 1);
  140. }
  141. ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
  142. if (ret < 0) {
  143. dump_error(s, "dump: failed to write elf header.\n");
  144. return -1;
  145. }
  146. return 0;
  147. }
  148. static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
  149. int phdr_index, hwaddr offset,
  150. hwaddr filesz)
  151. {
  152. Elf64_Phdr phdr;
  153. int ret;
  154. memset(&phdr, 0, sizeof(Elf64_Phdr));
  155. phdr.p_type = cpu_to_dump32(s, PT_LOAD);
  156. phdr.p_offset = cpu_to_dump64(s, offset);
  157. phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
  158. phdr.p_filesz = cpu_to_dump64(s, filesz);
  159. phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
  160. phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr);
  161. assert(memory_mapping->length >= filesz);
  162. ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
  163. if (ret < 0) {
  164. dump_error(s, "dump: failed to write program header table.\n");
  165. return -1;
  166. }
  167. return 0;
  168. }
  169. static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
  170. int phdr_index, hwaddr offset,
  171. hwaddr filesz)
  172. {
  173. Elf32_Phdr phdr;
  174. int ret;
  175. memset(&phdr, 0, sizeof(Elf32_Phdr));
  176. phdr.p_type = cpu_to_dump32(s, PT_LOAD);
  177. phdr.p_offset = cpu_to_dump32(s, offset);
  178. phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
  179. phdr.p_filesz = cpu_to_dump32(s, filesz);
  180. phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
  181. phdr.p_vaddr = cpu_to_dump32(s, memory_mapping->virt_addr);
  182. assert(memory_mapping->length >= filesz);
  183. ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
  184. if (ret < 0) {
  185. dump_error(s, "dump: failed to write program header table.\n");
  186. return -1;
  187. }
  188. return 0;
  189. }
  190. static int write_elf64_note(DumpState *s)
  191. {
  192. Elf64_Phdr phdr;
  193. hwaddr begin = s->memory_offset - s->note_size;
  194. int ret;
  195. memset(&phdr, 0, sizeof(Elf64_Phdr));
  196. phdr.p_type = cpu_to_dump32(s, PT_NOTE);
  197. phdr.p_offset = cpu_to_dump64(s, begin);
  198. phdr.p_paddr = 0;
  199. phdr.p_filesz = cpu_to_dump64(s, s->note_size);
  200. phdr.p_memsz = cpu_to_dump64(s, s->note_size);
  201. phdr.p_vaddr = 0;
  202. ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
  203. if (ret < 0) {
  204. dump_error(s, "dump: failed to write program header table.\n");
  205. return -1;
  206. }
  207. return 0;
  208. }
  209. static inline int cpu_index(CPUState *cpu)
  210. {
  211. return cpu->cpu_index + 1;
  212. }
  213. static int write_elf64_notes(WriteCoreDumpFunction f, DumpState *s)
  214. {
  215. CPUState *cpu;
  216. int ret;
  217. int id;
  218. CPU_FOREACH(cpu) {
  219. id = cpu_index(cpu);
  220. ret = cpu_write_elf64_note(f, cpu, id, s);
  221. if (ret < 0) {
  222. dump_error(s, "dump: failed to write elf notes.\n");
  223. return -1;
  224. }
  225. }
  226. CPU_FOREACH(cpu) {
  227. ret = cpu_write_elf64_qemunote(f, cpu, s);
  228. if (ret < 0) {
  229. dump_error(s, "dump: failed to write CPU status.\n");
  230. return -1;
  231. }
  232. }
  233. return 0;
  234. }
  235. static int write_elf32_note(DumpState *s)
  236. {
  237. hwaddr begin = s->memory_offset - s->note_size;
  238. Elf32_Phdr phdr;
  239. int ret;
  240. memset(&phdr, 0, sizeof(Elf32_Phdr));
  241. phdr.p_type = cpu_to_dump32(s, PT_NOTE);
  242. phdr.p_offset = cpu_to_dump32(s, begin);
  243. phdr.p_paddr = 0;
  244. phdr.p_filesz = cpu_to_dump32(s, s->note_size);
  245. phdr.p_memsz = cpu_to_dump32(s, s->note_size);
  246. phdr.p_vaddr = 0;
  247. ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
  248. if (ret < 0) {
  249. dump_error(s, "dump: failed to write program header table.\n");
  250. return -1;
  251. }
  252. return 0;
  253. }
  254. static int write_elf32_notes(WriteCoreDumpFunction f, DumpState *s)
  255. {
  256. CPUState *cpu;
  257. int ret;
  258. int id;
  259. CPU_FOREACH(cpu) {
  260. id = cpu_index(cpu);
  261. ret = cpu_write_elf32_note(f, cpu, id, s);
  262. if (ret < 0) {
  263. dump_error(s, "dump: failed to write elf notes.\n");
  264. return -1;
  265. }
  266. }
  267. CPU_FOREACH(cpu) {
  268. ret = cpu_write_elf32_qemunote(f, cpu, s);
  269. if (ret < 0) {
  270. dump_error(s, "dump: failed to write CPU status.\n");
  271. return -1;
  272. }
  273. }
  274. return 0;
  275. }
  276. static int write_elf_section(DumpState *s, int type)
  277. {
  278. Elf32_Shdr shdr32;
  279. Elf64_Shdr shdr64;
  280. int shdr_size;
  281. void *shdr;
  282. int ret;
  283. if (type == 0) {
  284. shdr_size = sizeof(Elf32_Shdr);
  285. memset(&shdr32, 0, shdr_size);
  286. shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
  287. shdr = &shdr32;
  288. } else {
  289. shdr_size = sizeof(Elf64_Shdr);
  290. memset(&shdr64, 0, shdr_size);
  291. shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
  292. shdr = &shdr64;
  293. }
  294. ret = fd_write_vmcore(shdr, shdr_size, s);
  295. if (ret < 0) {
  296. dump_error(s, "dump: failed to write section header table.\n");
  297. return -1;
  298. }
  299. return 0;
  300. }
  301. static int write_data(DumpState *s, void *buf, int length)
  302. {
  303. int ret;
  304. ret = fd_write_vmcore(buf, length, s);
  305. if (ret < 0) {
  306. dump_error(s, "dump: failed to save memory.\n");
  307. return -1;
  308. }
  309. return 0;
  310. }
  311. /* write the memory to vmcore. 1 page per I/O. */
  312. static int write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
  313. int64_t size)
  314. {
  315. int64_t i;
  316. int ret;
  317. for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
  318. ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
  319. TARGET_PAGE_SIZE);
  320. if (ret < 0) {
  321. return ret;
  322. }
  323. }
  324. if ((size % TARGET_PAGE_SIZE) != 0) {
  325. ret = write_data(s, block->host_addr + start + i * TARGET_PAGE_SIZE,
  326. size % TARGET_PAGE_SIZE);
  327. if (ret < 0) {
  328. return ret;
  329. }
  330. }
  331. return 0;
  332. }
  333. /* get the memory's offset and size in the vmcore */
  334. static void get_offset_range(hwaddr phys_addr,
  335. ram_addr_t mapping_length,
  336. DumpState *s,
  337. hwaddr *p_offset,
  338. hwaddr *p_filesz)
  339. {
  340. GuestPhysBlock *block;
  341. hwaddr offset = s->memory_offset;
  342. int64_t size_in_block, start;
  343. /* When the memory is not stored into vmcore, offset will be -1 */
  344. *p_offset = -1;
  345. *p_filesz = 0;
  346. if (s->has_filter) {
  347. if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
  348. return;
  349. }
  350. }
  351. QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
  352. if (s->has_filter) {
  353. if (block->target_start >= s->begin + s->length ||
  354. block->target_end <= s->begin) {
  355. /* This block is out of the range */
  356. continue;
  357. }
  358. if (s->begin <= block->target_start) {
  359. start = block->target_start;
  360. } else {
  361. start = s->begin;
  362. }
  363. size_in_block = block->target_end - start;
  364. if (s->begin + s->length < block->target_end) {
  365. size_in_block -= block->target_end - (s->begin + s->length);
  366. }
  367. } else {
  368. start = block->target_start;
  369. size_in_block = block->target_end - block->target_start;
  370. }
  371. if (phys_addr >= start && phys_addr < start + size_in_block) {
  372. *p_offset = phys_addr - start + offset;
  373. /* The offset range mapped from the vmcore file must not spill over
  374. * the GuestPhysBlock, clamp it. The rest of the mapping will be
  375. * zero-filled in memory at load time; see
  376. * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
  377. */
  378. *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
  379. mapping_length :
  380. size_in_block - (phys_addr - start);
  381. return;
  382. }
  383. offset += size_in_block;
  384. }
  385. }
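/*
 * Illustrative example (not part of the original source; the numbers are made
 * up): suppose a GuestPhysBlock covers guest-physical [0x00100000, 0x00180000)
 * and its data begins at file offset 'offset'.  A mapping at phys_addr
 * 0x0017f000 with mapping_length 0x2000 falls inside the block, so
 *
 *     *p_offset = offset + (0x0017f000 - 0x00100000) = offset + 0x7f000
 *     *p_filesz = 0x00180000 - 0x0017f000            = 0x1000
 *
 * i.e. p_filesz is clamped to the end of the block; the remaining 0x1000 bytes
 * of the mapping are zero-filled at load time because p_memsz > p_filesz.
 */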
  386. static int write_elf_loads(DumpState *s)
  387. {
  388. hwaddr offset, filesz;
  389. MemoryMapping *memory_mapping;
  390. uint32_t phdr_index = 1;
  391. int ret;
  392. uint32_t max_index;
  393. if (s->have_section) {
  394. max_index = s->sh_info;
  395. } else {
  396. max_index = s->phdr_num;
  397. }
  398. QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
  399. get_offset_range(memory_mapping->phys_addr,
  400. memory_mapping->length,
  401. s, &offset, &filesz);
  402. if (s->dump_info.d_class == ELFCLASS64) {
  403. ret = write_elf64_load(s, memory_mapping, phdr_index++, offset,
  404. filesz);
  405. } else {
  406. ret = write_elf32_load(s, memory_mapping, phdr_index++, offset,
  407. filesz);
  408. }
  409. if (ret < 0) {
  410. return -1;
  411. }
  412. if (phdr_index >= max_index) {
  413. break;
  414. }
  415. }
  416. return 0;
  417. }
  418. /* write elf header, PT_NOTE and elf note to vmcore. */
  419. static int dump_begin(DumpState *s)
  420. {
  421. int ret;
  422. /*
  423. * the vmcore's format is:
  424. * --------------
  425. * | elf header |
  426. * --------------
  427. * | PT_NOTE |
  428. * --------------
  429. * | PT_LOAD |
  430. * --------------
  431. * | ...... |
  432. * --------------
  433. * | PT_LOAD |
  434. * --------------
  435. * | sec_hdr |
  436. * --------------
  437. * | elf note |
  438. * --------------
  439. * | memory |
  440. * --------------
  441. *
  442. * we only know where the memory is saved after we write the elf notes into
  443. * the vmcore.
  444. */
  445. /* write elf header to vmcore */
  446. if (s->dump_info.d_class == ELFCLASS64) {
  447. ret = write_elf64_header(s);
  448. } else {
  449. ret = write_elf32_header(s);
  450. }
  451. if (ret < 0) {
  452. return -1;
  453. }
  454. if (s->dump_info.d_class == ELFCLASS64) {
  455. /* write PT_NOTE to vmcore */
  456. if (write_elf64_note(s) < 0) {
  457. return -1;
  458. }
  459. /* write all PT_LOAD to vmcore */
  460. if (write_elf_loads(s) < 0) {
  461. return -1;
  462. }
  463. /* write section to vmcore */
  464. if (s->have_section) {
  465. if (write_elf_section(s, 1) < 0) {
  466. return -1;
  467. }
  468. }
  469. /* write notes to vmcore */
  470. if (write_elf64_notes(fd_write_vmcore, s) < 0) {
  471. return -1;
  472. }
  473. } else {
  474. /* write PT_NOTE to vmcore */
  475. if (write_elf32_note(s) < 0) {
  476. return -1;
  477. }
  478. /* write all PT_LOAD to vmcore */
  479. if (write_elf_loads(s) < 0) {
  480. return -1;
  481. }
  482. /* write section to vmcore */
  483. if (s->have_section) {
  484. if (write_elf_section(s, 0) < 0) {
  485. return -1;
  486. }
  487. }
  488. /* write notes to vmcore */
  489. if (write_elf32_notes(fd_write_vmcore, s) < 0) {
  490. return -1;
  491. }
  492. }
  493. return 0;
  494. }
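/*
 * Worked example of the resulting layout (illustrative only; it assumes the
 * standard ELF64 structure sizes, i.e. sizeof(Elf64_Ehdr) == 64 and
 * sizeof(Elf64_Phdr) == 56, no section header, and a hypothetical dump with
 * one PT_NOTE plus three PT_LOAD headers and note_size == 0x200):
 *
 *     e_phoff       = 64                 (right after the ELF header)
 *     elf notes at    64 + 4 * 56  = 288
 *     memory_offset = 288 + 0x200  = 800
 *
 * which matches the s->memory_offset computation in dump_init().
 */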
  495. /* write PT_LOAD to vmcore */
  496. static int dump_completed(DumpState *s)
  497. {
  498. dump_cleanup(s);
  499. return 0;
  500. }
  501. static int get_next_block(DumpState *s, GuestPhysBlock *block)
  502. {
  503. while (1) {
  504. block = QTAILQ_NEXT(block, next);
  505. if (!block) {
  506. /* no more block */
  507. return 1;
  508. }
  509. s->start = 0;
  510. s->next_block = block;
  511. if (s->has_filter) {
  512. if (block->target_start >= s->begin + s->length ||
  513. block->target_end <= s->begin) {
  514. /* This block is out of the range */
  515. continue;
  516. }
  517. if (s->begin > block->target_start) {
  518. s->start = s->begin - block->target_start;
  519. }
  520. }
  521. return 0;
  522. }
  523. }
  524. /* write all memory to vmcore */
  525. static int dump_iterate(DumpState *s)
  526. {
  527. GuestPhysBlock *block;
  528. int64_t size;
  529. int ret;
  530. while (1) {
  531. block = s->next_block;
  532. size = block->target_end - block->target_start;
  533. if (s->has_filter) {
  534. size -= s->start;
  535. if (s->begin + s->length < block->target_end) {
  536. size -= block->target_end - (s->begin + s->length);
  537. }
  538. }
  539. ret = write_memory(s, block, s->start, size);
  540. if (ret == -1) {
  541. return ret;
  542. }
  543. ret = get_next_block(s, block);
  544. if (ret == 1) {
  545. dump_completed(s);
  546. return 0;
  547. }
  548. }
  549. }
  550. static int create_vmcore(DumpState *s)
  551. {
  552. int ret;
  553. ret = dump_begin(s);
  554. if (ret < 0) {
  555. return -1;
  556. }
  557. ret = dump_iterate(s);
  558. if (ret < 0) {
  559. return -1;
  560. }
  561. return 0;
  562. }
  563. static int write_start_flat_header(int fd)
  564. {
  565. MakedumpfileHeader *mh;
  566. int ret = 0;
  567. QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
  568. mh = g_malloc0(MAX_SIZE_MDF_HEADER);
  569. memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
  570. MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));
  571. mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
  572. mh->version = cpu_to_be64(VERSION_FLAT_HEADER);
  573. size_t written_size;
  574. written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
  575. if (written_size != MAX_SIZE_MDF_HEADER) {
  576. ret = -1;
  577. }
  578. g_free(mh);
  579. return ret;
  580. }
  581. static int write_end_flat_header(int fd)
  582. {
  583. MakedumpfileDataHeader mdh;
  584. mdh.offset = END_FLAG_FLAT_HEADER;
  585. mdh.buf_size = END_FLAG_FLAT_HEADER;
  586. size_t written_size;
  587. written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
  588. if (written_size != sizeof(mdh)) {
  589. return -1;
  590. }
  591. return 0;
  592. }
  593. static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
  594. {
  595. size_t written_size;
  596. MakedumpfileDataHeader mdh;
  597. mdh.offset = cpu_to_be64(offset);
  598. mdh.buf_size = cpu_to_be64(size);
  599. written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
  600. if (written_size != sizeof(mdh)) {
  601. return -1;
  602. }
  603. written_size = qemu_write_full(fd, buf, size);
  604. if (written_size != size) {
  605. return -1;
  606. }
  607. return 0;
  608. }
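/*
 * Sketch of the flat format produced by write_start_flat_header(),
 * write_buffer() and write_end_flat_header() (a summary of the code above,
 * not an authoritative format description):
 *
 *     +-------------------------------------------+
 *     | MakedumpfileHeader, zero-padded to         |
 *     | MAX_SIZE_MDF_HEADER bytes                  |
 *     +-------------------------------------------+
 *     | MakedumpfileDataHeader {offset, buf_size}  |  both big-endian
 *     | buf_size bytes of data belonging at offset |
 *     +-------------------------------------------+
 *     | ... one such record per write_buffer()     |
 *     +-------------------------------------------+
 *     | MakedumpfileDataHeader with offset and     |
 *     | buf_size set to END_FLAG_FLAT_HEADER       |
 *     +-------------------------------------------+
 *
 * A post-processing tool (e.g. makedumpfile's rearrange mode) can replay the
 * (offset, data) records to rebuild the seekable kdump-compressed file.
 */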
  609. static int buf_write_note(const void *buf, size_t size, void *opaque)
  610. {
  611. DumpState *s = opaque;
  612. /* note_buf is not enough */
  613. if (s->note_buf_offset + size > s->note_size) {
  614. return -1;
  615. }
  616. memcpy(s->note_buf + s->note_buf_offset, buf, size);
  617. s->note_buf_offset += size;
  618. return 0;
  619. }
  620. /* write common header, sub header and elf note to vmcore */
  621. static int create_header32(DumpState *s)
  622. {
  623. int ret = 0;
  624. DiskDumpHeader32 *dh = NULL;
  625. KdumpSubHeader32 *kh = NULL;
  626. size_t size;
  627. uint32_t block_size;
  628. uint32_t sub_hdr_size;
  629. uint32_t bitmap_blocks;
  630. uint32_t status = 0;
  631. uint64_t offset_note;
  632. /* write the common header; the version of the kdump-compressed format is 6 */
  633. size = sizeof(DiskDumpHeader32);
  634. dh = g_malloc0(size);
  635. strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
  636. dh->header_version = cpu_to_dump32(s, 6);
  637. block_size = TARGET_PAGE_SIZE;
  638. dh->block_size = cpu_to_dump32(s, block_size);
  639. sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
  640. sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
  641. dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
  642. /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
  643. dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
  644. dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
  645. bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
  646. dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
  647. strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
  648. if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
  649. status |= DUMP_DH_COMPRESSED_ZLIB;
  650. }
  651. #ifdef CONFIG_LZO
  652. if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
  653. status |= DUMP_DH_COMPRESSED_LZO;
  654. }
  655. #endif
  656. #ifdef CONFIG_SNAPPY
  657. if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
  658. status |= DUMP_DH_COMPRESSED_SNAPPY;
  659. }
  660. #endif
  661. dh->status = cpu_to_dump32(s, status);
  662. if (write_buffer(s->fd, 0, dh, size) < 0) {
  663. dump_error(s, "dump: failed to write disk dump header.\n");
  664. ret = -1;
  665. goto out;
  666. }
  667. /* write sub header */
  668. size = sizeof(KdumpSubHeader32);
  669. kh = g_malloc0(size);
  670. /* 64bit max_mapnr_64 */
  671. kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
  672. kh->phys_base = cpu_to_dump32(s, PHYS_BASE);
  673. kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);
  674. offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
  675. kh->offset_note = cpu_to_dump64(s, offset_note);
  676. kh->note_size = cpu_to_dump32(s, s->note_size);
  677. if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
  678. block_size, kh, size) < 0) {
  679. dump_error(s, "dump: failed to write kdump sub header.\n");
  680. ret = -1;
  681. goto out;
  682. }
  683. /* write note */
  684. s->note_buf = g_malloc0(s->note_size);
  685. s->note_buf_offset = 0;
  686. /* use s->note_buf to store notes temporarily */
  687. if (write_elf32_notes(buf_write_note, s) < 0) {
  688. ret = -1;
  689. goto out;
  690. }
  691. if (write_buffer(s->fd, offset_note, s->note_buf,
  692. s->note_size) < 0) {
  693. dump_error(s, "dump: failed to write notes");
  694. ret = -1;
  695. goto out;
  696. }
  697. /* get offset of dump_bitmap */
  698. s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
  699. block_size;
  700. /* get offset of page */
  701. s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
  702. block_size;
  703. out:
  704. g_free(dh);
  705. g_free(kh);
  706. g_free(s->note_buf);
  707. return ret;
  708. }
  709. /* write common header, sub header and elf note to vmcore */
  710. static int create_header64(DumpState *s)
  711. {
  712. int ret = 0;
  713. DiskDumpHeader64 *dh = NULL;
  714. KdumpSubHeader64 *kh = NULL;
  715. size_t size;
  716. uint32_t block_size;
  717. uint32_t sub_hdr_size;
  718. uint32_t bitmap_blocks;
  719. uint32_t status = 0;
  720. uint64_t offset_note;
  721. /* write the common header; the version of the kdump-compressed format is 6 */
  722. size = sizeof(DiskDumpHeader64);
  723. dh = g_malloc0(size);
  724. strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
  725. dh->header_version = cpu_to_dump32(s, 6);
  726. block_size = TARGET_PAGE_SIZE;
  727. dh->block_size = cpu_to_dump32(s, block_size);
  728. sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
  729. sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
  730. dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
  731. /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
  732. dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
  733. dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
  734. bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
  735. dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
  736. strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));
  737. if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
  738. status |= DUMP_DH_COMPRESSED_ZLIB;
  739. }
  740. #ifdef CONFIG_LZO
  741. if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
  742. status |= DUMP_DH_COMPRESSED_LZO;
  743. }
  744. #endif
  745. #ifdef CONFIG_SNAPPY
  746. if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
  747. status |= DUMP_DH_COMPRESSED_SNAPPY;
  748. }
  749. #endif
  750. dh->status = cpu_to_dump32(s, status);
  751. if (write_buffer(s->fd, 0, dh, size) < 0) {
  752. dump_error(s, "dump: failed to write disk dump header.\n");
  753. ret = -1;
  754. goto out;
  755. }
  756. /* write sub header */
  757. size = sizeof(KdumpSubHeader64);
  758. kh = g_malloc0(size);
  759. /* 64bit max_mapnr_64 */
  760. kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
  761. kh->phys_base = cpu_to_dump64(s, PHYS_BASE);
  762. kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);
  763. offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
  764. kh->offset_note = cpu_to_dump64(s, offset_note);
  765. kh->note_size = cpu_to_dump64(s, s->note_size);
  766. if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
  767. block_size, kh, size) < 0) {
  768. dump_error(s, "dump: failed to write kdump sub header.\n");
  769. ret = -1;
  770. goto out;
  771. }
  772. /* write note */
  773. s->note_buf = g_malloc0(s->note_size);
  774. s->note_buf_offset = 0;
  775. /* use s->note_buf to store notes temporarily */
  776. if (write_elf64_notes(buf_write_note, s) < 0) {
  777. ret = -1;
  778. goto out;
  779. }
  780. if (write_buffer(s->fd, offset_note, s->note_buf,
  781. s->note_size) < 0) {
  782. dump_error(s, "dump: failed to write notes");
  783. ret = -1;
  784. goto out;
  785. }
  786. /* get offset of dump_bitmap */
  787. s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
  788. block_size;
  789. /* get offset of page */
  790. s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
  791. block_size;
  792. out:
  793. g_free(dh);
  794. g_free(kh);
  795. g_free(s->note_buf);
  796. return ret;
  797. }
  798. static int write_dump_header(DumpState *s)
  799. {
  800. if (s->dump_info.d_class == ELFCLASS32) {
  801. return create_header32(s);
  802. } else {
  803. return create_header64(s);
  804. }
  805. }
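/*
 * Illustrative offset arithmetic for the headers written above (hypothetical
 * numbers; it assumes DISKDUMP_HEADER_BLOCKS == 1, block_size == 4096,
 * sub_hdr_size == 1 block and bitmap_blocks == 2):
 *
 *     offset_note        = 1 * 4096 + sizeof(KdumpSubHeaderNN)
 *     offset_dump_bitmap = (1 + 1) * 4096     = 8192
 *     offset_page        = (1 + 1 + 2) * 4096 = 16384
 *
 * matching the calculations in create_header32()/create_header64().
 */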
  806. /*
  807. * set dump_bitmap sequentially. The bits before last_pfn are not allowed to be
  808. * rewritten, so to set the first bit, set both last_pfn and pfn to 0.
  809. * set_dump_bitmap will always leave the most recently set bit un-synced. Setting
  810. * (last bit + sizeof(buf) * 8) to 0 flushes the content of buf into the
  811. * vmcore, i.e. it synchronizes the un-synced bits into the vmcore.
  812. */
  813. static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
  814. uint8_t *buf, DumpState *s)
  815. {
  816. off_t old_offset, new_offset;
  817. off_t offset_bitmap1, offset_bitmap2;
  818. uint32_t byte, bit;
  819. /* should not set the previous place */
  820. assert(last_pfn <= pfn);
  821. /*
  822. * If the bit to be set is not cached in buf, flush the data in buf to the
  823. * vmcore first.
  824. * Making new_offset larger than old_offset also syncs the remaining data
  825. * into the vmcore.
  826. */
  827. old_offset = BUFSIZE_BITMAP * (last_pfn / PFN_BUFBITMAP);
  828. new_offset = BUFSIZE_BITMAP * (pfn / PFN_BUFBITMAP);
  829. while (old_offset < new_offset) {
  830. /* calculate the offset and write dump_bitmap */
  831. offset_bitmap1 = s->offset_dump_bitmap + old_offset;
  832. if (write_buffer(s->fd, offset_bitmap1, buf,
  833. BUFSIZE_BITMAP) < 0) {
  834. return -1;
  835. }
  836. /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
  837. offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
  838. old_offset;
  839. if (write_buffer(s->fd, offset_bitmap2, buf,
  840. BUFSIZE_BITMAP) < 0) {
  841. return -1;
  842. }
  843. memset(buf, 0, BUFSIZE_BITMAP);
  844. old_offset += BUFSIZE_BITMAP;
  845. }
  846. /* get the exact place of the bit in the buf, and set it */
  847. byte = (pfn % PFN_BUFBITMAP) / CHAR_BIT;
  848. bit = (pfn % PFN_BUFBITMAP) % CHAR_BIT;
  849. if (value) {
  850. buf[byte] |= 1u << bit;
  851. } else {
  852. buf[byte] &= ~(1u << bit);
  853. }
  854. return 0;
  855. }
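/*
 * Worked example of the bit arithmetic above (illustrative; it assumes
 * BUFSIZE_BITMAP is 4096, so PFN_BUFBITMAP = 4096 * CHAR_BIT = 32768):
 * for pfn = 40000,
 *
 *     new_offset = 4096 * (40000 / 32768) = 4096    (second bitmap chunk)
 *     byte       = (40000 % 32768) / 8    = 904
 *     bit        = (40000 % 32768) % 8    = 0
 *
 * so buf[904] |= 1 once any previously cached chunk has been flushed.
 */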
  856. /*
  857. * Examine every page and return the page frame number and the address of the page.
  858. * bufptr can be NULL. Note: the blocks here are supposed to reflect guest-phys
  859. * blocks, so block->target_start and block->target_end should be integral
  860. * multiples of the target page size.
  861. */
  862. static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
  863. uint8_t **bufptr, DumpState *s)
  864. {
  865. GuestPhysBlock *block = *blockptr;
  866. hwaddr addr;
  867. uint8_t *buf;
  868. /* block == NULL means the start of the iteration */
  869. if (!block) {
  870. block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
  871. *blockptr = block;
  872. assert((block->target_start & ~TARGET_PAGE_MASK) == 0);
  873. assert((block->target_end & ~TARGET_PAGE_MASK) == 0);
  874. *pfnptr = paddr_to_pfn(block->target_start);
  875. if (bufptr) {
  876. *bufptr = block->host_addr;
  877. }
  878. return true;
  879. }
  880. *pfnptr = *pfnptr + 1;
  881. addr = pfn_to_paddr(*pfnptr);
  882. if ((addr >= block->target_start) &&
  883. (addr + TARGET_PAGE_SIZE <= block->target_end)) {
  884. buf = block->host_addr + (addr - block->target_start);
  885. } else {
  886. /* the next page is in the next block */
  887. block = QTAILQ_NEXT(block, next);
  888. *blockptr = block;
  889. if (!block) {
  890. return false;
  891. }
  892. assert((block->target_start & ~TARGET_PAGE_MASK) == 0);
  893. assert((block->target_end & ~TARGET_PAGE_MASK) == 0);
  894. *pfnptr = paddr_to_pfn(block->target_start);
  895. buf = block->host_addr;
  896. }
  897. if (bufptr) {
  898. *bufptr = buf;
  899. }
  900. return true;
  901. }
  902. static int write_dump_bitmap(DumpState *s)
  903. {
  904. int ret = 0;
  905. uint64_t last_pfn, pfn;
  906. void *dump_bitmap_buf;
  907. size_t num_dumpable;
  908. GuestPhysBlock *block_iter = NULL;
  909. /* dump_bitmap_buf is used to store dump_bitmap temporarily */
  910. dump_bitmap_buf = g_malloc0(BUFSIZE_BITMAP);
  911. num_dumpable = 0;
  912. last_pfn = 0;
  913. /*
  914. * examine memory page by page, and set the bit in dump_bitmap corresponding
  915. * to each existing page.
  916. */
  917. while (get_next_page(&block_iter, &pfn, NULL, s)) {
  918. ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
  919. if (ret < 0) {
  920. dump_error(s, "dump: failed to set dump_bitmap.\n");
  921. ret = -1;
  922. goto out;
  923. }
  924. last_pfn = pfn;
  925. num_dumpable++;
  926. }
  927. /*
  928. * set_dump_bitmap will always leave the most recently set bit un-synced. Here
  929. * we set bit (last_pfn + PFN_BUFBITMAP) to 0, so the set but un-synced bits
  930. * get synchronized into the vmcore.
  931. */
  932. if (num_dumpable > 0) {
  933. ret = set_dump_bitmap(last_pfn, last_pfn + PFN_BUFBITMAP, false,
  934. dump_bitmap_buf, s);
  935. if (ret < 0) {
  936. dump_error(s, "dump: failed to sync dump_bitmap.\n");
  937. ret = -1;
  938. goto out;
  939. }
  940. }
  941. /* number of dumpable pages that will be dumped later */
  942. s->num_dumpable = num_dumpable;
  943. out:
  944. g_free(dump_bitmap_buf);
  945. return ret;
  946. }
  947. static void prepare_data_cache(DataCache *data_cache, DumpState *s,
  948. off_t offset)
  949. {
  950. data_cache->fd = s->fd;
  951. data_cache->data_size = 0;
  952. data_cache->buf_size = BUFSIZE_DATA_CACHE;
  953. data_cache->buf = g_malloc0(BUFSIZE_DATA_CACHE);
  954. data_cache->offset = offset;
  955. }
  956. static int write_cache(DataCache *dc, const void *buf, size_t size,
  957. bool flag_sync)
  958. {
  959. /*
  960. * dc->buf_size must not be less than size, otherwise dc will never be
  961. * large enough
  962. */
  963. assert(size <= dc->buf_size);
  964. /*
  965. * if flag_sync is set, synchronize data in dc->buf into vmcore.
  966. * otherwise check if the space is enough for caching data in buf, if not,
  967. * write the data in dc->buf to dc->fd and reset dc->buf
  968. */
  969. if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
  970. (flag_sync && dc->data_size > 0)) {
  971. if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
  972. return -1;
  973. }
  974. dc->offset += dc->data_size;
  975. dc->data_size = 0;
  976. }
  977. if (!flag_sync) {
  978. memcpy(dc->buf + dc->data_size, buf, size);
  979. dc->data_size += size;
  980. }
  981. return 0;
  982. }
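/*
 * Typical calling pattern, sketched here for clarity (this is how
 * write_dump_pages() below uses the cache):
 *
 *     prepare_data_cache(&dc, s, start_offset);
 *     ... repeatedly: write_cache(&dc, rec, rec_size, false);  // buffered
 *     write_cache(&dc, NULL, 0, true);                         // final flush
 *     free_data_cache(&dc);
 */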
  983. static void free_data_cache(DataCache *data_cache)
  984. {
  985. g_free(data_cache->buf);
  986. }
  987. static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
  988. {
  989. switch (flag_compress) {
  990. case DUMP_DH_COMPRESSED_ZLIB:
  991. return compressBound(page_size);
  992. case DUMP_DH_COMPRESSED_LZO:
  993. /*
  994. * LZO will expand incompressible data by a little amount. Please check
  995. * the following URL to see the expansion calculation:
  996. * http://www.oberhumer.com/opensource/lzo/lzofaq.php
  997. */
  998. return page_size + page_size / 16 + 64 + 3;
  999. #ifdef CONFIG_SNAPPY
  1000. case DUMP_DH_COMPRESSED_SNAPPY:
  1001. return snappy_max_compressed_length(page_size);
  1002. #endif
  1003. }
  1004. return 0;
  1005. }
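/*
 * Worked example (illustrative): for a 4096-byte page the LZO worst case
 * above is 4096 + 4096 / 16 + 64 + 3 = 4419 bytes.  The zlib and snappy
 * bounds come from compressBound() and snappy_max_compressed_length() and
 * are likewise only slightly larger than the page size itself.
 */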
  1006. /*
  1007. * check if the page is all 0
  1008. */
  1009. static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
  1010. {
  1011. return buffer_is_zero(buf, page_size);
  1012. }
  1013. static int write_dump_pages(DumpState *s)
  1014. {
  1015. int ret = 0;
  1016. DataCache page_desc, page_data;
  1017. size_t len_buf_out, size_out;
  1018. #ifdef CONFIG_LZO
  1019. lzo_bytep wrkmem = NULL;
  1020. #endif
  1021. uint8_t *buf_out = NULL;
  1022. off_t offset_desc, offset_data;
  1023. PageDescriptor pd, pd_zero;
  1024. uint8_t *buf;
  1025. GuestPhysBlock *block_iter = NULL;
  1026. uint64_t pfn_iter;
  1027. /* get offset of page_desc and page_data in dump file */
  1028. offset_desc = s->offset_page;
  1029. offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;
  1030. prepare_data_cache(&page_desc, s, offset_desc);
  1031. prepare_data_cache(&page_data, s, offset_data);
  1032. /* prepare buffer to store compressed data */
  1033. len_buf_out = get_len_buf_out(TARGET_PAGE_SIZE, s->flag_compress);
  1034. assert(len_buf_out != 0);
  1035. #ifdef CONFIG_LZO
  1036. wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
  1037. #endif
  1038. buf_out = g_malloc(len_buf_out);
  1039. /*
  1040. * init zero page's page_desc and page_data, because every zero page
  1041. * uses the same page_data
  1042. */
  1043. pd_zero.size = cpu_to_dump32(s, TARGET_PAGE_SIZE);
  1044. pd_zero.flags = cpu_to_dump32(s, 0);
  1045. pd_zero.offset = cpu_to_dump64(s, offset_data);
  1046. pd_zero.page_flags = cpu_to_dump64(s, 0);
  1047. buf = g_malloc0(TARGET_PAGE_SIZE);
  1048. ret = write_cache(&page_data, buf, TARGET_PAGE_SIZE, false);
  1049. g_free(buf);
  1050. if (ret < 0) {
  1051. dump_error(s, "dump: failed to write page data(zero page).\n");
  1052. goto out;
  1053. }
  1054. offset_data += TARGET_PAGE_SIZE;
  1055. /*
  1056. * dump memory to vmcore page by page. All zero pages are represented by the
  1057. * single zero page stored in the first page of the page section
  1058. */
  1059. while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
  1060. /* check zero page */
  1061. if (is_zero_page(buf, TARGET_PAGE_SIZE)) {
  1062. ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
  1063. false);
  1064. if (ret < 0) {
  1065. dump_error(s, "dump: failed to write page desc.\n");
  1066. goto out;
  1067. }
  1068. } else {
  1069. /*
  1070. * not zero page, then:
  1071. * 1. compress the page
  1072. * 2. write the compressed page into the cache of page_data
  1073. * 3. get page desc of the compressed page and write it into the
  1074. * cache of page_desc
  1075. *
  1076. * only one compression format will be used here, since
  1077. * s->flag_compress is set. But when compression fails,
  1078. * we fall back to saving the page in plaintext.
  1079. */
  1080. size_out = len_buf_out;
  1081. if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
  1082. (compress2(buf_out, (uLongf *)&size_out, buf,
  1083. TARGET_PAGE_SIZE, Z_BEST_SPEED) == Z_OK) &&
  1084. (size_out < TARGET_PAGE_SIZE)) {
  1085. pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
  1086. pd.size = cpu_to_dump32(s, size_out);
  1087. ret = write_cache(&page_data, buf_out, size_out, false);
  1088. if (ret < 0) {
  1089. dump_error(s, "dump: failed to write page data.\n");
  1090. goto out;
  1091. }
  1092. #ifdef CONFIG_LZO
  1093. } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
  1094. (lzo1x_1_compress(buf, TARGET_PAGE_SIZE, buf_out,
  1095. (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
  1096. (size_out < TARGET_PAGE_SIZE)) {
  1097. pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
  1098. pd.size = cpu_to_dump32(s, size_out);
  1099. ret = write_cache(&page_data, buf_out, size_out, false);
  1100. if (ret < 0) {
  1101. dump_error(s, "dump: failed to write page data.\n");
  1102. goto out;
  1103. }
  1104. #endif
  1105. #ifdef CONFIG_SNAPPY
  1106. } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
  1107. (snappy_compress((char *)buf, TARGET_PAGE_SIZE,
  1108. (char *)buf_out, &size_out) == SNAPPY_OK) &&
  1109. (size_out < TARGET_PAGE_SIZE)) {
  1110. pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
  1111. pd.size = cpu_to_dump32(s, size_out);
  1112. ret = write_cache(&page_data, buf_out, size_out, false);
  1113. if (ret < 0) {
  1114. dump_error(s, "dump: failed to write page data.\n");
  1115. goto out;
  1116. }
  1117. #endif
  1118. } else {
  1119. /*
  1120. * fall back to saving in plaintext; size_out must be
  1121. * assigned TARGET_PAGE_SIZE
  1122. */
  1123. pd.flags = cpu_to_dump32(s, 0);
  1124. size_out = TARGET_PAGE_SIZE;
  1125. pd.size = cpu_to_dump32(s, size_out);
  1126. ret = write_cache(&page_data, buf, TARGET_PAGE_SIZE, false);
  1127. if (ret < 0) {
  1128. dump_error(s, "dump: failed to write page data.\n");
  1129. goto out;
  1130. }
  1131. }
  1132. /* get and write page desc here */
  1133. pd.page_flags = cpu_to_dump64(s, 0);
  1134. pd.offset = cpu_to_dump64(s, offset_data);
  1135. offset_data += size_out;
  1136. ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
  1137. if (ret < 0) {
  1138. dump_error(s, "dump: failed to write page desc.\n");
  1139. goto out;
  1140. }
  1141. }
  1142. }
  1143. ret = write_cache(&page_desc, NULL, 0, true);
  1144. if (ret < 0) {
  1145. dump_error(s, "dump: failed to sync cache for page_desc.\n");
  1146. goto out;
  1147. }
  1148. ret = write_cache(&page_data, NULL, 0, true);
  1149. if (ret < 0) {
  1150. dump_error(s, "dump: failed to sync cache for page_data.\n");
  1151. goto out;
  1152. }
  1153. out:
  1154. free_data_cache(&page_desc);
  1155. free_data_cache(&page_data);
  1156. #ifdef CONFIG_LZO
  1157. g_free(wrkmem);
  1158. #endif
  1159. g_free(buf_out);
  1160. return ret;
  1161. }
  1162. static int create_kdump_vmcore(DumpState *s)
  1163. {
  1164. int ret;
  1165. /*
  1166. * the kdump-compressed format is:
  1167. * File offset
  1168. * +------------------------------------------+ 0x0
  1169. * | main header (struct disk_dump_header) |
  1170. * |------------------------------------------+ block 1
  1171. * | sub header (struct kdump_sub_header) |
  1172. * |------------------------------------------+ block 2
  1173. * | 1st-dump_bitmap |
  1174. * |------------------------------------------+ block 2 + X blocks
  1175. * | 2nd-dump_bitmap | (aligned by block)
  1176. * |------------------------------------------+ block 2 + 2 * X blocks
  1177. * | page desc for pfn 0 (struct page_desc) | (aligned by block)
  1178. * | page desc for pfn 1 (struct page_desc) |
  1179. * | : |
  1180. * |------------------------------------------| (not aligned by block)
  1181. * | page data (pfn 0) |
  1182. * | page data (pfn 1) |
  1183. * | : |
  1184. * +------------------------------------------+
  1185. */
  1186. ret = write_start_flat_header(s->fd);
  1187. if (ret < 0) {
  1188. dump_error(s, "dump: failed to write start flat header.\n");
  1189. return -1;
  1190. }
  1191. ret = write_dump_header(s);
  1192. if (ret < 0) {
  1193. return -1;
  1194. }
  1195. ret = write_dump_bitmap(s);
  1196. if (ret < 0) {
  1197. return -1;
  1198. }
  1199. ret = write_dump_pages(s);
  1200. if (ret < 0) {
  1201. return -1;
  1202. }
  1203. ret = write_end_flat_header(s->fd);
  1204. if (ret < 0) {
  1205. dump_error(s, "dump: failed to write end flat header.\n");
  1206. return -1;
  1207. }
  1208. dump_completed(s);
  1209. return 0;
  1210. }
  1211. static ram_addr_t get_start_block(DumpState *s)
  1212. {
  1213. GuestPhysBlock *block;
  1214. if (!s->has_filter) {
  1215. s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
  1216. return 0;
  1217. }
  1218. QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
  1219. if (block->target_start >= s->begin + s->length ||
  1220. block->target_end <= s->begin) {
  1221. /* This block is out of the range */
  1222. continue;
  1223. }
  1224. s->next_block = block;
  1225. if (s->begin > block->target_start) {
  1226. s->start = s->begin - block->target_start;
  1227. } else {
  1228. s->start = 0;
  1229. }
  1230. return s->start;
  1231. }
  1232. return -1;
  1233. }
  1234. static void get_max_mapnr(DumpState *s)
  1235. {
  1236. GuestPhysBlock *last_block;
  1237. last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
  1238. s->max_mapnr = paddr_to_pfn(last_block->target_end);
  1239. }
  1240. static int dump_init(DumpState *s, int fd, bool has_format,
  1241. DumpGuestMemoryFormat format, bool paging, bool has_filter,
  1242. int64_t begin, int64_t length, Error **errp)
  1243. {
  1244. CPUState *cpu;
  1245. int nr_cpus;
  1246. Error *err = NULL;
  1247. int ret;
  1248. /* the kdump-compressed format conflicts with paging and filtering */
  1249. if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
  1250. assert(!paging && !has_filter);
  1251. }
  1252. if (runstate_is_running()) {
  1253. vm_stop(RUN_STATE_SAVE_VM);
  1254. s->resume = true;
  1255. } else {
  1256. s->resume = false;
  1257. }
  1258. /* If we use KVM, we should synchronize the registers before we get dump
  1259. * info or physmap info.
  1260. */
  1261. cpu_synchronize_all_states();
  1262. nr_cpus = 0;
  1263. CPU_FOREACH(cpu) {
  1264. nr_cpus++;
  1265. }
  1266. s->fd = fd;
  1267. s->has_filter = has_filter;
  1268. s->begin = begin;
  1269. s->length = length;
  1270. guest_phys_blocks_init(&s->guest_phys_blocks);
  1271. guest_phys_blocks_append(&s->guest_phys_blocks);
  1272. s->start = get_start_block(s);
  1273. if (s->start == -1) {
  1274. error_set(errp, QERR_INVALID_PARAMETER, "begin");
  1275. goto cleanup;
  1276. }
  1277. /* get dump info: endian, class and architecture.
  1278. * If the target architecture is not supported, cpu_get_dump_info() will
  1279. * return -1.
  1280. */
  1281. ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
  1282. if (ret < 0) {
  1283. error_set(errp, QERR_UNSUPPORTED);
  1284. goto cleanup;
  1285. }
  1286. s->note_size = cpu_get_note_size(s->dump_info.d_class,
  1287. s->dump_info.d_machine, nr_cpus);
  1288. if (s->note_size < 0) {
  1289. error_set(errp, QERR_UNSUPPORTED);
  1290. goto cleanup;
  1291. }
  1292. /* get memory mapping */
  1293. memory_mapping_list_init(&s->list);
  1294. if (paging) {
  1295. qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
  1296. if (err != NULL) {
  1297. error_propagate(errp, err);
  1298. goto cleanup;
  1299. }
  1300. } else {
  1301. qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
  1302. }
  1303. s->nr_cpus = nr_cpus;
  1304. get_max_mapnr(s);
  1305. uint64_t tmp;
  1306. tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), TARGET_PAGE_SIZE);
  1307. s->len_dump_bitmap = tmp * TARGET_PAGE_SIZE;
  1308. /* init for kdump-compressed format */
  1309. if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
  1310. switch (format) {
  1311. case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
  1312. s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
  1313. break;
  1314. case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
  1315. #ifdef CONFIG_LZO
  1316. if (lzo_init() != LZO_E_OK) {
  1317. error_setg(errp, "failed to initialize the LZO library");
  1318. goto cleanup;
  1319. }
  1320. #endif
  1321. s->flag_compress = DUMP_DH_COMPRESSED_LZO;
  1322. break;
  1323. case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
  1324. s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
  1325. break;
  1326. default:
  1327. s->flag_compress = 0;
  1328. }
  1329. return 0;
  1330. }
  1331. if (s->has_filter) {
  1332. memory_mapping_filter(&s->list, s->begin, s->length);
  1333. }
  1334. /*
  1335. * calculate phdr_num
  1336. *
  1337. * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
  1338. */
  1339. s->phdr_num = 1; /* PT_NOTE */
  1340. if (s->list.num < UINT16_MAX - 2) {
  1341. s->phdr_num += s->list.num;
  1342. s->have_section = false;
  1343. } else {
  1344. s->have_section = true;
  1345. s->phdr_num = PN_XNUM;
  1346. s->sh_info = 1; /* PT_NOTE */
  1347. /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
  1348. if (s->list.num <= UINT32_MAX - 1) {
  1349. s->sh_info += s->list.num;
  1350. } else {
  1351. s->sh_info = UINT32_MAX;
  1352. }
  1353. }
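/*
 * Illustrative example (hypothetical numbers): with 70000 memory mappings,
 * s->list.num exceeds UINT16_MAX - 2 (65533), so e_phnum is set to PN_XNUM
 * (0xffff) and the real count, 1 + 70000 = 70001, is stored in sh_info of
 * the single section header; per the ELF gABI, readers consult sh_info of
 * section header 0 whenever e_phnum == PN_XNUM.
 */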
  1354. if (s->dump_info.d_class == ELFCLASS64) {
  1355. if (s->have_section) {
  1356. s->memory_offset = sizeof(Elf64_Ehdr) +
  1357. sizeof(Elf64_Phdr) * s->sh_info +
  1358. sizeof(Elf64_Shdr) + s->note_size;
  1359. } else {
  1360. s->memory_offset = sizeof(Elf64_Ehdr) +
  1361. sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
  1362. }
  1363. } else {
  1364. if (s->have_section) {
  1365. s->memory_offset = sizeof(Elf32_Ehdr) +
  1366. sizeof(Elf32_Phdr) * s->sh_info +
  1367. sizeof(Elf32_Shdr) + s->note_size;
  1368. } else {
  1369. s->memory_offset = sizeof(Elf32_Ehdr) +
  1370. sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
  1371. }
  1372. }
  1373. return 0;
  1374. cleanup:
  1375. guest_phys_blocks_free(&s->guest_phys_blocks);
  1376. if (s->resume) {
  1377. vm_start();
  1378. }
  1379. return -1;
  1380. }
  1381. void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
  1382. int64_t begin, bool has_length,
  1383. int64_t length, bool has_format,
  1384. DumpGuestMemoryFormat format, Error **errp)
  1385. {
  1386. const char *p;
  1387. int fd = -1;
  1388. DumpState *s;
  1389. int ret;
  1390. /*
  1391. * the kdump-compressed format needs the whole memory dumped, so paging and
  1392. * filtering are not supported here.
  1393. */
  1394. if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
  1395. (paging || has_begin || has_length)) {
  1396. error_setg(errp, "kdump-compressed format doesn't support paging or "
  1397. "filter");
  1398. return;
  1399. }
  1400. if (has_begin && !has_length) {
  1401. error_set(errp, QERR_MISSING_PARAMETER, "length");
  1402. return;
  1403. }
  1404. if (!has_begin && has_length) {
  1405. error_set(errp, QERR_MISSING_PARAMETER, "begin");
  1406. return;
  1407. }
  1408. /* check whether lzo/snappy is supported */
  1409. #ifndef CONFIG_LZO
  1410. if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
  1411. error_setg(errp, "kdump-lzo is not available now");
  1412. return;
  1413. }
  1414. #endif
  1415. #ifndef CONFIG_SNAPPY
  1416. if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
  1417. error_setg(errp, "kdump-snappy is not available now");
  1418. return;
  1419. }
  1420. #endif
  1421. #if !defined(WIN32)
  1422. if (strstart(file, "fd:", &p)) {
  1423. fd = monitor_get_fd(cur_mon, p, errp);
  1424. if (fd == -1) {
  1425. return;
  1426. }
  1427. }
  1428. #endif
  1429. if (strstart(file, "file:", &p)) {
  1430. fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
  1431. if (fd < 0) {
  1432. error_setg_file_open(errp, errno, p);
  1433. return;
  1434. }
  1435. }
  1436. if (fd == -1) {
  1437. error_set(errp, QERR_INVALID_PARAMETER, "protocol");
  1438. return;
  1439. }
  1440. s = g_malloc0(sizeof(DumpState));
  1441. ret = dump_init(s, fd, has_format, format, paging, has_begin,
  1442. begin, length, errp);
  1443. if (ret < 0) {
  1444. g_free(s);
  1445. return;
  1446. }
  1447. if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
  1448. if (create_kdump_vmcore(s) < 0) {
  1449. error_set(errp, QERR_IO_ERROR);
  1450. }
  1451. } else {
  1452. if (create_vmcore(s) < 0) {
  1453. error_set(errp, QERR_IO_ERROR);
  1454. }
  1455. }
  1456. g_free(s);
  1457. }
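/*
 * Example QMP usage (illustrative, not part of this file): dump guest memory
 * to a local file as an ELF vmcore without guest paging:
 *
 *     { "execute": "dump-guest-memory",
 *       "arguments": { "paging": false, "protocol": "file:/tmp/vmcore" } }
 *
 * or request a zlib-compressed kdump file instead:
 *
 *     { "execute": "dump-guest-memory",
 *       "arguments": { "paging": false, "protocol": "file:/tmp/vmcore",
 *                      "format": "kdump-zlib" } }
 */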
  1458. DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
  1459. {
  1460. DumpGuestMemoryFormatList *item;
  1461. DumpGuestMemoryCapability *cap =
  1462. g_malloc0(sizeof(DumpGuestMemoryCapability));
  1463. /* elf is always available */
  1464. item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
  1465. cap->formats = item;
  1466. item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;
  1467. /* kdump-zlib is always available */
  1468. item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
  1469. item = item->next;
  1470. item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;
  1471. /* add new item if kdump-lzo is available */
  1472. #ifdef CONFIG_LZO
  1473. item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
  1474. item = item->next;
  1475. item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
  1476. #endif
  1477. /* add new item if kdump-snappy is available */
  1478. #ifdef CONFIG_SNAPPY
  1479. item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
  1480. item = item->next;
  1481. item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
  1482. #endif
  1483. return cap;
  1484. }