dump.c 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743
  1. /*
  2. * QEMU dump
  3. *
  4. * Copyright Fujitsu, Corp. 2011, 2012
  5. *
  6. * Authors:
  7. * Wen Congyang <wency@cn.fujitsu.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2 or later.
  10. * See the COPYING file in the top-level directory.
  11. *
  12. */
  13. #include "qemu/osdep.h"
  14. #include "qemu-common.h"
  15. #include "elf.h"
  16. #include "cpu.h"
  17. #include "exec/cpu-all.h"
  18. #include "exec/hwaddr.h"
  19. #include "monitor/monitor.h"
  20. #include "sysemu/kvm.h"
  21. #include "sysemu/dump.h"
  22. #include "sysemu/sysemu.h"
  23. #include "sysemu/memory_mapping.h"
  24. #include "sysemu/cpus.h"
  25. #include "qapi/qmp/qerror.h"
  26. #include "qmp-commands.h"
  27. #include <zlib.h>
  28. #ifdef CONFIG_LZO
  29. #include <lzo/lzo1x.h>
  30. #endif
  31. #ifdef CONFIG_SNAPPY
  32. #include <snappy-c.h>
  33. #endif
  34. #ifndef ELF_MACHINE_UNAME
  35. #define ELF_MACHINE_UNAME "Unknown"
  36. #endif
  37. uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
  38. {
  39. if (s->dump_info.d_endian == ELFDATA2LSB) {
  40. val = cpu_to_le16(val);
  41. } else {
  42. val = cpu_to_be16(val);
  43. }
  44. return val;
  45. }
  46. uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
  47. {
  48. if (s->dump_info.d_endian == ELFDATA2LSB) {
  49. val = cpu_to_le32(val);
  50. } else {
  51. val = cpu_to_be32(val);
  52. }
  53. return val;
  54. }
  55. uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
  56. {
  57. if (s->dump_info.d_endian == ELFDATA2LSB) {
  58. val = cpu_to_le64(val);
  59. } else {
  60. val = cpu_to_be64(val);
  61. }
  62. return val;
  63. }
/*
 * Release all dump resources: free the captured guest-physical block list
 * and the memory-mapping list, close the output fd, and restart the VM if
 * it was paused for the dump (s->resume).  Always returns 0.
 */
static int dump_cleanup(DumpState *s)
{
    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    close(s->fd);
    if (s->resume) {
        vm_start();
    }
    return 0;
}
/*
 * Report a fatal dump failure: tear down the dump state first, then set
 * *errp to @reason.  Cleanup runs before error_setg so the VM is resumed
 * even on the error path.
 */
static void dump_error(DumpState *s, const char *reason, Error **errp)
{
    dump_cleanup(s);
    error_setg(errp, "%s", reason);
}
  79. static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
  80. {
  81. DumpState *s = opaque;
  82. size_t written_size;
  83. written_size = qemu_write_full(s->fd, buf, size);
  84. if (written_size != size) {
  85. return -1;
  86. }
  87. return 0;
  88. }
/*
 * Write the ELFCLASS64 ELF file header for the vmcore to s->fd.
 * All multi-byte fields are converted to the dump's byte order via
 * cpu_to_dump{16,32,64}.  When program headers overflow (s->have_section),
 * a single section header is advertised right after the phdr table.
 * On write failure, reports through dump_error() (which also cleans up).
 */
static void write_elf64_header(DumpState *s, Error **errp)
{
    Elf64_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    /* ELF magic bytes "\177ELF" */
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    /* program header table immediately follows the ELF header */
    elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        /* section header table follows the s->sh_info program headers */
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump64(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header", errp);
    }
}
/*
 * Write the ELFCLASS32 ELF file header for the vmcore to s->fd.
 * 32-bit counterpart of write_elf64_header(); field-by-field identical
 * apart from the Elf32_* layouts and 32-bit e_phoff/e_shoff.
 */
static void write_elf32_header(DumpState *s, Error **errp)
{
    Elf32_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    /* ELF magic bytes "\177ELF" */
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    /* program header table immediately follows the ELF header */
    elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        /* section header table follows the s->sh_info program headers */
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump32(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header", errp);
    }
}
/*
 * Write one 64-bit PT_LOAD program header for @memory_mapping.
 * @offset is the segment's file offset in the vmcore and @filesz the number
 * of bytes actually stored there; p_memsz is the full mapping length, so
 * any tail beyond filesz is zero-filled by the loader.
 * NOTE(review): @phdr_index is currently unused here — the caller advances
 * the file sequentially, so the index is implicit.
 */
static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf64_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump64(s, offset);
    phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump64(s, filesz);
    phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr);

    /* the file portion must never exceed the in-memory size */
    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table", errp);
    }
}
/*
 * Write one 32-bit PT_LOAD program header for @memory_mapping.
 * 32-bit counterpart of write_elf64_load(); see that function for the
 * meaning of @offset/@filesz and the unused @phdr_index.
 */
static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump32(s, offset);
    phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump32(s, filesz);
    phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump32(s, memory_mapping->virt_addr);

    /* the file portion must never exceed the in-memory size */
    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table", errp);
    }
}
/*
 * Write the 64-bit PT_NOTE program header.  The note data itself lives
 * immediately before the memory contents in the vmcore, so its file offset
 * is s->memory_offset - s->note_size.  p_paddr/p_vaddr are 0: notes have
 * no guest address.
 */
static void write_elf64_note(DumpState *s, Error **errp)
{
    Elf64_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump64(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump64(s, s->note_size);
    phdr.p_memsz = cpu_to_dump64(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table", errp);
    }
}
  198. static inline int cpu_index(CPUState *cpu)
  199. {
  200. return cpu->cpu_index + 1;
  201. }
/*
 * Emit all 64-bit ELF notes through writer @f (either fd_write_vmcore or
 * buf_write_note).  Two passes over the CPUs: first the per-CPU register
 * notes, then the QEMU-specific state notes.  Stops at the first failure.
 */
static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes", errp);
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status", errp);
            return;
        }
    }
}
/*
 * Write the 32-bit PT_NOTE program header.  32-bit counterpart of
 * write_elf64_note(): note data sits just before the memory contents at
 * file offset s->memory_offset - s->note_size.
 */
static void write_elf32_note(DumpState *s, Error **errp)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump32(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump32(s, s->note_size);
    phdr.p_memsz = cpu_to_dump32(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table", errp);
    }
}
/*
 * Emit all 32-bit ELF notes through writer @f.  Same two-pass structure as
 * write_elf64_notes(): per-CPU register notes first, then QEMU notes.
 */
static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes", errp);
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status", errp);
            return;
        }
    }
}
  263. static void write_elf_section(DumpState *s, int type, Error **errp)
  264. {
  265. Elf32_Shdr shdr32;
  266. Elf64_Shdr shdr64;
  267. int shdr_size;
  268. void *shdr;
  269. int ret;
  270. if (type == 0) {
  271. shdr_size = sizeof(Elf32_Shdr);
  272. memset(&shdr32, 0, shdr_size);
  273. shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
  274. shdr = &shdr32;
  275. } else {
  276. shdr_size = sizeof(Elf64_Shdr);
  277. memset(&shdr64, 0, shdr_size);
  278. shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
  279. shdr = &shdr64;
  280. }
  281. ret = fd_write_vmcore(&shdr, shdr_size, s);
  282. if (ret < 0) {
  283. dump_error(s, "dump: failed to write section header table", errp);
  284. }
  285. }
  286. static void write_data(DumpState *s, void *buf, int length, Error **errp)
  287. {
  288. int ret;
  289. ret = fd_write_vmcore(buf, length, s);
  290. if (ret < 0) {
  291. dump_error(s, "dump: failed to save memory", errp);
  292. }
  293. }
/* write the memory to vmcore. 1 page per I/O. */
static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                         int64_t size, Error **errp)
{
    int64_t i;
    Error *local_err = NULL;

    /* whole pages first ... */
    for (i = 0; i < size / s->dump_info.page_size; i++) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    /* ... then the sub-page remainder; i still holds the page count here */
    if ((size % s->dump_info.page_size) != 0) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   size % s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    /* with a filter, addresses outside [s->begin, s->begin+s->length) are
     * not in the vmcore at all */
    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    /* walk the blocks in file order, accumulating each block's stored size
     * into offset until the block containing phys_addr is found */
    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            /* clip the block to the filter window */
            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}
/*
 * Write one PT_LOAD program header per memory mapping.  Index 0 is the
 * PT_NOTE header written elsewhere, so phdr_index starts at 1.  The file
 * offset and stored size of each mapping come from get_offset_range();
 * iteration stops once the advertised header count (sh_info when the count
 * overflowed, else phdr_num) is reached.
 */
static void write_elf_loads(DumpState *s, Error **errp)
{
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    uint32_t max_index;
    Error *local_err = NULL;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (s->dump_info.d_class == ELFCLASS64) {
            write_elf64_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        } else {
            write_elf32_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        }

        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }
}
/* write elf header, PT_NOTE and elf note to vmcore. */
static void dump_begin(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    /*
     * the vmcore's format is:
     *  --------------
     *  |  elf header |
     *  --------------
     *  |  PT_NOTE    |
     *  --------------
     *  |  PT_LOAD    |
     *  --------------
     *  |  ......     |
     *  --------------
     *  |  PT_LOAD    |
     *  --------------
     *  |  sec_hdr    |
     *  --------------
     *  |  elf note   |
     *  --------------
     *  |  memory     |
     *  --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        write_elf64_header(s, &local_err);
    } else {
        write_elf32_header(s, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* the two branches below are identical apart from the 64/32-bit
     * helpers; the write order (note phdr, loads, section, note data)
     * must match the layout above */
    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        write_elf64_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 1, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf64_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    } else {
        /* write PT_NOTE to vmcore */
        write_elf32_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 0, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf32_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}
/* Finish the dump: release resources and resume the VM if needed. */
static void dump_completed(DumpState *s)
{
    dump_cleanup(s);
}
/*
 * Advance to the next guest-physical block to dump, starting after @block
 * (the parameter is reused as the iteration cursor).  Skips blocks fully
 * outside the filter range and sets s->start to the in-block offset where
 * dumping should begin.  Returns 1 when there is no further block, else 0
 * with s->next_block/s->start updated.
 */
static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            /* filter window starts inside this block: skip the head */
            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}
/* write all memory to vmcore */
static void dump_iterate(DumpState *s, Error **errp)
{
    GuestPhysBlock *block;
    int64_t size;
    Error *local_err = NULL;

    do {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            /* trim the part before the filter window (s->start) and, if
             * the window ends inside this block, the part after it */
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        write_memory(s, block, s->start, size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

    } while (!get_next_block(s, block));

    dump_completed(s);
}
  545. static void create_vmcore(DumpState *s, Error **errp)
  546. {
  547. Error *local_err = NULL;
  548. dump_begin(s, &local_err);
  549. if (local_err) {
  550. error_propagate(errp, local_err);
  551. return;
  552. }
  553. dump_iterate(s, errp);
  554. }
  555. static int write_start_flat_header(int fd)
  556. {
  557. MakedumpfileHeader *mh;
  558. int ret = 0;
  559. QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
  560. mh = g_malloc0(MAX_SIZE_MDF_HEADER);
  561. memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
  562. MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));
  563. mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
  564. mh->version = cpu_to_be64(VERSION_FLAT_HEADER);
  565. size_t written_size;
  566. written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
  567. if (written_size != MAX_SIZE_MDF_HEADER) {
  568. ret = -1;
  569. }
  570. g_free(mh);
  571. return ret;
  572. }
  573. static int write_end_flat_header(int fd)
  574. {
  575. MakedumpfileDataHeader mdh;
  576. mdh.offset = END_FLAG_FLAT_HEADER;
  577. mdh.buf_size = END_FLAG_FLAT_HEADER;
  578. size_t written_size;
  579. written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
  580. if (written_size != sizeof(mdh)) {
  581. return -1;
  582. }
  583. return 0;
  584. }
/*
 * Write one flat-format record to @fd: a big-endian MakedumpfileDataHeader
 * describing (@offset, @size), followed by the @size payload bytes from
 * @buf.  makedumpfile uses the header to reassemble the file at @offset.
 * Returns 0 on success, -1 on any short write.
 */
static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);
    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}
  601. static int buf_write_note(const void *buf, size_t size, void *opaque)
  602. {
  603. DumpState *s = opaque;
  604. /* note_buf is not enough */
  605. if (s->note_buf_offset + size > s->note_size) {
  606. return -1;
  607. }
  608. memcpy(s->note_buf + s->note_buf_offset, buf, size);
  609. s->note_buf_offset += size;
  610. return 0;
  611. }
/* write common header, sub header and elf note to vmcore */
static void create_header32(DumpState *s, Error **errp)
{
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write common header, the version of kdump-compressed format is 6th */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    /* sub header size is expressed in blocks, rounded up to cover the
     * sub header struct plus the appended note data */
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    /* two bitmaps (1st and 2nd dump_bitmap), hence * 2 */
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        dump_error(s, "dump: failed to write disk dump header", errp);
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    /* note data immediately follows the sub header struct */
    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump32(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        dump_error(s, "dump: failed to write kdump sub header", errp);
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf32_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        dump_error(s, "dump: failed to write notes", errp);
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    /* NOTE(review): s->note_buf is freed but left dangling — presumably it
     * is not reused after this point; verify callers. */
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}
/* write common header, sub header and elf note to vmcore */
static void create_header64(DumpState *s, Error **errp)
{
    DiskDumpHeader64 *dh = NULL;
    KdumpSubHeader64 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write common header, the version of kdump-compressed format is 6th */
    size = sizeof(DiskDumpHeader64);
    dh = g_malloc0(size);

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    /* sub header size is expressed in blocks, rounded up to cover the
     * sub header struct plus the appended note data */
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    /* two bitmaps (1st and 2nd dump_bitmap), hence * 2 */
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        dump_error(s, "dump: failed to write disk dump header", errp);
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader64);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    /* note data immediately follows the sub header struct */
    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump64(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        dump_error(s, "dump: failed to write kdump sub header", errp);
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf64_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }

    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        dump_error(s, "dump: failed to write notes", errp);
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    /* NOTE(review): s->note_buf is freed but left dangling — presumably it
     * is not reused after this point; verify callers. */
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}
  784. static void write_dump_header(DumpState *s, Error **errp)
  785. {
  786. Error *local_err = NULL;
  787. if (s->dump_info.d_class == ELFCLASS32) {
  788. create_header32(s, &local_err);
  789. } else {
  790. create_header64(s, &local_err);
  791. }
  792. if (local_err) {
  793. error_propagate(errp, local_err);
  794. }
  795. }
/*
 * Size of the staging buffer used for dump_bitmap I/O: one guest page.
 * set_dump_bitmap() and write_dump_bitmap() both derive their buffer and
 * per-buffer bit counts from this value.
 */
static size_t dump_bitmap_get_bufsize(DumpState *s)
{
    return s->dump_info.page_size;
}
/*
 * set dump_bitmap sequencely. the bit before last_pfn is not allowed to be
 * rewritten, so if need to set the first bit, set last_pfn and pfn to 0.
 * set_dump_bitmap will always leave the recently set bit un-sync. And setting
 * (last bit + sizeof(buf) * 8) to 0 will do flushing the content in buf into
 * vmcore, ie. synchronizing un-sync bit into vmcore.
 *
 * @last_pfn: the most recently set pfn (callers must pass pfns in
 *            non-decreasing order; asserted below)
 * @pfn:      the pfn whose bit is set/cleared now
 * @value:    true to set the bit, false to clear it
 * @buf:      caller-owned staging buffer of dump_bitmap_get_bufsize(s) bytes
 *
 * Returns 0 on success, -1 if writing a flushed buffer to the fd failed.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* should not set the previous place */
    assert(last_pfn <= pfn);

    /*
     * if the bit needed to be set is not cached in buf, flush the data in buf
     * to vmcore firstly.
     * making new_offset be bigger than old_offset can also sync remained data
     * into vmcore.
     */
    old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
    new_offset = bitmap_bufsize * (pfn / bits_per_buf);

    /* flush every full buffer between the old and new positions */
    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        /* start the next buffer from a clean slate */
        memset(buf, 0, bitmap_bufsize);
        old_offset += bitmap_bufsize;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % bits_per_buf) / CHAR_BIT;
    bit = (pfn % bits_per_buf) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}
  852. static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
  853. {
  854. int target_page_shift = ctz32(s->dump_info.page_size);
  855. return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
  856. }
  857. static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
  858. {
  859. int target_page_shift = ctz32(s->dump_info.page_size);
  860. return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
  861. }
/*
 * exam every page and return the page frame number and the address of the page.
 * bufptr can be NULL. note: the blocks here is supposed to reflect guest-phys
 * blocks, so block->target_start and block->target_end should be interal
 * multiples of the target page size.
 *
 * Iterator protocol: call with *blockptr == NULL to start; each subsequent
 * call advances *pfnptr by one page, crossing into the next block when the
 * current one is exhausted. Returns false when all blocks are consumed.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        /* blocks must be page-aligned, per the comment above */
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
        return true;
    }

    /* advance to the next page within (or past) the current block */
    *pfnptr = *pfnptr + 1;
    addr = dump_pfn_to_paddr(s, *pfnptr);

    if ((addr >= block->target_start) &&
        (addr + s->dump_info.page_size <= block->target_end)) {
        /* still inside the current block: host address follows linearly */
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        buf = block->host_addr;
    }

    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}
/*
 * Build and write both copies of the dump_bitmap: walk every guest page,
 * set its bit, and count dumpable pages into s->num_dumpable (used later
 * by write_dump_pages() to size the page-descriptor area).
 */
static void write_dump_bitmap(DumpState *s, Error **errp)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(bitmap_bufsize);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * exam memory page by page, and set the bit in dump_bitmap corresponded
     * to the existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to set dump_bitmap", errp);
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap will always leave the recently set bit un-sync. Here we
     * set the remaining bits from last_pfn to the end of the bitmap buffer to
     * 0. With those set, the un-sync bit will be synchronized into the vmcore.
     */
    if (num_dumpable > 0) {
        /* clearing a bit one whole buffer ahead forces the flush */
        ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to sync dump_bitmap", errp);
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);
}
  952. static void prepare_data_cache(DataCache *data_cache, DumpState *s,
  953. off_t offset)
  954. {
  955. data_cache->fd = s->fd;
  956. data_cache->data_size = 0;
  957. data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
  958. data_cache->buf = g_malloc0(data_cache->buf_size);
  959. data_cache->offset = offset;
  960. }
  961. static int write_cache(DataCache *dc, const void *buf, size_t size,
  962. bool flag_sync)
  963. {
  964. /*
  965. * dc->buf_size should not be less than size, otherwise dc will never be
  966. * enough
  967. */
  968. assert(size <= dc->buf_size);
  969. /*
  970. * if flag_sync is set, synchronize data in dc->buf into vmcore.
  971. * otherwise check if the space is enough for caching data in buf, if not,
  972. * write the data in dc->buf to dc->fd and reset dc->buf
  973. */
  974. if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
  975. (flag_sync && dc->data_size > 0)) {
  976. if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
  977. return -1;
  978. }
  979. dc->offset += dc->data_size;
  980. dc->data_size = 0;
  981. }
  982. if (!flag_sync) {
  983. memcpy(dc->buf + dc->data_size, buf, size);
  984. dc->data_size += size;
  985. }
  986. return 0;
  987. }
  988. static void free_data_cache(DataCache *data_cache)
  989. {
  990. g_free(data_cache->buf);
  991. }
  992. static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
  993. {
  994. switch (flag_compress) {
  995. case DUMP_DH_COMPRESSED_ZLIB:
  996. return compressBound(page_size);
  997. case DUMP_DH_COMPRESSED_LZO:
  998. /*
  999. * LZO will expand incompressible data by a little amount. Please check
  1000. * the following URL to see the expansion calculation:
  1001. * http://www.oberhumer.com/opensource/lzo/lzofaq.php
  1002. */
  1003. return page_size + page_size / 16 + 64 + 3;
  1004. #ifdef CONFIG_SNAPPY
  1005. case DUMP_DH_COMPRESSED_SNAPPY:
  1006. return snappy_max_compressed_length(page_size);
  1007. #endif
  1008. }
  1009. return 0;
  1010. }
/*
 * check if the page is all 0
 *
 * Thin wrapper over buffer_is_zero() so the page-dump loop reads clearly;
 * zero pages all share a single pre-written page_data entry.
 */
static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
{
    return buffer_is_zero(buf, page_size);
}
/*
 * Write the page-descriptor and page-data sections of the kdump-compressed
 * vmcore. Every guest page is either mapped to the shared zero-page entry,
 * compressed with the configured algorithm, or stored in plaintext when
 * compression does not shrink it.
 */
static void write_dump_pages(DumpState *s, Error **errp)
{
    int ret = 0;
    DataCache page_desc, page_data;
    size_t len_buf_out, size_out;
#ifdef CONFIG_LZO
    lzo_bytep wrkmem = NULL;
#endif
    uint8_t *buf_out = NULL;
    off_t offset_desc, offset_data;
    PageDescriptor pd, pd_zero;
    uint8_t *buf;
    GuestPhysBlock *block_iter = NULL;
    uint64_t pfn_iter;

    /* get offset of page_desc and page_data in dump file */
    offset_desc = s->offset_page;
    /* descriptors come first; data follows after num_dumpable descriptors */
    offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;

    prepare_data_cache(&page_desc, s, offset_desc);
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
    assert(len_buf_out != 0);

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
#endif

    buf_out = g_malloc(len_buf_out);

    /*
     * init zero page's page_desc and page_data, because every zero page
     * uses the same page_data
     */
    pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
    pd_zero.flags = cpu_to_dump32(s, 0);
    pd_zero.offset = cpu_to_dump64(s, offset_data);
    pd_zero.page_flags = cpu_to_dump64(s, 0);
    buf = g_malloc0(s->dump_info.page_size);
    ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
    g_free(buf);
    if (ret < 0) {
        dump_error(s, "dump: failed to write page data (zero page)", errp);
        goto out;
    }

    offset_data += s->dump_info.page_size;

    /*
     * dump memory to vmcore page by page. zero page will all be resided in the
     * first page of page section
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (is_zero_page(buf, s->dump_info.page_size)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
                dump_error(s, "dump: failed to write page desc", errp);
                goto out;
            }
        } else {
            /*
             * not zero page, then:
             * 1. compress the page
             * 2. write the compressed page into the cache of page_data
             * 3. get page desc of the compressed page and write it into the
             *    cache of page_desc
             *
             * only one compression format will be used here, for
             * s->flag_compress is set. But when compression fails to work,
             * we fall back to save in plaintext.
             */
            size_out = len_buf_out;
            /*
             * NOTE(review): size_out is a size_t but is passed to zlib as
             * uLongf* and to LZO as lzo_uint* via pointer casts below; this
             * assumes those types have the same size/representation as
             * size_t on all supported hosts — TODO confirm per platform.
             */
            if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                (compress2(buf_out, (uLongf *)&size_out, buf,
                           s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
                (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    dump_error(s, "dump: failed to write page data", errp);
                    goto out;
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                       (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
                       (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                       (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    dump_error(s, "dump: failed to write page data", errp);
                    goto out;
                }
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                       (snappy_compress((char *)buf, s->dump_info.page_size,
                       (char *)buf_out, &size_out) == SNAPPY_OK) &&
                       (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    dump_error(s, "dump: failed to write page data", errp);
                    goto out;
                }
#endif
            } else {
                /*
                 * fall back to save in plaintext, size_out should be
                 * assigned the target's page size
                 */
                pd.flags = cpu_to_dump32(s, 0);
                size_out = s->dump_info.page_size;
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf,
                                  s->dump_info.page_size, false);
                if (ret < 0) {
                    dump_error(s, "dump: failed to write page data", errp);
                    goto out;
                }
            }

            /* get and write page desc here */
            pd.page_flags = cpu_to_dump64(s, 0);
            pd.offset = cpu_to_dump64(s, offset_data);
            offset_data += size_out;

            ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
            if (ret < 0) {
                dump_error(s, "dump: failed to write page desc", errp);
                goto out;
            }
        }
    }

    /* flush whatever remains buffered in both caches */
    ret = write_cache(&page_desc, NULL, 0, true);
    if (ret < 0) {
        dump_error(s, "dump: failed to sync cache for page_desc", errp);
        goto out;
    }
    ret = write_cache(&page_data, NULL, 0, true);
    if (ret < 0) {
        dump_error(s, "dump: failed to sync cache for page_data", errp);
        goto out;
    }

out:
    free_data_cache(&page_desc);
    free_data_cache(&page_data);

#ifdef CONFIG_LZO
    g_free(wrkmem);
#endif

    g_free(buf_out);
}
/*
 * Produce a complete kdump-compressed vmcore on s->fd: flat start marker,
 * headers, bitmaps, page descriptors/data, flat end marker. Each stage
 * aborts the dump on failure; dump_completed() runs only on full success.
 */
static void create_kdump_vmcore(DumpState *s, Error **errp)
{
    int ret;
    Error *local_err = NULL;

    /*
     * the kdump-compressed format is:
     *                                               File offset
     *  +------------------------------------------+ 0x0
     *  |    main header (struct disk_dump_header) |
     *  |------------------------------------------+ block 1
     *  |    sub header (struct kdump_sub_header)  |
     *  |------------------------------------------+ block 2
     *  |            1st-dump_bitmap               |
     *  |------------------------------------------+ block 2 + X blocks
     *  |            2nd-dump_bitmap               | (aligned by block)
     *  |------------------------------------------+ block 2 + 2 * X blocks
     *  |  page desc for pfn 0 (struct page_desc)  | (aligned by block)
     *  |  page desc for pfn 1 (struct page_desc)  |
     *  |                   :                      |
     *  |------------------------------------------| (not aligned by block)
     *  |         page data (pfn 0)                |
     *  |         page data (pfn 1)                |
     *  |                   :                      |
     *  +------------------------------------------+
     */
    ret = write_start_flat_header(s->fd);
    if (ret < 0) {
        dump_error(s, "dump: failed to write start flat header", errp);
        return;
    }

    write_dump_header(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_bitmap(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_pages(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = write_end_flat_header(s->fd);
    if (ret < 0) {
        dump_error(s, "dump: failed to write end flat header", errp);
        return;
    }

    dump_completed(s);
}
  1219. static ram_addr_t get_start_block(DumpState *s)
  1220. {
  1221. GuestPhysBlock *block;
  1222. if (!s->has_filter) {
  1223. s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
  1224. return 0;
  1225. }
  1226. QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
  1227. if (block->target_start >= s->begin + s->length ||
  1228. block->target_end <= s->begin) {
  1229. /* This block is out of the range */
  1230. continue;
  1231. }
  1232. s->next_block = block;
  1233. if (s->begin > block->target_start) {
  1234. s->start = s->begin - block->target_start;
  1235. } else {
  1236. s->start = 0;
  1237. }
  1238. return s->start;
  1239. }
  1240. return -1;
  1241. }
  1242. static void get_max_mapnr(DumpState *s)
  1243. {
  1244. GuestPhysBlock *last_block;
  1245. last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
  1246. s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
  1247. }
/*
 * Initialize @s for a dump: stop the VM if needed, collect CPU/memory
 * state, compute dump geometry (bitmap length, max_mapnr), and either
 * select a kdump compression format or precompute the ELF layout
 * (phdr_num / sh_info / memory_offset). On failure sets *errp and runs
 * dump_cleanup().
 */
static void dump_init(DumpState *s, int fd, bool has_format,
                      DumpGuestMemoryFormat format, bool paging, bool has_filter,
                      int64_t begin, int64_t length, Error **errp)
{
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    int ret;

    /* kdump-compressed is conflict with paging and filter */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        assert(!paging && !has_filter);
    }

    /* remember whether to resume the guest after the dump finishes */
    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    /* If we use KVM, we should synchronize the registers before we get dump
     * info or physmap info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    CPU_FOREACH(cpu) {
        nr_cpus++;
    }

    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;

    memory_mapping_list_init(&s->list);

    guest_phys_blocks_init(&s->guest_phys_blocks);
    guest_phys_blocks_append(&s->guest_phys_blocks);

    s->start = get_start_block(s);
    if (s->start == -1) {
        /* filter range does not intersect any guest-phys block */
        error_setg(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /* get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     */
    ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
    if (ret < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /* fall back to the build-time target page size if arch code set none */
    if (!s->dump_info.page_size) {
        s->dump_info.page_size = TARGET_PAGE_SIZE;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /* get memory mapping */
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            goto cleanup;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
    }

    s->nr_cpus = nr_cpus;

    get_max_mapnr(s);

    uint64_t tmp;
    /* one bit per page, rounded up to a whole number of pages of bitmap */
    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
                       s->dump_info.page_size);
    s->len_dump_bitmap = tmp * s->dump_info.page_size;

    /* init for kdump-compressed format */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        switch (format) {
        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
            s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
            if (lzo_init() != LZO_E_OK) {
                error_setg(errp, "failed to initialize the LZO library");
                goto cleanup;
            }
#endif
            s->flag_compress = DUMP_DH_COMPRESSED_LZO;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
            s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
            break;

        default:
            s->flag_compress = 0;
        }

        /* kdump path needs no ELF layout computation below */
        return;
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        /* too many segments: use PN_XNUM and put the real count in sh_info */
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    /* file offset where memory contents start, after headers and notes */
    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return;

cleanup:
    dump_cleanup(s);
}
/*
 * QMP handler for dump-guest-memory: validate parameters, resolve the
 * destination (an "fd:" monitor fd or a "file:" path), then run either the
 * ELF or kdump-compressed dump path.
 */
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length,
                           int64_t length, bool has_format,
                           DumpGuestMemoryFormat format, Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    Error *local_err = NULL;

    /*
     * kdump-compressed format need the whole memory dumped, so paging or
     * filter is not supported here.
     */
    if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
        (paging || has_begin || has_length)) {
        error_setg(errp, "kdump-compressed format doesn't support paging or "
                         "filter");
        return;
    }
    /* begin and length must be supplied together */
    if (has_begin && !has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

    /* check whether lzo/snappy is supported */
#ifndef CONFIG_LZO
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
        error_setg(errp, "kdump-lzo is not available now");
        return;
    }
#endif

#ifndef CONFIG_SNAPPY
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
        error_setg(errp, "kdump-snappy is not available now");
        return;
    }
#endif

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    /* neither "fd:" nor "file:" matched (or "fd:" on WIN32) */
    if (fd == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc0(sizeof(DumpState));

    /*
     * NOTE(review): if dump_init() fails, fd appears to be left open here
     * (dump_init's visible cleanup path does not take it over) — verify
     * against dump_cleanup() and consider closing fd on this error path.
     */
    dump_init(s, fd, has_format, format, paging, has_begin,
              begin, length, &local_err);
    if (local_err) {
        g_free(s);
        error_propagate(errp, local_err);
        return;
    }

    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        create_kdump_vmcore(s, errp);
    } else {
        create_vmcore(s, errp);
    }

    g_free(s);
}
  1463. DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
  1464. {
  1465. DumpGuestMemoryFormatList *item;
  1466. DumpGuestMemoryCapability *cap =
  1467. g_malloc0(sizeof(DumpGuestMemoryCapability));
  1468. /* elf is always available */
  1469. item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
  1470. cap->formats = item;
  1471. item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;
  1472. /* kdump-zlib is always available */
  1473. item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
  1474. item = item->next;
  1475. item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;
  1476. /* add new item if kdump-lzo is available */
  1477. #ifdef CONFIG_LZO
  1478. item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
  1479. item = item->next;
  1480. item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
  1481. #endif
  1482. /* add new item if kdump-snappy is available */
  1483. #ifdef CONFIG_SNAPPY
  1484. item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
  1485. item = item->next;
  1486. item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
  1487. #endif
  1488. return cap;
  1489. }