dump.c

/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "elf.h"
#include "cpu.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/sysemu.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/cpus.h"
#include "qapi/qmp/qerror.h"
#include "qmp-commands.h"
#include "qapi-event.h"
#include "qemu/error-report.h"
#include "hw/misc/vmcoreinfo.h"

#include <zlib.h>
#ifdef CONFIG_LZO
#include <lzo/lzo1x.h>
#endif
#ifdef CONFIG_SNAPPY
#include <snappy-c.h>
#endif

#ifndef ELF_MACHINE_UNAME
#define ELF_MACHINE_UNAME "Unknown"
#endif

#define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */

#define ELF_NOTE_SIZE(hdr_size, name_size, desc_size)   \
    ((DIV_ROUND_UP((hdr_size), 4)  +                    \
      DIV_ROUND_UP((name_size), 4) +                    \
      DIV_ROUND_UP((desc_size), 4)) * 4)

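/*
 * Convert a host-order value to the byte order recorded in the dump's
 * ELF header (d_endian), so the vmcore is laid out for the guest
 * architecture it describes rather than for the host producing it.
 */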
uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

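/*
 * Release all per-dump state and, if the guest was paused for the dump,
 * resume it (taking the iothread lock first when running detached).
 */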
static int dump_cleanup(DumpState *s)
{
    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    close(s->fd);
    g_free(s->guest_note);
    s->guest_note = NULL;
    if (s->resume) {
        if (s->detached) {
            qemu_mutex_lock_iothread();
        }
        vm_start();
        if (s->detached) {
            qemu_mutex_unlock_iothread();
        }
    }

    return 0;
}

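/* WriteCoreDumpFunction that writes straight to the dump file descriptor */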
static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

static void write_elf64_header(DumpState *s, Error **errp)
{
    Elf64_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump64(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write elf header");
    }
}

static void write_elf32_header(DumpState *s, Error **errp)
{
    Elf32_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump32(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write elf header");
    }
}

static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf64_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump64(s, offset);
    phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump64(s, filesz);
    phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write program header table");
    }
}

static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump32(s, offset);
    phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump32(s, filesz);
    phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump32(s, memory_mapping->virt_addr);

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write program header table");
    }
}

static void write_elf64_note(DumpState *s, Error **errp)
{
    Elf64_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump64(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump64(s, s->note_size);
    phdr.p_memsz = cpu_to_dump64(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write program header table");
    }
}

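/* the CPU id handed to cpu_write_elf*_note below is 1-based, hence the +1 */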
static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

static void write_guest_note(WriteCoreDumpFunction f, DumpState *s,
                             Error **errp)
{
    int ret;

    if (s->guest_note) {
        ret = f(s->guest_note, s->guest_note_size, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write guest note");
        }
    }
}

static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}

static void write_elf32_note(DumpState *s, Error **errp)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump32(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump32(s, s->note_size);
    phdr.p_memsz = cpu_to_dump32(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write program header table");
    }
}

static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}

static void write_elf_section(DumpState *s, int type, Error **errp)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr64;
    }

    /* shdr already points at the header; writing &shdr would dump the
     * pointer itself instead of the section header */
    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write section header table");
    }
}

static void write_data(DumpState *s, void *buf, int length, Error **errp)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        error_setg(errp, "dump: failed to save memory");
    } else {
        s->written_size += length;
    }
}

/* write the memory to vmcore. 1 page per I/O. */
static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                         int64_t size, Error **errp)
{
    int64_t i;
    Error *local_err = NULL;

    for (i = 0; i < size / s->dump_info.page_size; i++) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if ((size % s->dump_info.page_size) != 0) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   size % s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}

static void write_elf_loads(DumpState *s, Error **errp)
{
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    uint32_t max_index;
    Error *local_err = NULL;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (s->dump_info.d_class == ELFCLASS64) {
            write_elf64_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        } else {
            write_elf32_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        }

        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static void dump_begin(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        write_elf64_header(s, &local_err);
    } else {
        write_elf32_header(s, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        write_elf64_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 1, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf64_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    } else {
        /* write PT_NOTE to vmcore */
        write_elf32_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 0, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf32_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

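/*
 * Advance the iterator to the next guest-phys block that intersects the
 * dump filter range. Returns 1 when there are no more blocks, 0 otherwise.
 */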
static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static void dump_iterate(DumpState *s, Error **errp)
{
    GuestPhysBlock *block;
    int64_t size;
    Error *local_err = NULL;

    do {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        write_memory(s, block, s->start, size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    } while (!get_next_block(s, block));
}

static void create_vmcore(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    dump_begin(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    dump_iterate(s, errp);
}

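/*
 * Write the makedumpfile flat-format start header: a MakedumpfileHeader
 * padded out to MAX_SIZE_MDF_HEADER bytes, carrying the signature plus a
 * big-endian type and version.
 */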
static int write_start_flat_header(int fd)
{
    MakedumpfileHeader *mh;
    int ret = 0;

    QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
    mh = g_malloc0(MAX_SIZE_MDF_HEADER);

    memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
           MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));

    mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh->version = cpu_to_be64(VERSION_FLAT_HEADER);

    size_t written_size;
    written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(mh);
    return ret;
}

static int write_end_flat_header(int fd)
{
    MakedumpfileDataHeader mdh;

    mdh.offset = END_FLAG_FLAT_HEADER;
    mdh.buf_size = END_FLAG_FLAT_HEADER;

    size_t written_size;
    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    return 0;
}

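/*
 * Emit one flat-format record: a MakedumpfileDataHeader giving the
 * big-endian target offset and size, followed by the payload itself.
 */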
static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

static int buf_write_note(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;

    /* not enough room left in note_buf */
    if (s->note_buf_offset + size > s->note_size) {
        return -1;
    }

    memcpy(s->note_buf + s->note_buf_offset, buf, size);

    s->note_buf_offset += size;

    return 0;
}

/*
 * This function retrieves various sizes from an ELF note header.
 *
 * @note has to be a valid ELF note. The return sizes are unmodified
 * (not padded or rounded up to be multiple of 4).
 */
static void get_note_sizes(DumpState *s, const void *note,
                           uint64_t *note_head_size,
                           uint64_t *name_size,
                           uint64_t *desc_size)
{
    uint64_t note_head_sz;
    uint64_t name_sz;
    uint64_t desc_sz;

    if (s->dump_info.d_class == ELFCLASS64) {
        const Elf64_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf64_Nhdr);
        name_sz = tswap64(hdr->n_namesz);
        desc_sz = tswap64(hdr->n_descsz);
    } else {
        const Elf32_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf32_Nhdr);
        name_sz = tswap32(hdr->n_namesz);
        desc_sz = tswap32(hdr->n_descsz);
    }

    if (note_head_size) {
        *note_head_size = note_head_sz;
    }
    if (name_size) {
        *name_size = name_sz;
    }
    if (desc_size) {
        *desc_size = desc_sz;
    }
}

static bool note_name_equal(DumpState *s,
                            const uint8_t *note, const char *name)
{
    int len = strlen(name) + 1;
    uint64_t head_size, name_size;

    get_note_sizes(s, note, &head_size, &name_size, NULL);
    head_size = ROUND_UP(head_size, 4);

    /* compare against the caller's name, not a hardcoded string */
    return name_size == len &&
           memcmp(note + head_size, name, len) == 0;
}

/* write common header, sub header and elf note to vmcore */
static void create_header32(DumpState *s, Error **errp)
{
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write common header, using version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump32(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf32_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

/* write common header, sub header and elf note to vmcore */
static void create_header64(DumpState *s, Error **errp)
{
    DiskDumpHeader64 *dh = NULL;
    KdumpSubHeader64 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write common header, using version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader64);
    dh = g_malloc0(size);

    strncpy(dh->signature, KDUMP_SIGNATURE, strlen(KDUMP_SIGNATURE));
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader64);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump64(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf64_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

static void write_dump_header(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    if (s->dump_info.d_class == ELFCLASS32) {
        create_header32(s, &local_err);
    } else {
        create_header64(s, &local_err);
    }
    error_propagate(errp, local_err);
}

static size_t dump_bitmap_get_bufsize(DumpState *s)
{
    return s->dump_info.page_size;
}

/*
 * Set the bits in dump_bitmap sequentially: bits before last_pfn must not be
 * rewritten, so to set the very first bit, pass last_pfn and pfn both as 0.
 * set_dump_bitmap always leaves the most recently set bit un-synced; setting
 * bit (last bit + sizeof(buf) * 8) to 0 flushes the content of buf into the
 * vmcore, i.e. it synchronizes the un-synced bit into the vmcore.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* should not set the previous place */
    assert(last_pfn <= pfn);

    /*
     * if the bit needed to be set is not cached in buf, flush the data in buf
     * to vmcore first.
     * making new_offset bigger than old_offset also syncs remaining data
     * into vmcore.
     */
    old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
    new_offset = bitmap_bufsize * (pfn / bits_per_buf);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        memset(buf, 0, bitmap_bufsize);
        old_offset += bitmap_bufsize;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % bits_per_buf) / CHAR_BIT;
    bit = (pfn % bits_per_buf) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}

static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
}

static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
}

/*
 * Examine every page and return the page frame number and the address of the
 * page. bufptr can be NULL. Note: the blocks here are supposed to reflect
 * guest-phys blocks, so block->target_start and block->target_end should be
 * integral multiples of the target page size.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
        return true;
    }

    *pfnptr = *pfnptr + 1;
    addr = dump_pfn_to_paddr(s, *pfnptr);

    if ((addr >= block->target_start) &&
        (addr + s->dump_info.page_size <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        buf = block->host_addr;
    }

    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}

static void write_dump_bitmap(DumpState *s, Error **errp)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(bitmap_bufsize);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * examine memory page by page, and set the bit in dump_bitmap
     * corresponding to each existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to set dump_bitmap");
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap will always leave the recently set bit un-synced. Here
     * we set the remaining bits from last_pfn to the end of the bitmap buffer
     * to 0. With those set, the un-synced bit will be synchronized into the
     * vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to sync dump_bitmap");
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);
}

static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
    data_cache->buf = g_malloc0(data_cache->buf_size);
    data_cache->offset = offset;
}

static int write_cache(DataCache *dc, const void *buf, size_t size,
                       bool flag_sync)
{
    /*
     * dc->buf_size must not be less than size, otherwise dc->buf can
     * never hold the data
     */
    assert(size <= dc->buf_size);

    /*
     * if flag_sync is set, synchronize the data in dc->buf into vmcore.
     * otherwise check whether there is enough room to cache the data in
     * buf; if not, write the data in dc->buf to dc->fd and reset dc->buf
     */
    if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
        (flag_sync && dc->data_size > 0)) {
        if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
            return -1;
        }

        dc->offset += dc->data_size;
        dc->data_size = 0;
    }

    if (!flag_sync) {
        memcpy(dc->buf + dc->data_size, buf, size);
        dc->data_size += size;
    }

    return 0;
}

static void free_data_cache(DataCache *data_cache)
{
    g_free(data_cache->buf);
}

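/*
 * Worst-case output buffer size needed for one compressed page, per the
 * selected compression method; returns 0 for an unknown method.
 */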
static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

    case DUMP_DH_COMPRESSED_LZO:
        /*
         * LZO will expand incompressible data by a little amount. Please check
         * the following URL to see the expansion calculation:
         * http://www.oberhumer.com/opensource/lzo/lzofaq.php
         */
        return page_size + page_size / 16 + 64 + 3;

#ifdef CONFIG_SNAPPY
    case DUMP_DH_COMPRESSED_SNAPPY:
        return snappy_max_compressed_length(page_size);
#endif
    }
    return 0;
}

/*
 * check if the page is all 0
 */
static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
{
    return buffer_is_zero(buf, page_size);
}

static void write_dump_pages(DumpState *s, Error **errp)
{
    int ret = 0;
    DataCache page_desc, page_data;
    size_t len_buf_out, size_out;
#ifdef CONFIG_LZO
    lzo_bytep wrkmem = NULL;
#endif
    uint8_t *buf_out = NULL;
    off_t offset_desc, offset_data;
    PageDescriptor pd, pd_zero;
    uint8_t *buf;
    GuestPhysBlock *block_iter = NULL;
    uint64_t pfn_iter;

    /* get offset of page_desc and page_data in dump file */
    offset_desc = s->offset_page;
    offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;

    prepare_data_cache(&page_desc, s, offset_desc);
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
    assert(len_buf_out != 0);

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
#endif

    buf_out = g_malloc(len_buf_out);

    /*
     * init the zero page's page_desc and page_data, because every zero page
     * uses the same page_data
     */
    pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
    pd_zero.flags = cpu_to_dump32(s, 0);
    pd_zero.offset = cpu_to_dump64(s, offset_data);
    pd_zero.page_flags = cpu_to_dump64(s, 0);
    buf = g_malloc0(s->dump_info.page_size);
    ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
    g_free(buf);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write page data (zero page)");
        goto out;
    }

    offset_data += s->dump_info.page_size;

    /*
     * dump memory to vmcore page by page. all zero pages share the single
     * page_data entry written at the start of the page section
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (is_zero_page(buf, s->dump_info.page_size)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        } else {
            /*
             * not a zero page, then:
             * 1. compress the page
             * 2. write the compressed page into the cache of page_data
             * 3. get the page desc of the compressed page and write it into
             *    the cache of page_desc
             *
             * only one compression format will be used here, because
             * s->flag_compress is set. But when compression fails to work,
             * we fall back to saving the page in plaintext.
             */
            size_out = len_buf_out;
            if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                (compress2(buf_out, (uLongf *)&size_out, buf,
                           s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
                (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                    (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
                    (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                    (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                    (snappy_compress((char *)buf, s->dump_info.page_size,
                    (char *)buf_out, &size_out) == SNAPPY_OK) &&
                    (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
            } else {
                /*
                 * fall back to save in plaintext, size_out should be
                 * assigned the target's page size
                 */
                pd.flags = cpu_to_dump32(s, 0);
                size_out = s->dump_info.page_size;
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf,
                                  s->dump_info.page_size, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
            }

            /* get and write page desc here */
            pd.page_flags = cpu_to_dump64(s, 0);
            pd.offset = cpu_to_dump64(s, offset_data);
            offset_data += size_out;

            ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        }
        s->written_size += s->dump_info.page_size;
    }

    ret = write_cache(&page_desc, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_desc");
        goto out;
    }
    ret = write_cache(&page_data, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_data");
        goto out;
    }

out:
    free_data_cache(&page_desc);
    free_data_cache(&page_data);
#ifdef CONFIG_LZO
    g_free(wrkmem);
#endif
    g_free(buf_out);
}

static void create_kdump_vmcore(DumpState *s, Error **errp)
{
    int ret;
    Error *local_err = NULL;

    /*
     * the kdump-compressed format is:
     *                                               File offset
     *  +------------------------------------------+ 0x0
     *  |    main header (struct disk_dump_header) |
     *  |------------------------------------------+ block 1
     *  |    sub header (struct kdump_sub_header)  |
     *  |------------------------------------------+ block 2
     *  |            1st-dump_bitmap               |
     *  |------------------------------------------+ block 2 + X blocks
     *  |            2nd-dump_bitmap               | (aligned by block)
     *  |------------------------------------------+ block 2 + 2 * X blocks
     *  |  page desc for pfn 0 (struct page_desc)  | (aligned by block)
     *  |  page desc for pfn 1 (struct page_desc)  |
     *  |                    :                     |
     *  |------------------------------------------| (not aligned by block)
     *  |         page data (pfn 0)                |
     *  |         page data (pfn 1)                |
     *  |                    :                     |
     *  +------------------------------------------+
     */

    ret = write_start_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write start flat header");
        return;
    }

    write_dump_header(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_bitmap(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_pages(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = write_end_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write end flat header");
        return;
    }
}

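/*
 * Position the iterator at the first guest-phys block inside the filter
 * range and return the offset into that block; returns -1 when the filter
 * matches no memory at all.
 */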
static ram_addr_t get_start_block(DumpState *s)
{
    GuestPhysBlock *block;

    if (!s->has_filter) {
        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        return 0;
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (block->target_start >= s->begin + s->length ||
            block->target_end <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->next_block = block;
        if (s->begin > block->target_start) {
            s->start = s->begin - block->target_start;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

static void get_max_mapnr(DumpState *s)
{
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head, GuestPhysBlockHead);
    s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
}

static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };

static void dump_state_prepare(DumpState *s)
{
    /* zero the struct, setting status to active */
    *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
}

bool dump_in_progress(void)
{
    DumpState *state = &dump_state_global;
    return (atomic_read(&state->status) == DUMP_STATUS_ACTIVE);
}

/* calculate total size of memory to be dumped (taking filter into
 * account.) */
static int64_t dump_calculate_size(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t size = 0, total = 0, left = 0, right = 0;

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            /* calculate the overlapped region. */
            left = MAX(s->begin, block->target_start);
            right = MIN(s->begin + s->length, block->target_end);
            size = right - left;
            size = size > 0 ? size : 0;
        } else {
            /* count the whole region in */
            size = (block->target_end - block->target_start);
        }
        total += size;
    }

    return total;
}

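/*
 * Parse the guest's VMCOREINFO note for a "NUMBER(phys_base)=" line and,
 * if one is found, use it to correct the previously guessed phys_base;
 * the 18 in "lines[i] + 18" below is strlen("NUMBER(phys_base)=").
 */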
static void vmcoreinfo_update_phys_base(DumpState *s)
{
    uint64_t size, note_head_size, name_size, phys_base;
    char **lines;
    uint8_t *vmci;
    size_t i;

    if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        return;
    }

    get_note_sizes(s, s->guest_note, &note_head_size, &name_size, &size);
    note_head_size = ROUND_UP(note_head_size, 4);

    vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4);
    *(vmci + size) = '\0';

    lines = g_strsplit((char *)vmci, "\n", -1);
    for (i = 0; lines[i]; i++) {
        if (g_str_has_prefix(lines[i], "NUMBER(phys_base)=")) {
            if (qemu_strtou64(lines[i] + 18, NULL, 16,
                              &phys_base) < 0) {
                warn_report("Failed to read NUMBER(phys_base)=");
            } else {
                s->dump_info.phys_base = phys_base;
            }
            break;
        }
    }

    g_strfreev(lines);
}

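/*
 * Gather everything needed before writing: stop the VM if it is running,
 * snapshot the guest-phys blocks, query arch dump info and note sizes, and
 * copy the guest's VMCOREINFO note out of guest memory.
 */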
static void dump_init(DumpState *s, int fd, bool has_format,
                      DumpGuestMemoryFormat format, bool paging,
                      bool has_filter, int64_t begin, int64_t length,
                      Error **errp)
{
    VMCoreInfoState *vmci = vmcoreinfo_find();
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    int ret;

    s->has_format = has_format;
    s->format = format;
    s->written_size = 0;

    /* kdump-compressed conflicts with paging and filter */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        assert(!paging && !has_filter);
    }
    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    /* If we use KVM, we should synchronize the registers before we get dump
     * info or physmap info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    CPU_FOREACH(cpu) {
        nr_cpus++;
    }

    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;

    memory_mapping_list_init(&s->list);

    guest_phys_blocks_init(&s->guest_phys_blocks);
    guest_phys_blocks_append(&s->guest_phys_blocks);
    s->total_size = dump_calculate_size(s);
#ifdef DEBUG_DUMP_GUEST_MEMORY
    fprintf(stderr, "DUMP: total memory to dump: %" PRIu64 "\n",
            s->total_size);
#endif
    /* it does not make sense to dump non-existent memory */
    if (!s->total_size) {
        error_setg(errp, "dump: no guest memory to dump");
        goto cleanup;
    }

    s->start = get_start_block(s);
    if (s->start == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /* get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     */
    ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
    if (ret < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    if (!s->dump_info.page_size) {
        s->dump_info.page_size = TARGET_PAGE_SIZE;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }
    /*
     * The goal of this block is to (a) update the previously guessed
     * phys_base, (b) copy the guest note out of the guest.
     * Failure to do so is not fatal for dumping.
     */
    if (vmci) {
        uint64_t addr, note_head_size, name_size, desc_size;
        uint32_t size;
        uint16_t format;

        note_head_size = s->dump_info.d_class == ELFCLASS32 ?
            sizeof(Elf32_Nhdr) : sizeof(Elf64_Nhdr);

        format = le16_to_cpu(vmci->vmcoreinfo.guest_format);
        size = le32_to_cpu(vmci->vmcoreinfo.size);
        addr = le64_to_cpu(vmci->vmcoreinfo.paddr);
        if (!vmci->has_vmcoreinfo) {
            warn_report("guest note is not present");
        } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) {
            warn_report("guest note size is invalid: %" PRIu32, size);
        } else if (format != VMCOREINFO_FORMAT_ELF) {
            warn_report("guest note format is unsupported: %" PRIu16, format);
        } else {
            s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */
            cpu_physical_memory_read(addr, s->guest_note, size);

            get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size);
            s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size,
                                               desc_size);
            if (name_size > MAX_GUEST_NOTE_SIZE ||
                desc_size > MAX_GUEST_NOTE_SIZE ||
                s->guest_note_size > size) {
                warn_report("Invalid guest note header");
                g_free(s->guest_note);
                s->guest_note = NULL;
            } else {
                vmcoreinfo_update_phys_base(s);
                s->note_size += s->guest_note_size;
            }
        }
    }
    /* get memory mapping */
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            goto cleanup;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
    }

    s->nr_cpus = nr_cpus;

    get_max_mapnr(s);

    uint64_t tmp;
    /* The dump bitmap needs one bit per page frame; round the byte count
     * up to a whole number of dump pages. */
    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
                       s->dump_info.page_size);
    s->len_dump_bitmap = tmp * s->dump_info.page_size;
    /* init for kdump-compressed format */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        switch (format) {
        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
            s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
            if (lzo_init() != LZO_E_OK) {
                error_setg(errp, "failed to initialize the LZO library");
                goto cleanup;
            }
#endif
            s->flag_compress = DUMP_DH_COMPRESSED_LZO;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
            s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
            break;

        default:
            s->flag_compress = 0;
        }

        return;
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }
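    /*
     * Per the ELF gABI, e_phnum is only 16 bits wide.  When the real
     * number of program headers would not fit, e_phnum is set to the
     * sentinel PN_XNUM (0xffff) and the actual count is stored in the
     * sh_info field of the first (index 0) section header, which is why
     * a section header is emitted at all in that case.
     */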
    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }
    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return;

cleanup:
    dump_cleanup(s);
}
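/*
 * The smp_wmb() in dump_process() pairs with the smp_rmb() in
 * qmp_query_dump(): a reader that observes DUMP_STATUS_COMPLETED is
 * thereby guaranteed to also observe the final written_size.
 */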
/* this operation might be time consuming. */
static void dump_process(DumpState *s, Error **errp)
{
    Error *local_err = NULL;
    DumpQueryResult *result = NULL;

    if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        create_kdump_vmcore(s, &local_err);
    } else {
        create_vmcore(s, &local_err);
    }

    /* make sure status is written after written_size updates */
    smp_wmb();
    atomic_set(&s->status,
               (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));

    /* send DUMP_COMPLETED message (unconditionally) */
    result = qmp_query_dump(NULL);
    /* should never fail */
    assert(result);
    qapi_event_send_dump_completed(result, !!local_err,
                                   (local_err ? error_get_pretty(local_err)
                                              : NULL),
                                   &error_abort);
    qapi_free_DumpQueryResult(result);

    error_propagate(errp, local_err);
    dump_cleanup(s);
}
static void *dump_thread(void *data)
{
    DumpState *s = (DumpState *)data;

    dump_process(s, NULL);
    return NULL;
}
DumpQueryResult *qmp_query_dump(Error **errp)
{
    DumpQueryResult *result = g_new(DumpQueryResult, 1);
    DumpState *state = &dump_state_global;

    result->status = atomic_read(&state->status);
    /* make sure we are reading status and written_size in order */
    smp_rmb();
    result->completed = state->written_size;
    result->total = state->total_size;
    return result;
}
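/*
 * QMP entry point.  An illustrative invocation (a sketch only; the
 * authoritative schema is the dump-guest-memory definition in the QAPI
 * files) requesting a detached, zlib-compressed dump to a file:
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false,
 *                    "protocol": "file:/tmp/vmcore",
 *                    "detach": true,
 *                    "format": "kdump-zlib" } }
 *
 * "protocol" may instead name a previously added fd via "fd:<name>", as
 * handled below.
 */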
void qmp_dump_guest_memory(bool paging, const char *file,
                           bool has_detach, bool detach,
                           bool has_begin, int64_t begin, bool has_length,
                           int64_t length, bool has_format,
                           DumpGuestMemoryFormat format, Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    Error *local_err = NULL;
    bool detach_p = false;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Dump not allowed during incoming migration.");
        return;
    }

    /* If there is a dump running in the background, we should wait until
     * it finishes. */
    if (dump_in_progress()) {
        error_setg(errp, "There is a dump in progress, please wait.");
        return;
    }

    /*
     * The kdump-compressed format needs the whole memory dumped, so
     * paging or filtering is not supported here.
     */
    if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
        (paging || has_begin || has_length)) {
        error_setg(errp, "kdump-compressed format doesn't support paging or "
                         "filter");
        return;
    }
    if (has_begin && !has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

    if (has_detach) {
        detach_p = detach;
    }
    /* check whether lzo/snappy is supported */
#ifndef CONFIG_LZO
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
        error_setg(errp, "kdump-lzo is not available now");
        return;
    }
#endif

#ifndef CONFIG_SNAPPY
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
        error_setg(errp, "kdump-snappy is not available now");
        return;
    }
#endif

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    if (fd == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }
    s = &dump_state_global;
    dump_state_prepare(s);

    dump_init(s, fd, has_format, format, paging, has_begin,
              begin, length, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        atomic_set(&s->status, DUMP_STATUS_FAILED);
        return;
    }

    if (detach_p) {
        /* detached dump */
        s->detached = true;
        qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
                           s, QEMU_THREAD_DETACHED);
    } else {
        /* sync dump */
        dump_process(s, errp);
    }
}
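/*
 * Report which dump formats this binary supports.  A sketch of a possible
 * reply, assuming both optional compressors were compiled in:
 *
 *   { "return": { "formats":
 *       [ "elf", "kdump-zlib", "kdump-lzo", "kdump-snappy" ] } }
 */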
DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
{
    DumpGuestMemoryFormatList *item;
    DumpGuestMemoryCapability *cap =
        g_malloc0(sizeof(DumpGuestMemoryCapability));

    /* elf is always available */
    item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    cap->formats = item;
    item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;

    /* kdump-zlib is always available */
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;

    /* add new item if kdump-lzo is available */
#ifdef CONFIG_LZO
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
#endif

    /* add new item if kdump-snappy is available */
#ifdef CONFIG_SNAPPY
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
#endif

    return cap;
}