/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/cutils.h"
#include "elf.h"
#include "cpu.h"
#include "exec/hwaddr.h"
#include "monitor/monitor.h"
#include "sysemu/kvm.h"
#include "sysemu/dump.h"
#include "sysemu/memory_mapping.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-dump.h"
#include "qapi/qapi-events-dump.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/misc/vmcoreinfo.h"

#ifdef TARGET_X86_64
#include "win_dump.h"
#endif

#include <zlib.h>
#ifdef CONFIG_LZO
#include <lzo/lzo1x.h>
#endif
#ifdef CONFIG_SNAPPY
#include <snappy-c.h>
#endif

#ifndef ELF_MACHINE_UNAME
#define ELF_MACHINE_UNAME "Unknown"
#endif

#define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */

#define ELF_NOTE_SIZE(hdr_size, name_size, desc_size)   \
    ((DIV_ROUND_UP((hdr_size), 4) +                     \
      DIV_ROUND_UP((name_size), 4) +                    \
      DIV_ROUND_UP((desc_size), 4)) * 4)
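
/*
 * Worked example (illustrative): for a 64-bit note with hdr_size = 12
 * (sizeof(Elf64_Nhdr)), name_size = 5 ("CORE\0") and desc_size = 100,
 * ELF_NOTE_SIZE(12, 5, 100) = (3 + 2 + 25) * 4 = 120 bytes, i.e. each
 * field is rounded up to 4-byte alignment before the sizes are summed.
 */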

uint16_t cpu_to_dump16(DumpState *s, uint16_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

uint32_t cpu_to_dump32(DumpState *s, uint32_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

uint64_t cpu_to_dump64(DumpState *s, uint64_t val)
{
    if (s->dump_info.d_endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

static int dump_cleanup(DumpState *s)
{
    guest_phys_blocks_free(&s->guest_phys_blocks);
    memory_mapping_list_free(&s->list);
    close(s->fd);
    g_free(s->guest_note);
    s->guest_note = NULL;
    if (s->resume) {
        if (s->detached) {
            qemu_mutex_lock_iothread();
        }
        vm_start();
        if (s->detached) {
            qemu_mutex_unlock_iothread();
        }
    }

    return 0;
}

static int fd_write_vmcore(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -errno;
    }

    return 0;
}

static void write_elf64_header(DumpState *s, Error **errp)
{
    Elf64_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump64(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to write elf header");
    }
}

static void write_elf32_header(DumpState *s, Error **errp)
{
    Elf32_Ehdr elf_header;
    int ret;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_to_dump16(s, ET_CORE);
    elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine);
    elf_header.e_version = cpu_to_dump32(s, EV_CURRENT);
    elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header));
    elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr));
    elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr));
    elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_to_dump32(s, shoff);
        elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr));
        elf_header.e_shnum = cpu_to_dump16(s, 1);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to write elf header");
    }
}

static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf64_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump64(s, offset);
    phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump64(s, filesz);
    phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length);
    phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr) ?: phdr.p_paddr;

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                             int phdr_index, hwaddr offset,
                             hwaddr filesz, Error **errp)
{
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_LOAD);
    phdr.p_offset = cpu_to_dump32(s, offset);
    phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr);
    phdr.p_filesz = cpu_to_dump32(s, filesz);
    phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length);
    phdr.p_vaddr =
        cpu_to_dump32(s, memory_mapping->virt_addr) ?: phdr.p_paddr;

    assert(memory_mapping->length >= filesz);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf64_note(DumpState *s, Error **errp)
{
    Elf64_Phdr phdr;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump64(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump64(s, s->note_size);
    phdr.p_memsz = cpu_to_dump64(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static inline int cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}
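
/*
 * The ids handed to cpu_write_elf*_note() below are thus 1-based:
 * QEMU's 0-based cpu_index is shifted up by one for the per-CPU notes.
 */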

static void write_guest_note(WriteCoreDumpFunction f, DumpState *s,
                             Error **errp)
{
    int ret;

    if (s->guest_note) {
        ret = f(s->guest_note, s->guest_note_size, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write guest note");
        }
    }
}

static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf64_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf64_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}

static void write_elf32_note(DumpState *s, Error **errp)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_to_dump32(s, PT_NOTE);
    phdr.p_offset = cpu_to_dump32(s, begin);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_to_dump32(s, s->note_size);
    phdr.p_memsz = cpu_to_dump32(s, s->note_size);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write program header table");
    }
}

static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s,
                              Error **errp)
{
    CPUState *cpu;
    int ret;
    int id;

    CPU_FOREACH(cpu) {
        id = cpu_index(cpu);
        ret = cpu_write_elf32_note(f, cpu, id, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write elf notes");
            return;
        }
    }

    CPU_FOREACH(cpu) {
        ret = cpu_write_elf32_qemunote(f, cpu, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to write CPU status");
            return;
        }
    }

    write_guest_note(f, s, errp);
}

static void write_elf_section(DumpState *s, int type, Error **errp)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_to_dump32(s, s->sh_info);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "dump: failed to write section header table");
    }
}

static void write_data(DumpState *s, void *buf, int length, Error **errp)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "dump: failed to save memory");
    } else {
        s->written_size += length;
    }
}

/* write the memory to vmcore. 1 page per I/O. */
static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start,
                         int64_t size, Error **errp)
{
    int64_t i;
    Error *local_err = NULL;

    for (i = 0; i < size / s->dump_info.page_size; i++) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if ((size % s->dump_info.page_size) != 0) {
        write_data(s, block->host_addr + start + i * s->dump_info.page_size,
                   size % s->dump_info.page_size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

/* get the memory's offset and size in the vmcore */
static void get_offset_range(hwaddr phys_addr,
                             ram_addr_t mapping_length,
                             DumpState *s,
                             hwaddr *p_offset,
                             hwaddr *p_filesz)
{
    GuestPhysBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    /* When the memory is not stored into vmcore, offset will be -1 */
    *p_offset = -1;
    *p_filesz = 0;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return;
        }
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->target_start) {
                start = block->target_start;
            } else {
                start = s->begin;
            }

            size_in_block = block->target_end - start;
            if (s->begin + s->length < block->target_end) {
                size_in_block -= block->target_end - (s->begin + s->length);
            }
        } else {
            start = block->target_start;
            size_in_block = block->target_end - block->target_start;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            *p_offset = phys_addr - start + offset;

            /* The offset range mapped from the vmcore file must not spill over
             * the GuestPhysBlock, clamp it. The rest of the mapping will be
             * zero-filled in memory at load time; see
             * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>.
             */
            *p_filesz = phys_addr + mapping_length <= start + size_in_block ?
                        mapping_length :
                        size_in_block - (phys_addr - start);
            return;
        }

        offset += size_in_block;
    }
}

static void write_elf_loads(DumpState *s, Error **errp)
{
    hwaddr offset, filesz;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    uint32_t max_index;
    Error *local_err = NULL;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        get_offset_range(memory_mapping->phys_addr,
                         memory_mapping->length,
                         s, &offset, &filesz);
        if (s->dump_info.d_class == ELFCLASS64) {
            write_elf64_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        } else {
            write_elf32_load(s, memory_mapping, phdr_index++, offset,
                             filesz, &local_err);
        }

        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static void dump_begin(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        write_elf64_header(s, &local_err);
    } else {
        write_elf32_header(s, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        write_elf64_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 1, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf64_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    } else {
        /* write PT_NOTE to vmcore */
        write_elf32_note(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write all PT_LOAD to vmcore */
        write_elf_loads(s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        /* write section to vmcore */
        if (s->have_section) {
            write_elf_section(s, 0, &local_err);
            if (local_err) {
                error_propagate(errp, local_err);
                return;
            }
        }

        /* write notes to vmcore */
        write_elf32_notes(fd_write_vmcore, s, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static int get_next_block(DumpState *s, GuestPhysBlock *block)
{
    while (1) {
        block = QTAILQ_NEXT(block, next);
        if (!block) {
            /* no more blocks */
            return 1;
        }

        s->start = 0;
        s->next_block = block;
        if (s->has_filter) {
            if (block->target_start >= s->begin + s->length ||
                block->target_end <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->target_start) {
                s->start = s->begin - block->target_start;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static void dump_iterate(DumpState *s, Error **errp)
{
    GuestPhysBlock *block;
    int64_t size;
    Error *local_err = NULL;

    do {
        block = s->next_block;

        size = block->target_end - block->target_start;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->target_end) {
                size -= block->target_end - (s->begin + s->length);
            }
        }
        write_memory(s, block, s->start, size, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

    } while (!get_next_block(s, block));
}

static void create_vmcore(DumpState *s, Error **errp)
{
    Error *local_err = NULL;

    dump_begin(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    dump_iterate(s, errp);
}

static int write_start_flat_header(int fd)
{
    MakedumpfileHeader *mh;
    int ret = 0;

    QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER);
    mh = g_malloc0(MAX_SIZE_MDF_HEADER);

    memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE,
           MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE));

    mh->type = cpu_to_be64(TYPE_FLAT_HEADER);
    mh->version = cpu_to_be64(VERSION_FLAT_HEADER);

    size_t written_size;
    written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER);
    if (written_size != MAX_SIZE_MDF_HEADER) {
        ret = -1;
    }

    g_free(mh);
    return ret;
}

static int write_end_flat_header(int fd)
{
    MakedumpfileDataHeader mdh;

    mdh.offset = END_FLAG_FLAT_HEADER;
    mdh.buf_size = END_FLAG_FLAT_HEADER;

    size_t written_size;
    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    return 0;
}

static int write_buffer(int fd, off_t offset, const void *buf, size_t size)
{
    size_t written_size;
    MakedumpfileDataHeader mdh;

    mdh.offset = cpu_to_be64(offset);
    mdh.buf_size = cpu_to_be64(size);

    written_size = qemu_write_full(fd, &mdh, sizeof(mdh));
    if (written_size != sizeof(mdh)) {
        return -1;
    }

    written_size = qemu_write_full(fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}
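
/*
 * Note on the flat format above: each chunk written by write_buffer() is
 * preceded by a big-endian (offset, size) pair, so a flat-format reader
 * (e.g. makedumpfile's reassembly mode) can rebuild a seekable file from
 * the sequential stream even when fd is a non-seekable pipe.
 */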

static int buf_write_note(const void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;

    /* not enough space left in note_buf */
    if (s->note_buf_offset + size > s->note_size) {
        return -1;
    }

    memcpy(s->note_buf + s->note_buf_offset, buf, size);

    s->note_buf_offset += size;

    return 0;
}

/*
 * This function retrieves various sizes from an ELF note header.
 *
 * @note has to be a valid ELF note. The return sizes are unmodified
 * (not padded or rounded up to be multiple of 4).
 */
static void get_note_sizes(DumpState *s, const void *note,
                           uint64_t *note_head_size,
                           uint64_t *name_size,
                           uint64_t *desc_size)
{
    uint64_t note_head_sz;
    uint64_t name_sz;
    uint64_t desc_sz;

    if (s->dump_info.d_class == ELFCLASS64) {
        const Elf64_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf64_Nhdr);
        name_sz = tswap64(hdr->n_namesz);
        desc_sz = tswap64(hdr->n_descsz);
    } else {
        const Elf32_Nhdr *hdr = note;
        note_head_sz = sizeof(Elf32_Nhdr);
        name_sz = tswap32(hdr->n_namesz);
        desc_sz = tswap32(hdr->n_descsz);
    }

    if (note_head_size) {
        *note_head_size = note_head_sz;
    }
    if (name_size) {
        *name_size = name_sz;
    }
    if (desc_size) {
        *desc_size = desc_sz;
    }
}

static bool note_name_equal(DumpState *s,
                            const uint8_t *note, const char *name)
{
    int len = strlen(name) + 1;
    uint64_t head_size, name_size;

    get_note_sizes(s, note, &head_size, &name_size, NULL);
    head_size = ROUND_UP(head_size, 4);

    return name_size == len && memcmp(note + head_size, name, len) == 0;
}
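
/*
 * For example, note_name_equal(s, note, "VMCOREINFO") matches a note whose
 * name field holds the 11 bytes "VMCOREINFO\0" immediately after the
 * 4-byte-aligned note header.
 */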

/* write common header, sub header and elf note to vmcore */
static void create_header32(DumpState *s, Error **errp)
{
    DiskDumpHeader32 *dh = NULL;
    KdumpSubHeader32 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write the common header; version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader32);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME,
            sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader32);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump32(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf32_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

/* write common header, sub header and elf note to vmcore */
static void create_header64(DumpState *s, Error **errp)
{
    DiskDumpHeader64 *dh = NULL;
    KdumpSubHeader64 *kh = NULL;
    size_t size;
    uint32_t block_size;
    uint32_t sub_hdr_size;
    uint32_t bitmap_blocks;
    uint32_t status = 0;
    uint64_t offset_note;
    Error *local_err = NULL;

    /* write the common header; version 6 of the kdump-compressed format */
    size = sizeof(DiskDumpHeader64);
    dh = g_malloc0(size);

    memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN);
    dh->header_version = cpu_to_dump32(s, 6);
    block_size = s->dump_info.page_size;
    dh->block_size = cpu_to_dump32(s, block_size);
    sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size;
    sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size);
    dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size);
    /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */
    dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX));
    dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus);
    bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2;
    dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks);
    strncpy(dh->utsname.machine, ELF_MACHINE_UNAME,
            sizeof(dh->utsname.machine));

    if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) {
        status |= DUMP_DH_COMPRESSED_ZLIB;
    }
#ifdef CONFIG_LZO
    if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) {
        status |= DUMP_DH_COMPRESSED_LZO;
    }
#endif
#ifdef CONFIG_SNAPPY
    if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) {
        status |= DUMP_DH_COMPRESSED_SNAPPY;
    }
#endif
    dh->status = cpu_to_dump32(s, status);

    if (write_buffer(s->fd, 0, dh, size) < 0) {
        error_setg(errp, "dump: failed to write disk dump header");
        goto out;
    }

    /* write sub header */
    size = sizeof(KdumpSubHeader64);
    kh = g_malloc0(size);

    /* 64bit max_mapnr_64 */
    kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr);
    kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base);
    kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL);

    offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size;
    if (s->guest_note &&
        note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo;

        get_note_sizes(s, s->guest_note,
                       &hsize, &name_size, &size_vmcoreinfo_desc);
        offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size +
            (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4;
        kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo);
        kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc);
    }

    kh->offset_note = cpu_to_dump64(s, offset_note);
    kh->note_size = cpu_to_dump64(s, s->note_size);

    if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS *
                     block_size, kh, size) < 0) {
        error_setg(errp, "dump: failed to write kdump sub header");
        goto out;
    }

    /* write note */
    s->note_buf = g_malloc0(s->note_size);
    s->note_buf_offset = 0;

    /* use s->note_buf to store notes temporarily */
    write_elf64_notes(buf_write_note, s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto out;
    }
    if (write_buffer(s->fd, offset_note, s->note_buf,
                     s->note_size) < 0) {
        error_setg(errp, "dump: failed to write notes");
        goto out;
    }

    /* get offset of dump_bitmap */
    s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) *
                             block_size;

    /* get offset of page */
    s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) *
                     block_size;

out:
    g_free(dh);
    g_free(kh);
    g_free(s->note_buf);
}

static void write_dump_header(DumpState *s, Error **errp)
{
    if (s->dump_info.d_class == ELFCLASS32) {
        create_header32(s, errp);
    } else {
        create_header64(s, errp);
    }
}

static size_t dump_bitmap_get_bufsize(DumpState *s)
{
    return s->dump_info.page_size;
}

/*
 * Set bits in dump_bitmap sequentially. Bits before last_pfn must not be
 * rewritten, so to set the very first bit, pass last_pfn = pfn = 0.
 * set_dump_bitmap() always leaves the most recently set bit unsynced;
 * setting bit (last_pfn + sizeof(buf) * 8) to 0 flushes the content of buf
 * into the vmcore, i.e. it synchronizes the unsynced bits.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* should not set the previous place */
    assert(last_pfn <= pfn);

    /*
     * if the bit needed to be set is not cached in buf, flush the data in buf
     * to vmcore first.
     * making new_offset bigger than old_offset also syncs the remaining data
     * into vmcore.
     */
    old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
    new_offset = bitmap_bufsize * (pfn / bits_per_buf);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        memset(buf, 0, bitmap_bufsize);
        old_offset += bitmap_bufsize;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % bits_per_buf) / CHAR_BIT;
    bit = (pfn % bits_per_buf) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}
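
/*
 * Worked example (assuming a 4 KiB bitmap buffer, so bits_per_buf is
 * 32768): after set_dump_bitmap(100, 40000, true, buf, s), old_offset is 0
 * and new_offset is 4096, so the full buffer is flushed to both bitmap
 * copies, buf is zeroed, and bit 7232 (byte 904, bit 0) of the fresh
 * buffer is set for pfn 40000.
 */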

static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
}

static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
}
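
/*
 * For instance, with a 4 KiB target page size the shift is 12, so
 * (assuming ARCH_PFN_OFFSET == 0) physical address 0x2000 maps to pfn 2
 * and back again.
 */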

/*
 * Examine every page and return the page frame number and the address of
 * the page. bufptr can be NULL. Note: the blocks here are supposed to
 * reflect guest-phys blocks, so block->target_start and block->target_end
 * should be integral multiples of the target page size.
 */
static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr,
                          uint8_t **bufptr, DumpState *s)
{
    GuestPhysBlock *block = *blockptr;
    hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1);
    uint8_t *buf;

    /* block == NULL means the start of the iteration */
    if (!block) {
        block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        *blockptr = block;
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        if (bufptr) {
            *bufptr = block->host_addr;
        }
        return true;
    }

    *pfnptr = *pfnptr + 1;
    addr = dump_pfn_to_paddr(s, *pfnptr);

    if ((addr >= block->target_start) &&
        (addr + s->dump_info.page_size <= block->target_end)) {
        buf = block->host_addr + (addr - block->target_start);
    } else {
        /* the next page is in the next block */
        block = QTAILQ_NEXT(block, next);
        *blockptr = block;
        if (!block) {
            return false;
        }
        assert((block->target_start & ~target_page_mask) == 0);
        assert((block->target_end & ~target_page_mask) == 0);
        *pfnptr = dump_paddr_to_pfn(s, block->target_start);
        buf = block->host_addr;
    }

    if (bufptr) {
        *bufptr = buf;
    }

    return true;
}

static void write_dump_bitmap(DumpState *s, Error **errp)
{
    int ret = 0;
    uint64_t last_pfn, pfn;
    void *dump_bitmap_buf;
    size_t num_dumpable;
    GuestPhysBlock *block_iter = NULL;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* dump_bitmap_buf is used to store dump_bitmap temporarily */
    dump_bitmap_buf = g_malloc0(bitmap_bufsize);

    num_dumpable = 0;
    last_pfn = 0;

    /*
     * Examine memory page by page and set the bit in dump_bitmap that
     * corresponds to each existing page.
     */
    while (get_next_page(&block_iter, &pfn, NULL, s)) {
        ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to set dump_bitmap");
            goto out;
        }

        last_pfn = pfn;
        num_dumpable++;
    }

    /*
     * set_dump_bitmap will always leave the recently set bit un-sync. Here we
     * set the remaining bits from last_pfn to the end of the bitmap buffer to
     * 0. With those set, the un-sync bit will be synchronized into the vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to sync dump_bitmap");
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;

out:
    g_free(dump_bitmap_buf);
}

static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
    data_cache->buf = g_malloc0(data_cache->buf_size);
    data_cache->offset = offset;
}

static int write_cache(DataCache *dc, const void *buf, size_t size,
                       bool flag_sync)
{
    /*
     * dc->buf_size must not be less than size; otherwise dc would never
     * have enough room
     */
    assert(size <= dc->buf_size);

    /*
     * if flag_sync is set, synchronize data in dc->buf into vmcore.
     * otherwise check if the space is enough for caching data in buf, if not,
     * write the data in dc->buf to dc->fd and reset dc->buf
     */
    if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
        (flag_sync && dc->data_size > 0)) {
        if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
            return -1;
        }

        dc->offset += dc->data_size;
        dc->data_size = 0;
    }

    if (!flag_sync) {
        memcpy(dc->buf + dc->data_size, buf, size);
        dc->data_size += size;
    }

    return 0;
}

static void free_data_cache(DataCache *data_cache)
{
    g_free(data_cache->buf);
}

static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

    case DUMP_DH_COMPRESSED_LZO:
        /*
         * LZO will expand incompressible data by a little amount. Please check
         * the following URL to see the expansion calculation:
         * http://www.oberhumer.com/opensource/lzo/lzofaq.php
         */
        return page_size + page_size / 16 + 64 + 3;

#ifdef CONFIG_SNAPPY
    case DUMP_DH_COMPRESSED_SNAPPY:
        return snappy_max_compressed_length(page_size);
#endif
    }
    return 0;
}
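
/*
 * For a 4 KiB page the LZO worst case above works out to
 * 4096 + 4096 / 16 + 64 + 3 = 4419 bytes.
 */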

/*
 * check if the page is all 0
 */
static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
{
    return buffer_is_zero(buf, page_size);
}

static void write_dump_pages(DumpState *s, Error **errp)
{
    int ret = 0;
    DataCache page_desc, page_data;
    size_t len_buf_out, size_out;
#ifdef CONFIG_LZO
    lzo_bytep wrkmem = NULL;
#endif
    uint8_t *buf_out = NULL;
    off_t offset_desc, offset_data;
    PageDescriptor pd, pd_zero;
    uint8_t *buf;
    GuestPhysBlock *block_iter = NULL;
    uint64_t pfn_iter;

    /* get offset of page_desc and page_data in dump file */
    offset_desc = s->offset_page;
    offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable;

    prepare_data_cache(&page_desc, s, offset_desc);
    prepare_data_cache(&page_data, s, offset_data);

    /* prepare buffer to store compressed data */
    len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress);
    assert(len_buf_out != 0);

#ifdef CONFIG_LZO
    wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS);
#endif

    buf_out = g_malloc(len_buf_out);

    /*
     * init the zero page's page_desc and page_data, because every zero page
     * uses the same page_data
     */
    pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size);
    pd_zero.flags = cpu_to_dump32(s, 0);
    pd_zero.offset = cpu_to_dump64(s, offset_data);
    pd_zero.page_flags = cpu_to_dump64(s, 0);
    buf = g_malloc0(s->dump_info.page_size);
    ret = write_cache(&page_data, buf, s->dump_info.page_size, false);
    g_free(buf);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write page data (zero page)");
        goto out;
    }

    offset_data += s->dump_info.page_size;

    /*
     * dump memory to vmcore page by page. All zero pages share the single
     * copy stored in the first page of the page section.
     */
    while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
        /* check zero page */
        if (is_zero_page(buf, s->dump_info.page_size)) {
            ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                              false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        } else {
            /*
             * not a zero page, so:
             * 1. compress the page
             * 2. write the compressed page into the cache of page_data
             * 3. get the page desc of the compressed page and write it into
             *    the cache of page_desc
             *
             * only one compression format is used here, since
             * s->flag_compress is set; if compression fails, we fall back
             * to storing the page uncompressed.
             */
            size_out = len_buf_out;
            if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) &&
                (compress2(buf_out, (uLongf *)&size_out, buf,
                           s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) &&
                (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#ifdef CONFIG_LZO
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) &&
                (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out,
                                  (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) &&
                (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
#ifdef CONFIG_SNAPPY
            } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) &&
                (snappy_compress((char *)buf, s->dump_info.page_size,
                                 (char *)buf_out, &size_out) == SNAPPY_OK) &&
                (size_out < s->dump_info.page_size)) {
                pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY);
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf_out, size_out, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
#endif
            } else {
                /*
                 * fall back to saving in plaintext; size_out should be
                 * assigned the target's page size
                 */
                pd.flags = cpu_to_dump32(s, 0);
                size_out = s->dump_info.page_size;
                pd.size = cpu_to_dump32(s, size_out);

                ret = write_cache(&page_data, buf,
                                  s->dump_info.page_size, false);
                if (ret < 0) {
                    error_setg(errp, "dump: failed to write page data");
                    goto out;
                }
            }

            /* get and write page desc here */
            pd.page_flags = cpu_to_dump64(s, 0);
            pd.offset = cpu_to_dump64(s, offset_data);
            offset_data += size_out;

            ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false);
            if (ret < 0) {
                error_setg(errp, "dump: failed to write page desc");
                goto out;
            }
        }
        s->written_size += s->dump_info.page_size;
    }

    ret = write_cache(&page_desc, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_desc");
        goto out;
    }
    ret = write_cache(&page_data, NULL, 0, true);
    if (ret < 0) {
        error_setg(errp, "dump: failed to sync cache for page_data");
        goto out;
    }

out:
    free_data_cache(&page_desc);
    free_data_cache(&page_data);
#ifdef CONFIG_LZO
    g_free(wrkmem);
#endif
    g_free(buf_out);
}

static void create_kdump_vmcore(DumpState *s, Error **errp)
{
    int ret;
    Error *local_err = NULL;

    /*
     * the kdump-compressed format is:
     *                                               File offset
     *  +------------------------------------------+ 0x0
     *  |    main header (struct disk_dump_header) |
     *  |------------------------------------------+ block 1
     *  |    sub header (struct kdump_sub_header)  |
     *  |------------------------------------------+ block 2
     *  |            1st-dump_bitmap               |
     *  |------------------------------------------+ block 2 + X blocks
     *  |            2nd-dump_bitmap               | (aligned by block)
     *  |------------------------------------------+ block 2 + 2 * X blocks
     *  |  page desc for pfn 0 (struct page_desc)  | (aligned by block)
     *  |  page desc for pfn 1 (struct page_desc)  |
     *  |                  :                       |
     *  |------------------------------------------| (not aligned by block)
     *  |         page data (pfn 0)                |
     *  |         page data (pfn 1)                |
     *  |                  :                       |
     *  +------------------------------------------+
     */

    ret = write_start_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write start flat header");
        return;
    }

    write_dump_header(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_bitmap(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    write_dump_pages(s, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    ret = write_end_flat_header(s->fd);
    if (ret < 0) {
        error_setg(errp, "dump: failed to write end flat header");
        return;
    }
}

static ram_addr_t get_start_block(DumpState *s)
{
    GuestPhysBlock *block;

    if (!s->has_filter) {
        s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head);
        return 0;
    }

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (block->target_start >= s->begin + s->length ||
            block->target_end <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->next_block = block;
        if (s->begin > block->target_start) {
            s->start = s->begin - block->target_start;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

static void get_max_mapnr(DumpState *s)
{
    GuestPhysBlock *last_block;

    last_block = QTAILQ_LAST(&s->guest_phys_blocks.head);
    s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end);
}

static DumpState dump_state_global = { .status = DUMP_STATUS_NONE };

static void dump_state_prepare(DumpState *s)
{
    /* zero the struct, setting status to active */
    *s = (DumpState) { .status = DUMP_STATUS_ACTIVE };
}

bool dump_in_progress(void)
{
    DumpState *state = &dump_state_global;
    return (qatomic_read(&state->status) == DUMP_STATUS_ACTIVE);
}

/* calculate total size of memory to be dumped (taking filter into
 * account.) */
static int64_t dump_calculate_size(DumpState *s)
{
    GuestPhysBlock *block;
    int64_t size = 0, total = 0, left = 0, right = 0;

    QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) {
        if (s->has_filter) {
            /* calculate the overlapped region. */
            left = MAX(s->begin, block->target_start);
            right = MIN(s->begin + s->length, block->target_end);
            size = right - left;
            size = size > 0 ? size : 0;
        } else {
            /* count the whole region in */
            size = (block->target_end - block->target_start);
        }
        total += size;
    }

    return total;
}

static void vmcoreinfo_update_phys_base(DumpState *s)
{
    uint64_t size, note_head_size, name_size, phys_base;
    char **lines;
    uint8_t *vmci;
    size_t i;

    if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) {
        return;
    }

    get_note_sizes(s, s->guest_note, &note_head_size, &name_size, &size);
    note_head_size = ROUND_UP(note_head_size, 4);

    vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4);
    *(vmci + size) = '\0';

    lines = g_strsplit((char *)vmci, "\n", -1);
    for (i = 0; lines[i]; i++) {
        const char *prefix = NULL;

        if (s->dump_info.d_machine == EM_X86_64) {
            prefix = "NUMBER(phys_base)=";
        } else if (s->dump_info.d_machine == EM_AARCH64) {
            prefix = "NUMBER(PHYS_OFFSET)=";
        }

        if (prefix && g_str_has_prefix(lines[i], prefix)) {
            if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16,
                              &phys_base) < 0) {
                warn_report("Failed to read %s", prefix);
            } else {
                s->dump_info.phys_base = phys_base;
            }
            break;
        }
    }

    g_strfreev(lines);
}

static void dump_init(DumpState *s, int fd, bool has_format,
                      DumpGuestMemoryFormat format, bool paging, bool has_filter,
                      int64_t begin, int64_t length, Error **errp)
{
    VMCoreInfoState *vmci = vmcoreinfo_find();
    CPUState *cpu;
    int nr_cpus;
    Error *err = NULL;
    int ret;

    s->has_format = has_format;
    s->format = format;
    s->written_size = 0;

    /* kdump-compressed format conflicts with paging and filter */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        assert(!paging && !has_filter);
    }

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    /* If we use KVM, we should synchronize the registers before we get dump
     * info or physmap info.
     */
    cpu_synchronize_all_states();
    nr_cpus = 0;
    CPU_FOREACH(cpu) {
        nr_cpus++;
    }

    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;

    memory_mapping_list_init(&s->list);

    guest_phys_blocks_init(&s->guest_phys_blocks);
    guest_phys_blocks_append(&s->guest_phys_blocks);
    s->total_size = dump_calculate_size(s);
#ifdef DEBUG_DUMP_GUEST_MEMORY
    fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size);
#endif

    /* it does not make sense to dump non-existent memory */
    if (!s->total_size) {
        error_setg(errp, "dump: no guest memory to dump");
        goto cleanup;
    }

    s->start = get_start_block(s);
    if (s->start == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /* get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     */
    ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks);
    if (ret < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    if (!s->dump_info.page_size) {
        s->dump_info.page_size = TARGET_PAGE_SIZE;
    }

    s->note_size = cpu_get_note_size(s->dump_info.d_class,
                                     s->dump_info.d_machine, nr_cpus);
    if (s->note_size < 0) {
        error_setg(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /*
     * The goal of this block is to (a) update the previously guessed
     * phys_base, (b) copy the guest note out of the guest.
     * Failure to do so is not fatal for dumping.
     */
    if (vmci) {
        uint64_t addr, note_head_size, name_size, desc_size;
        uint32_t size;
        uint16_t format;

        note_head_size = s->dump_info.d_class == ELFCLASS32 ?
            sizeof(Elf32_Nhdr) : sizeof(Elf64_Nhdr);

        format = le16_to_cpu(vmci->vmcoreinfo.guest_format);
        size = le32_to_cpu(vmci->vmcoreinfo.size);
        addr = le64_to_cpu(vmci->vmcoreinfo.paddr);
        if (!vmci->has_vmcoreinfo) {
            warn_report("guest note is not present");
        } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) {
            warn_report("guest note size is invalid: %" PRIu32, size);
        } else if (format != FW_CFG_VMCOREINFO_FORMAT_ELF) {
            warn_report("guest note format is unsupported: %" PRIu16, format);
        } else {
            s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */
            cpu_physical_memory_read(addr, s->guest_note, size);

            get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size);
            s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size,
                                               desc_size);
            if (name_size > MAX_GUEST_NOTE_SIZE ||
                desc_size > MAX_GUEST_NOTE_SIZE ||
                s->guest_note_size > size) {
                warn_report("Invalid guest note header");
                g_free(s->guest_note);
                s->guest_note = NULL;
            } else {
                vmcoreinfo_update_phys_base(s);
                s->note_size += s->guest_note_size;
            }
        }
    }

    /* get memory mapping */
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err);
        if (err != NULL) {
            error_propagate(errp, err);
            goto cleanup;
        }
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks);
    }

    s->nr_cpus = nr_cpus;

    get_max_mapnr(s);
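
    /*
     * Size the dump bitmap: one bit per page frame up to max_mapnr, rounded
     * up to a whole number of dump pages.  As a hypothetical example,
     * 0x100000 frames of 4 KiB pages need 0x100000 / 8 = 131072 bytes,
     * i.e. exactly 32 pages, so len_dump_bitmap = 131072.
     */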
    uint64_t tmp;
    tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT),
                       s->dump_info.page_size);
    s->len_dump_bitmap = tmp * s->dump_info.page_size;

    /* init for kdump-compressed format */
    if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        switch (format) {
        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB:
            s->flag_compress = DUMP_DH_COMPRESSED_ZLIB;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO:
#ifdef CONFIG_LZO
            if (lzo_init() != LZO_E_OK) {
                error_setg(errp, "failed to initialize the LZO library");
                goto cleanup;
            }
#endif
            s->flag_compress = DUMP_DH_COMPRESSED_LZO;
            break;

        case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY:
            s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY;
            break;

        default:
            s->flag_compress = 0;
        }

        return;
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }
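
    /*
     * PN_XNUM (0xffff) used above is the standard ELF escape value: when
     * the real program header count does not fit into the 16-bit e_phnum
     * field, e_phnum is set to PN_XNUM and readers take the actual count
     * from sh_info of the first section header instead.
     */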

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }
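
    /*
     * memory_offset is where the guest RAM payload starts in the vmcore:
     * ELF header, program headers, optionally the single section header,
     * then the notes.  As a hypothetical ELFCLASS64 example with no section
     * header, two program headers and note_size = 0x200:
     * 64 (Elf64_Ehdr) + 2 * 56 (Elf64_Phdr) + 0x200 = 0x2b0.
     */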

    return;

cleanup:
    dump_cleanup(s);
}

/* this operation might be time consuming. */
static void dump_process(DumpState *s, Error **errp)
{
    Error *local_err = NULL;
    DumpQueryResult *result = NULL;

    if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
#ifdef TARGET_X86_64
        create_win_dump(s, &local_err);
#endif
    } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) {
        create_kdump_vmcore(s, &local_err);
    } else {
        create_vmcore(s, &local_err);
    }

    /* make sure status is written after written_size updates */
    smp_wmb();
    qatomic_set(&s->status,
                (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED));

    /* send DUMP_COMPLETED message (unconditionally) */
    result = qmp_query_dump(NULL);
    /* should never fail */
    assert(result);
    qapi_event_send_dump_completed(result, !!local_err, (local_err ?
                                   error_get_pretty(local_err) : NULL));
    qapi_free_DumpQueryResult(result);

    error_propagate(errp, local_err);
    dump_cleanup(s);
}

static void *dump_thread(void *data)
{
    DumpState *s = (DumpState *)data;
    dump_process(s, NULL);
    return NULL;
}
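
/*
 * Reader side of the barrier pairing in dump_process(): the read barrier
 * between loading status and written_size mirrors the smp_wmb() on the
 * writer side, so once a COMPLETED or FAILED status is observed the
 * written_size read afterwards is the final value.
 */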
DumpQueryResult *qmp_query_dump(Error **errp)
{
    DumpQueryResult *result = g_new(DumpQueryResult, 1);
    DumpState *state = &dump_state_global;
    result->status = qatomic_read(&state->status);
    /* make sure we are reading status and written_size in order */
    smp_rmb();
    result->completed = state->written_size;
    result->total = state->total_size;
    return result;
}
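
/*
 * QMP handler for the "dump-guest-memory" command.  The file argument
 * carries the protocol: "fd:<name>" reuses a file descriptor previously
 * passed to the monitor, "file:<path>" opens a new file.  A hypothetical
 * invocation could look like:
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false, "protocol": "file:/tmp/vmcore" } }
 */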
void qmp_dump_guest_memory(bool paging, const char *file,
                           bool has_detach, bool detach,
                           bool has_begin, int64_t begin, bool has_length,
                           int64_t length, bool has_format,
                           DumpGuestMemoryFormat format, Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    Error *local_err = NULL;
    bool detach_p = false;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Dump not allowed during incoming migration.");
        return;
    }

    /* if there is a dump in background, we should wait until the dump
     * finishes */
    if (dump_in_progress()) {
        error_setg(errp, "There is a dump in process, please wait.");
        return;
    }

    /*
     * kdump-compressed format needs the whole memory dumped, so paging or
     * filter is not supported here.
     */
    if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) &&
        (paging || has_begin || has_length)) {
        error_setg(errp, "kdump-compressed format doesn't support paging or "
                         "filter");
        return;
    }
    if (has_begin && !has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_setg(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

    if (has_detach) {
        detach_p = detach;
    }

    /* check whether lzo/snappy is supported */
#ifndef CONFIG_LZO
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) {
        error_setg(errp, "kdump-lzo is not available now");
        return;
    }
#endif

#ifndef CONFIG_SNAPPY
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) {
        error_setg(errp, "kdump-snappy is not available now");
        return;
    }
#endif

#ifndef TARGET_X86_64
    if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) {
        error_setg(errp, "Windows dump is only available for x86-64");
        return;
    }
#endif

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(monitor_cur(), p, errp);
        if (fd == -1) {
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open_old(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_setg_file_open(errp, errno, p);
            return;
        }
    }

    if (fd == -1) {
        error_setg(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = &dump_state_global;
    dump_state_prepare(s);

    dump_init(s, fd, has_format, format, paging, has_begin,
              begin, length, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        qatomic_set(&s->status, DUMP_STATUS_FAILED);
        return;
    }

    if (detach_p) {
        /* detached dump */
        s->detached = true;
        qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread,
                           s, QEMU_THREAD_DETACHED);
    } else {
        /* sync dump */
        dump_process(s, errp);
    }
}
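
/*
 * Reply to the "query-dump-guest-memory-capability" command: build the
 * list of formats compiled into this binary by hand-chaining
 * DumpGuestMemoryFormatList nodes.  On a build with every option enabled
 * the reply would look something like:
 *
 *   { "return": { "formats":
 *       [ "elf", "kdump-zlib", "kdump-lzo", "kdump-snappy", "win-dmp" ] } }
 */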
DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp)
{
    DumpGuestMemoryFormatList *item;
    DumpGuestMemoryCapability *cap =
        g_malloc0(sizeof(DumpGuestMemoryCapability));

    /* elf is always available */
    item = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    cap->formats = item;
    item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;

    /* kdump-zlib is always available */
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;

    /* add new item if kdump-lzo is available */
#ifdef CONFIG_LZO
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
#endif

    /* add new item if kdump-snappy is available */
#ifdef CONFIG_SNAPPY
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
#endif

    /* Windows dump is available only if target is x86_64 */
#ifdef TARGET_X86_64
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_WIN_DMP;
#endif

    return cap;
}