dump.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873
  1. /*
  2. * QEMU dump
  3. *
  4. * Copyright Fujitsu, Corp. 2011, 2012
  5. *
  6. * Authors:
  7. * Wen Congyang <wency@cn.fujitsu.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2 or later.
  10. * See the COPYING file in the top-level directory.
  11. *
  12. */
  13. #include "qemu-common.h"
  14. #include "elf.h"
  15. #include "cpu.h"
  16. #include "cpu-all.h"
  17. #include "targphys.h"
  18. #include "monitor.h"
  19. #include "kvm.h"
  20. #include "dump.h"
  21. #include "sysemu.h"
  22. #include "memory_mapping.h"
  23. #include "error.h"
  24. #include "qmp-commands.h"
  25. #include "gdbstub.h"
  26. static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
  27. {
  28. if (endian == ELFDATA2LSB) {
  29. val = cpu_to_le16(val);
  30. } else {
  31. val = cpu_to_be16(val);
  32. }
  33. return val;
  34. }
  35. static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
  36. {
  37. if (endian == ELFDATA2LSB) {
  38. val = cpu_to_le32(val);
  39. } else {
  40. val = cpu_to_be32(val);
  41. }
  42. return val;
  43. }
  44. static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
  45. {
  46. if (endian == ELFDATA2LSB) {
  47. val = cpu_to_le64(val);
  48. } else {
  49. val = cpu_to_be64(val);
  50. }
  51. return val;
  52. }
/* All state needed while writing one vmcore. */
typedef struct DumpState {
    ArchDumpInfo dump_info;           /* target's ELF class/endian/machine */
    MemoryMappingList list;           /* guest mappings -> PT_LOAD entries */
    uint16_t phdr_num;                /* program header count, or PN_XNUM */
    uint32_t sh_info;                 /* real phdr count when phdr_num == PN_XNUM */
    bool have_section;                /* emit the overflow section header? */
    bool resume;                      /* VM was running; restart it on cleanup */
    size_t note_size;                 /* total size of the ELF note segment */
    target_phys_addr_t memory_offset; /* file offset where guest memory starts */
    int fd;                           /* destination file descriptor */
    RAMBlock *block;                  /* RAM block currently being written */
    ram_addr_t start;                 /* offset into the current block */
    bool has_filter;                  /* restrict dump to [begin, begin+length) */
    int64_t begin;                    /* filter start (guest-phys address) */
    int64_t length;                   /* filter length in bytes */
    Error **errp;                     /* where dump_init() reports errors */
} DumpState;
  70. static int dump_cleanup(DumpState *s)
  71. {
  72. int ret = 0;
  73. memory_mapping_list_free(&s->list);
  74. if (s->fd != -1) {
  75. close(s->fd);
  76. }
  77. if (s->resume) {
  78. vm_start();
  79. }
  80. return ret;
  81. }
/*
 * Abort the dump and release its resources.
 * NOTE(review): 'reason' is currently unused — the message is not reported
 * anywhere; callers rely on their own -1 return to signal failure.
 */
static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}
/*
 * Write 'size' bytes from 'buf' to the dump fd held in the DumpState
 * ('opaque').  This matches the write-callback signature expected by
 * cpu_write_elf*_note() / cpu_write_elf*_qemunote().
 *
 * Returns 0 on success, -1 on a write error other than EAGAIN.
 */
static int fd_write_vmcore(void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    int fd = s->fd;
    size_t writen_size;

    /* The fd may be passed from user, and it can be non-blocked */
    while (size) {
        writen_size = qemu_write_full(fd, buf, size);
        /* A short write with errno == EAGAIN is retried (busy-wait),
         * since the user-supplied fd may be non-blocking; any other
         * short write is treated as a hard error.
         * NOTE(review): assumes qemu_write_full() returns the byte count
         * actually written (never -1) — confirm against osdep.c. */
        if (writen_size != size && errno != EAGAIN) {
            return -1;
        }

        buf += writen_size;
        size -= writen_size;
    }

    return 0;
}
  102. static int write_elf64_header(DumpState *s)
  103. {
  104. Elf64_Ehdr elf_header;
  105. int ret;
  106. int endian = s->dump_info.d_endian;
  107. memset(&elf_header, 0, sizeof(Elf64_Ehdr));
  108. memcpy(&elf_header, ELFMAG, SELFMAG);
  109. elf_header.e_ident[EI_CLASS] = ELFCLASS64;
  110. elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
  111. elf_header.e_ident[EI_VERSION] = EV_CURRENT;
  112. elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
  113. elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
  114. endian);
  115. elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
  116. elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
  117. elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
  118. elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
  119. endian);
  120. elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
  121. if (s->have_section) {
  122. uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;
  123. elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
  124. elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
  125. endian);
  126. elf_header.e_shnum = cpu_convert_to_target16(1, endian);
  127. }
  128. ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
  129. if (ret < 0) {
  130. dump_error(s, "dump: failed to write elf header.\n");
  131. return -1;
  132. }
  133. return 0;
  134. }
  135. static int write_elf32_header(DumpState *s)
  136. {
  137. Elf32_Ehdr elf_header;
  138. int ret;
  139. int endian = s->dump_info.d_endian;
  140. memset(&elf_header, 0, sizeof(Elf32_Ehdr));
  141. memcpy(&elf_header, ELFMAG, SELFMAG);
  142. elf_header.e_ident[EI_CLASS] = ELFCLASS32;
  143. elf_header.e_ident[EI_DATA] = endian;
  144. elf_header.e_ident[EI_VERSION] = EV_CURRENT;
  145. elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
  146. elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
  147. endian);
  148. elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
  149. elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
  150. elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
  151. elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
  152. endian);
  153. elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
  154. if (s->have_section) {
  155. uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;
  156. elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
  157. elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
  158. endian);
  159. elf_header.e_shnum = cpu_convert_to_target16(1, endian);
  160. }
  161. ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
  162. if (ret < 0) {
  163. dump_error(s, "dump: failed to write elf header.\n");
  164. return -1;
  165. }
  166. return 0;
  167. }
  168. static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
  169. int phdr_index, target_phys_addr_t offset)
  170. {
  171. Elf64_Phdr phdr;
  172. int ret;
  173. int endian = s->dump_info.d_endian;
  174. memset(&phdr, 0, sizeof(Elf64_Phdr));
  175. phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
  176. phdr.p_offset = cpu_convert_to_target64(offset, endian);
  177. phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
  178. if (offset == -1) {
  179. /* When the memory is not stored into vmcore, offset will be -1 */
  180. phdr.p_filesz = 0;
  181. } else {
  182. phdr.p_filesz = cpu_convert_to_target64(memory_mapping->length, endian);
  183. }
  184. phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
  185. phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);
  186. ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
  187. if (ret < 0) {
  188. dump_error(s, "dump: failed to write program header table.\n");
  189. return -1;
  190. }
  191. return 0;
  192. }
  193. static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
  194. int phdr_index, target_phys_addr_t offset)
  195. {
  196. Elf32_Phdr phdr;
  197. int ret;
  198. int endian = s->dump_info.d_endian;
  199. memset(&phdr, 0, sizeof(Elf32_Phdr));
  200. phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
  201. phdr.p_offset = cpu_convert_to_target32(offset, endian);
  202. phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
  203. if (offset == -1) {
  204. /* When the memory is not stored into vmcore, offset will be -1 */
  205. phdr.p_filesz = 0;
  206. } else {
  207. phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
  208. }
  209. phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
  210. phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);
  211. ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
  212. if (ret < 0) {
  213. dump_error(s, "dump: failed to write program header table.\n");
  214. return -1;
  215. }
  216. return 0;
  217. }
  218. static int write_elf64_note(DumpState *s)
  219. {
  220. Elf64_Phdr phdr;
  221. int endian = s->dump_info.d_endian;
  222. target_phys_addr_t begin = s->memory_offset - s->note_size;
  223. int ret;
  224. memset(&phdr, 0, sizeof(Elf64_Phdr));
  225. phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
  226. phdr.p_offset = cpu_convert_to_target64(begin, endian);
  227. phdr.p_paddr = 0;
  228. phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
  229. phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);
  230. phdr.p_vaddr = 0;
  231. ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
  232. if (ret < 0) {
  233. dump_error(s, "dump: failed to write program header table.\n");
  234. return -1;
  235. }
  236. return 0;
  237. }
  238. static int write_elf64_notes(DumpState *s)
  239. {
  240. CPUArchState *env;
  241. int ret;
  242. int id;
  243. for (env = first_cpu; env != NULL; env = env->next_cpu) {
  244. id = cpu_index(env);
  245. ret = cpu_write_elf64_note(fd_write_vmcore, env, id, s);
  246. if (ret < 0) {
  247. dump_error(s, "dump: failed to write elf notes.\n");
  248. return -1;
  249. }
  250. }
  251. for (env = first_cpu; env != NULL; env = env->next_cpu) {
  252. ret = cpu_write_elf64_qemunote(fd_write_vmcore, env, s);
  253. if (ret < 0) {
  254. dump_error(s, "dump: failed to write CPU status.\n");
  255. return -1;
  256. }
  257. }
  258. return 0;
  259. }
  260. static int write_elf32_note(DumpState *s)
  261. {
  262. target_phys_addr_t begin = s->memory_offset - s->note_size;
  263. Elf32_Phdr phdr;
  264. int endian = s->dump_info.d_endian;
  265. int ret;
  266. memset(&phdr, 0, sizeof(Elf32_Phdr));
  267. phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
  268. phdr.p_offset = cpu_convert_to_target32(begin, endian);
  269. phdr.p_paddr = 0;
  270. phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
  271. phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);
  272. phdr.p_vaddr = 0;
  273. ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
  274. if (ret < 0) {
  275. dump_error(s, "dump: failed to write program header table.\n");
  276. return -1;
  277. }
  278. return 0;
  279. }
  280. static int write_elf32_notes(DumpState *s)
  281. {
  282. CPUArchState *env;
  283. int ret;
  284. int id;
  285. for (env = first_cpu; env != NULL; env = env->next_cpu) {
  286. id = cpu_index(env);
  287. ret = cpu_write_elf32_note(fd_write_vmcore, env, id, s);
  288. if (ret < 0) {
  289. dump_error(s, "dump: failed to write elf notes.\n");
  290. return -1;
  291. }
  292. }
  293. for (env = first_cpu; env != NULL; env = env->next_cpu) {
  294. ret = cpu_write_elf32_qemunote(fd_write_vmcore, env, s);
  295. if (ret < 0) {
  296. dump_error(s, "dump: failed to write CPU status.\n");
  297. return -1;
  298. }
  299. }
  300. return 0;
  301. }
  302. static int write_elf_section(DumpState *s, int type)
  303. {
  304. Elf32_Shdr shdr32;
  305. Elf64_Shdr shdr64;
  306. int endian = s->dump_info.d_endian;
  307. int shdr_size;
  308. void *shdr;
  309. int ret;
  310. if (type == 0) {
  311. shdr_size = sizeof(Elf32_Shdr);
  312. memset(&shdr32, 0, shdr_size);
  313. shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
  314. shdr = &shdr32;
  315. } else {
  316. shdr_size = sizeof(Elf64_Shdr);
  317. memset(&shdr64, 0, shdr_size);
  318. shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
  319. shdr = &shdr64;
  320. }
  321. ret = fd_write_vmcore(&shdr, shdr_size, s);
  322. if (ret < 0) {
  323. dump_error(s, "dump: failed to write section header table.\n");
  324. return -1;
  325. }
  326. return 0;
  327. }
  328. static int write_data(DumpState *s, void *buf, int length)
  329. {
  330. int ret;
  331. ret = fd_write_vmcore(buf, length, s);
  332. if (ret < 0) {
  333. dump_error(s, "dump: failed to save memory.\n");
  334. return -1;
  335. }
  336. return 0;
  337. }
  338. /* write the memroy to vmcore. 1 page per I/O. */
  339. static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
  340. int64_t size)
  341. {
  342. int64_t i;
  343. int ret;
  344. for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
  345. ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
  346. TARGET_PAGE_SIZE);
  347. if (ret < 0) {
  348. return ret;
  349. }
  350. }
  351. if ((size % TARGET_PAGE_SIZE) != 0) {
  352. ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
  353. size % TARGET_PAGE_SIZE);
  354. if (ret < 0) {
  355. return ret;
  356. }
  357. }
  358. return 0;
  359. }
/*
 * Map a guest physical address to its file offset inside the vmcore's
 * memory area.  Blocks are laid back to back starting at s->memory_offset,
 * in ram_list order, each clipped to the filter window when one is set.
 * Returns -1 when the address is filtered out or not RAM-backed.
 */
static target_phys_addr_t get_offset(target_phys_addr_t phys_addr,
                                     DumpState *s)
{
    RAMBlock *block;
    target_phys_addr_t offset = s->memory_offset;
    int64_t size_in_block, start;

    /* Addresses outside [begin, begin + length) are not in the vmcore. */
    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return -1;
        }
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            /* Clip the block to the filter window: 'start' is the first
             * dumped address, size_in_block the dumped byte count. */
            if (s->begin <= block->offset) {
                start = block->offset;
            } else {
                start = s->begin;
            }

            size_in_block = block->length - (start - block->offset);
            if (s->begin + s->length < block->offset + block->length) {
                size_in_block -= block->offset + block->length -
                                 (s->begin + s->length);
            }
        } else {
            start = block->offset;
            size_in_block = block->length;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            return phys_addr - start + offset;
        }

        /* Advance past this block's dumped bytes in the file. */
        offset += size_in_block;
    }

    /* phys_addr is not backed by any dumped RAMBlock. */
    return -1;
}
  400. static int write_elf_loads(DumpState *s)
  401. {
  402. target_phys_addr_t offset;
  403. MemoryMapping *memory_mapping;
  404. uint32_t phdr_index = 1;
  405. int ret;
  406. uint32_t max_index;
  407. if (s->have_section) {
  408. max_index = s->sh_info;
  409. } else {
  410. max_index = s->phdr_num;
  411. }
  412. QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
  413. offset = get_offset(memory_mapping->phys_addr, s);
  414. if (s->dump_info.d_class == ELFCLASS64) {
  415. ret = write_elf64_load(s, memory_mapping, phdr_index++, offset);
  416. } else {
  417. ret = write_elf32_load(s, memory_mapping, phdr_index++, offset);
  418. }
  419. if (ret < 0) {
  420. return -1;
  421. }
  422. if (phdr_index >= max_index) {
  423. break;
  424. }
  425. }
  426. return 0;
  427. }
  428. /* write elf header, PT_NOTE and elf note to vmcore. */
  429. static int dump_begin(DumpState *s)
  430. {
  431. int ret;
  432. /*
  433. * the vmcore's format is:
  434. * --------------
  435. * | elf header |
  436. * --------------
  437. * | PT_NOTE |
  438. * --------------
  439. * | PT_LOAD |
  440. * --------------
  441. * | ...... |
  442. * --------------
  443. * | PT_LOAD |
  444. * --------------
  445. * | sec_hdr |
  446. * --------------
  447. * | elf note |
  448. * --------------
  449. * | memory |
  450. * --------------
  451. *
  452. * we only know where the memory is saved after we write elf note into
  453. * vmcore.
  454. */
  455. /* write elf header to vmcore */
  456. if (s->dump_info.d_class == ELFCLASS64) {
  457. ret = write_elf64_header(s);
  458. } else {
  459. ret = write_elf32_header(s);
  460. }
  461. if (ret < 0) {
  462. return -1;
  463. }
  464. if (s->dump_info.d_class == ELFCLASS64) {
  465. /* write PT_NOTE to vmcore */
  466. if (write_elf64_note(s) < 0) {
  467. return -1;
  468. }
  469. /* write all PT_LOAD to vmcore */
  470. if (write_elf_loads(s) < 0) {
  471. return -1;
  472. }
  473. /* write section to vmcore */
  474. if (s->have_section) {
  475. if (write_elf_section(s, 1) < 0) {
  476. return -1;
  477. }
  478. }
  479. /* write notes to vmcore */
  480. if (write_elf64_notes(s) < 0) {
  481. return -1;
  482. }
  483. } else {
  484. /* write PT_NOTE to vmcore */
  485. if (write_elf32_note(s) < 0) {
  486. return -1;
  487. }
  488. /* write all PT_LOAD to vmcore */
  489. if (write_elf_loads(s) < 0) {
  490. return -1;
  491. }
  492. /* write section to vmcore */
  493. if (s->have_section) {
  494. if (write_elf_section(s, 0) < 0) {
  495. return -1;
  496. }
  497. }
  498. /* write notes to vmcore */
  499. if (write_elf32_notes(s) < 0) {
  500. return -1;
  501. }
  502. }
  503. return 0;
  504. }
/* All memory has been written; tear down the dump state. */
static int dump_completed(DumpState *s)
{
    dump_cleanup(s);
    return 0;
}
/*
 * Advance s->block/s->start to the next RAMBlock after 'block' that
 * intersects the dump filter (if one is set).
 * Returns 1 when there are no more blocks to dump, 0 otherwise.
 */
static int get_next_block(DumpState *s, RAMBlock *block)
{
    while (1) {
        block = QLIST_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->block = block;

        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            /* The filter window begins inside this block: skip ahead. */
            if (s->begin > block->offset) {
                s->start = s->begin - block->offset;
            }
        }

        return 0;
    }
}
/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
    RAMBlock *block;
    int64_t size;
    int ret;

    while (1) {
        block = s->block;

        /* Bytes of this block that go into the vmcore; with a filter
         * active, trim both the head (s->start, set by get_next_block /
         * get_start_block) and the tail past begin + length. */
        size = block->length;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->offset + block->length) {
                size -= block->offset + block->length - (s->begin + s->length);
            }
        }

        ret = write_memory(s, block, s->start, size);
        if (ret == -1) {
            return ret;
        }

        ret = get_next_block(s, block);
        if (ret == 1) {
            /* Last block written: finalize and release resources. */
            dump_completed(s);
            return 0;
        }
    }
}
  560. static int create_vmcore(DumpState *s)
  561. {
  562. int ret;
  563. ret = dump_begin(s);
  564. if (ret < 0) {
  565. return -1;
  566. }
  567. ret = dump_iterate(s);
  568. if (ret < 0) {
  569. return -1;
  570. }
  571. return 0;
  572. }
/*
 * Choose the first RAMBlock to dump and set s->block / s->start.
 * Returns the starting offset within that block, or -1 when a filter is
 * set and it intersects no RAM at all.
 */
static ram_addr_t get_start_block(DumpState *s)
{
    RAMBlock *block;

    if (!s->has_filter) {
        s->block = QLIST_FIRST(&ram_list.blocks);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset >= s->begin + s->length ||
            block->offset + block->length <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->block = block;
        /* The filter window may begin inside the block. */
        if (s->begin > block->offset) {
            s->start = s->begin - block->offset;
        } else {
            s->start = 0;
        }

        return s->start;
    }

    return -1;
}
  596. static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
  597. int64_t begin, int64_t length, Error **errp)
  598. {
  599. CPUArchState *env;
  600. int nr_cpus;
  601. int ret;
  602. if (runstate_is_running()) {
  603. vm_stop(RUN_STATE_SAVE_VM);
  604. s->resume = true;
  605. } else {
  606. s->resume = false;
  607. }
  608. s->errp = errp;
  609. s->fd = fd;
  610. s->has_filter = has_filter;
  611. s->begin = begin;
  612. s->length = length;
  613. s->start = get_start_block(s);
  614. if (s->start == -1) {
  615. error_set(errp, QERR_INVALID_PARAMETER, "begin");
  616. goto cleanup;
  617. }
  618. /*
  619. * get dump info: endian, class and architecture.
  620. * If the target architecture is not supported, cpu_get_dump_info() will
  621. * return -1.
  622. *
  623. * if we use kvm, we should synchronize the register before we get dump
  624. * info.
  625. */
  626. nr_cpus = 0;
  627. for (env = first_cpu; env != NULL; env = env->next_cpu) {
  628. cpu_synchronize_state(env);
  629. nr_cpus++;
  630. }
  631. ret = cpu_get_dump_info(&s->dump_info);
  632. if (ret < 0) {
  633. error_set(errp, QERR_UNSUPPORTED);
  634. goto cleanup;
  635. }
  636. s->note_size = cpu_get_note_size(s->dump_info.d_class,
  637. s->dump_info.d_machine, nr_cpus);
  638. if (ret < 0) {
  639. error_set(errp, QERR_UNSUPPORTED);
  640. goto cleanup;
  641. }
  642. /* get memory mapping */
  643. memory_mapping_list_init(&s->list);
  644. if (paging) {
  645. qemu_get_guest_memory_mapping(&s->list);
  646. } else {
  647. qemu_get_guest_simple_memory_mapping(&s->list);
  648. }
  649. if (s->has_filter) {
  650. memory_mapping_filter(&s->list, s->begin, s->length);
  651. }
  652. /*
  653. * calculate phdr_num
  654. *
  655. * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
  656. */
  657. s->phdr_num = 1; /* PT_NOTE */
  658. if (s->list.num < UINT16_MAX - 2) {
  659. s->phdr_num += s->list.num;
  660. s->have_section = false;
  661. } else {
  662. s->have_section = true;
  663. s->phdr_num = PN_XNUM;
  664. s->sh_info = 1; /* PT_NOTE */
  665. /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
  666. if (s->list.num <= UINT32_MAX - 1) {
  667. s->sh_info += s->list.num;
  668. } else {
  669. s->sh_info = UINT32_MAX;
  670. }
  671. }
  672. if (s->dump_info.d_class == ELFCLASS64) {
  673. if (s->have_section) {
  674. s->memory_offset = sizeof(Elf64_Ehdr) +
  675. sizeof(Elf64_Phdr) * s->sh_info +
  676. sizeof(Elf64_Shdr) + s->note_size;
  677. } else {
  678. s->memory_offset = sizeof(Elf64_Ehdr) +
  679. sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
  680. }
  681. } else {
  682. if (s->have_section) {
  683. s->memory_offset = sizeof(Elf32_Ehdr) +
  684. sizeof(Elf32_Phdr) * s->sh_info +
  685. sizeof(Elf32_Shdr) + s->note_size;
  686. } else {
  687. s->memory_offset = sizeof(Elf32_Ehdr) +
  688. sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
  689. }
  690. }
  691. return 0;
  692. cleanup:
  693. if (s->resume) {
  694. vm_start();
  695. }
  696. return -1;
  697. }
/*
 * QMP handler for dump-guest-memory: resolve the destination ("fd:NAME"
 * or "file:PATH"), initialize the dump state and write the whole vmcore.
 * Errors are reported through 'errp'.
 */
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length, int64_t length,
                           Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    int ret;

    /* 'begin' and 'length' must be supplied together or not at all. */
    if (has_begin && !has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

#if !defined(WIN32)
    /* "fd:NAME": a descriptor previously handed to the monitor. */
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p);
        if (fd == -1) {
            error_set(errp, QERR_FD_NOT_FOUND, p);
            return;
        }
    }
#endif

    /* "file:PATH": create/truncate a local file, owner-readable only. */
    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_set(errp, QERR_OPEN_FILE_FAILED, p);
            return;
        }
    }

    if (fd == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc(sizeof(DumpState));

    ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
    if (ret < 0) {
        g_free(s);
        return;
    }

    /* dump_init() may have recorded a specific error via s->errp; only
     * report a generic I/O error when nothing more precise was set. */
    if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
        error_set(errp, QERR_IO_ERROR);
    }

    g_free(s);
}