dump.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865
  1. /*
  2. * QEMU dump
  3. *
  4. * Copyright Fujitsu, Corp. 2011, 2012
  5. *
  6. * Authors:
  7. * Wen Congyang <wency@cn.fujitsu.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2 or later.
  10. * See the COPYING file in the top-level directory.
  11. *
  12. */
  13. #include "qemu-common.h"
  14. #include "elf.h"
  15. #include "cpu.h"
  16. #include "exec/cpu-all.h"
  17. #include "exec/hwaddr.h"
  18. #include "monitor/monitor.h"
  19. #include "sysemu/kvm.h"
  20. #include "sysemu/dump.h"
  21. #include "sysemu/sysemu.h"
  22. #include "sysemu/memory_mapping.h"
  23. #include "qapi/error.h"
  24. #include "qmp-commands.h"
  25. #include "exec/gdbstub.h"
  26. static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
  27. {
  28. if (endian == ELFDATA2LSB) {
  29. val = cpu_to_le16(val);
  30. } else {
  31. val = cpu_to_be16(val);
  32. }
  33. return val;
  34. }
  35. static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
  36. {
  37. if (endian == ELFDATA2LSB) {
  38. val = cpu_to_le32(val);
  39. } else {
  40. val = cpu_to_be32(val);
  41. }
  42. return val;
  43. }
  44. static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
  45. {
  46. if (endian == ELFDATA2LSB) {
  47. val = cpu_to_le64(val);
  48. } else {
  49. val = cpu_to_be64(val);
  50. }
  51. return val;
  52. }
/* State carried across a single guest-memory dump operation. */
typedef struct DumpState {
    ArchDumpInfo dump_info;     /* target ELF class/endianness/machine */
    MemoryMappingList list;     /* guest mappings emitted as PT_LOAD entries */
    uint16_t phdr_num;          /* e_phnum value (PN_XNUM when count overflows) */
    uint32_t sh_info;           /* real phdr count, stored in shdr when PN_XNUM */
    bool have_section;          /* emit a section header to carry sh_info */
    bool resume;                /* restart the VM when the dump finishes */
    size_t note_size;           /* total size of the ELF note segment */
    hwaddr memory_offset;       /* file offset where guest memory begins */
    int fd;                     /* output file descriptor (-1 if none) */
    RAMBlock *block;            /* RAM block currently being written */
    ram_addr_t start;           /* write offset within the current block */
    bool has_filter;            /* restrict dump to [begin, begin + length) */
    int64_t begin;              /* filter window start (guest physical) */
    int64_t length;             /* filter window length */
    Error **errp;               /* caller-supplied error destination */
} DumpState;
  70. static int dump_cleanup(DumpState *s)
  71. {
  72. int ret = 0;
  73. memory_mapping_list_free(&s->list);
  74. if (s->fd != -1) {
  75. close(s->fd);
  76. }
  77. if (s->resume) {
  78. vm_start();
  79. }
  80. return ret;
  81. }
/*
 * Abort the dump and tear down its state.
 * NOTE(review): @reason is currently unused — callers pass a human-readable
 * message; consider propagating it through s->errp.
 */
static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}
  86. static int fd_write_vmcore(void *buf, size_t size, void *opaque)
  87. {
  88. DumpState *s = opaque;
  89. size_t written_size;
  90. written_size = qemu_write_full(s->fd, buf, size);
  91. if (written_size != size) {
  92. return -1;
  93. }
  94. return 0;
  95. }
  96. static int write_elf64_header(DumpState *s)
  97. {
  98. Elf64_Ehdr elf_header;
  99. int ret;
  100. int endian = s->dump_info.d_endian;
  101. memset(&elf_header, 0, sizeof(Elf64_Ehdr));
  102. memcpy(&elf_header, ELFMAG, SELFMAG);
  103. elf_header.e_ident[EI_CLASS] = ELFCLASS64;
  104. elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
  105. elf_header.e_ident[EI_VERSION] = EV_CURRENT;
  106. elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
  107. elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
  108. endian);
  109. elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
  110. elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
  111. elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
  112. elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
  113. endian);
  114. elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
  115. if (s->have_section) {
  116. uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;
  117. elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
  118. elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
  119. endian);
  120. elf_header.e_shnum = cpu_convert_to_target16(1, endian);
  121. }
  122. ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
  123. if (ret < 0) {
  124. dump_error(s, "dump: failed to write elf header.\n");
  125. return -1;
  126. }
  127. return 0;
  128. }
  129. static int write_elf32_header(DumpState *s)
  130. {
  131. Elf32_Ehdr elf_header;
  132. int ret;
  133. int endian = s->dump_info.d_endian;
  134. memset(&elf_header, 0, sizeof(Elf32_Ehdr));
  135. memcpy(&elf_header, ELFMAG, SELFMAG);
  136. elf_header.e_ident[EI_CLASS] = ELFCLASS32;
  137. elf_header.e_ident[EI_DATA] = endian;
  138. elf_header.e_ident[EI_VERSION] = EV_CURRENT;
  139. elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
  140. elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
  141. endian);
  142. elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
  143. elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
  144. elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
  145. elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
  146. endian);
  147. elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
  148. if (s->have_section) {
  149. uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;
  150. elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
  151. elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
  152. endian);
  153. elf_header.e_shnum = cpu_convert_to_target16(1, endian);
  154. }
  155. ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
  156. if (ret < 0) {
  157. dump_error(s, "dump: failed to write elf header.\n");
  158. return -1;
  159. }
  160. return 0;
  161. }
  162. static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
  163. int phdr_index, hwaddr offset)
  164. {
  165. Elf64_Phdr phdr;
  166. int ret;
  167. int endian = s->dump_info.d_endian;
  168. memset(&phdr, 0, sizeof(Elf64_Phdr));
  169. phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
  170. phdr.p_offset = cpu_convert_to_target64(offset, endian);
  171. phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
  172. if (offset == -1) {
  173. /* When the memory is not stored into vmcore, offset will be -1 */
  174. phdr.p_filesz = 0;
  175. } else {
  176. phdr.p_filesz = cpu_convert_to_target64(memory_mapping->length, endian);
  177. }
  178. phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
  179. phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);
  180. ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
  181. if (ret < 0) {
  182. dump_error(s, "dump: failed to write program header table.\n");
  183. return -1;
  184. }
  185. return 0;
  186. }
  187. static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
  188. int phdr_index, hwaddr offset)
  189. {
  190. Elf32_Phdr phdr;
  191. int ret;
  192. int endian = s->dump_info.d_endian;
  193. memset(&phdr, 0, sizeof(Elf32_Phdr));
  194. phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
  195. phdr.p_offset = cpu_convert_to_target32(offset, endian);
  196. phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
  197. if (offset == -1) {
  198. /* When the memory is not stored into vmcore, offset will be -1 */
  199. phdr.p_filesz = 0;
  200. } else {
  201. phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
  202. }
  203. phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
  204. phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);
  205. ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
  206. if (ret < 0) {
  207. dump_error(s, "dump: failed to write program header table.\n");
  208. return -1;
  209. }
  210. return 0;
  211. }
  212. static int write_elf64_note(DumpState *s)
  213. {
  214. Elf64_Phdr phdr;
  215. int endian = s->dump_info.d_endian;
  216. hwaddr begin = s->memory_offset - s->note_size;
  217. int ret;
  218. memset(&phdr, 0, sizeof(Elf64_Phdr));
  219. phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
  220. phdr.p_offset = cpu_convert_to_target64(begin, endian);
  221. phdr.p_paddr = 0;
  222. phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
  223. phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);
  224. phdr.p_vaddr = 0;
  225. ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
  226. if (ret < 0) {
  227. dump_error(s, "dump: failed to write program header table.\n");
  228. return -1;
  229. }
  230. return 0;
  231. }
  232. static int write_elf64_notes(DumpState *s)
  233. {
  234. CPUArchState *env;
  235. int ret;
  236. int id;
  237. for (env = first_cpu; env != NULL; env = env->next_cpu) {
  238. id = cpu_index(env);
  239. ret = cpu_write_elf64_note(fd_write_vmcore, env, id, s);
  240. if (ret < 0) {
  241. dump_error(s, "dump: failed to write elf notes.\n");
  242. return -1;
  243. }
  244. }
  245. for (env = first_cpu; env != NULL; env = env->next_cpu) {
  246. ret = cpu_write_elf64_qemunote(fd_write_vmcore, env, s);
  247. if (ret < 0) {
  248. dump_error(s, "dump: failed to write CPU status.\n");
  249. return -1;
  250. }
  251. }
  252. return 0;
  253. }
  254. static int write_elf32_note(DumpState *s)
  255. {
  256. hwaddr begin = s->memory_offset - s->note_size;
  257. Elf32_Phdr phdr;
  258. int endian = s->dump_info.d_endian;
  259. int ret;
  260. memset(&phdr, 0, sizeof(Elf32_Phdr));
  261. phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
  262. phdr.p_offset = cpu_convert_to_target32(begin, endian);
  263. phdr.p_paddr = 0;
  264. phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
  265. phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);
  266. phdr.p_vaddr = 0;
  267. ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
  268. if (ret < 0) {
  269. dump_error(s, "dump: failed to write program header table.\n");
  270. return -1;
  271. }
  272. return 0;
  273. }
  274. static int write_elf32_notes(DumpState *s)
  275. {
  276. CPUArchState *env;
  277. int ret;
  278. int id;
  279. for (env = first_cpu; env != NULL; env = env->next_cpu) {
  280. id = cpu_index(env);
  281. ret = cpu_write_elf32_note(fd_write_vmcore, env, id, s);
  282. if (ret < 0) {
  283. dump_error(s, "dump: failed to write elf notes.\n");
  284. return -1;
  285. }
  286. }
  287. for (env = first_cpu; env != NULL; env = env->next_cpu) {
  288. ret = cpu_write_elf32_qemunote(fd_write_vmcore, env, s);
  289. if (ret < 0) {
  290. dump_error(s, "dump: failed to write CPU status.\n");
  291. return -1;
  292. }
  293. }
  294. return 0;
  295. }
  296. static int write_elf_section(DumpState *s, int type)
  297. {
  298. Elf32_Shdr shdr32;
  299. Elf64_Shdr shdr64;
  300. int endian = s->dump_info.d_endian;
  301. int shdr_size;
  302. void *shdr;
  303. int ret;
  304. if (type == 0) {
  305. shdr_size = sizeof(Elf32_Shdr);
  306. memset(&shdr32, 0, shdr_size);
  307. shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
  308. shdr = &shdr32;
  309. } else {
  310. shdr_size = sizeof(Elf64_Shdr);
  311. memset(&shdr64, 0, shdr_size);
  312. shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
  313. shdr = &shdr64;
  314. }
  315. ret = fd_write_vmcore(&shdr, shdr_size, s);
  316. if (ret < 0) {
  317. dump_error(s, "dump: failed to write section header table.\n");
  318. return -1;
  319. }
  320. return 0;
  321. }
  322. static int write_data(DumpState *s, void *buf, int length)
  323. {
  324. int ret;
  325. ret = fd_write_vmcore(buf, length, s);
  326. if (ret < 0) {
  327. dump_error(s, "dump: failed to save memory.\n");
  328. return -1;
  329. }
  330. return 0;
  331. }
  332. /* write the memroy to vmcore. 1 page per I/O. */
  333. static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
  334. int64_t size)
  335. {
  336. int64_t i;
  337. int ret;
  338. for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
  339. ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
  340. TARGET_PAGE_SIZE);
  341. if (ret < 0) {
  342. return ret;
  343. }
  344. }
  345. if ((size % TARGET_PAGE_SIZE) != 0) {
  346. ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
  347. size % TARGET_PAGE_SIZE);
  348. if (ret < 0) {
  349. return ret;
  350. }
  351. }
  352. return 0;
  353. }
/*
 * Map a guest physical address to its file offset within the vmcore.
 *
 * Walks the RAM block list in order, accumulating into @offset the number
 * of bytes each (possibly filter-clipped) block contributes to the file,
 * starting from s->memory_offset — this must match the order in which
 * dump_iterate() writes the blocks.  Returns (hwaddr)-1 when the address
 * is outside the filter window or not backed by any RAM block.
 */
static hwaddr get_offset(hwaddr phys_addr,
                         DumpState *s)
{
    RAMBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            /* address lies outside the requested dump window */
            return -1;
        }
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }
            /* clip the block's start to the filter window */
            if (s->begin <= block->offset) {
                start = block->offset;
            } else {
                start = s->begin;
            }
            size_in_block = block->length - (start - block->offset);
            /* clip the block's end to the filter window */
            if (s->begin + s->length < block->offset + block->length) {
                size_in_block -= block->offset + block->length -
                    (s->begin + s->length);
            }
        } else {
            start = block->offset;
            size_in_block = block->length;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            return phys_addr - start + offset;
        }

        /* this block occupies size_in_block bytes of the vmcore file */
        offset += size_in_block;
    }

    return -1;
}
  394. static int write_elf_loads(DumpState *s)
  395. {
  396. hwaddr offset;
  397. MemoryMapping *memory_mapping;
  398. uint32_t phdr_index = 1;
  399. int ret;
  400. uint32_t max_index;
  401. if (s->have_section) {
  402. max_index = s->sh_info;
  403. } else {
  404. max_index = s->phdr_num;
  405. }
  406. QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
  407. offset = get_offset(memory_mapping->phys_addr, s);
  408. if (s->dump_info.d_class == ELFCLASS64) {
  409. ret = write_elf64_load(s, memory_mapping, phdr_index++, offset);
  410. } else {
  411. ret = write_elf32_load(s, memory_mapping, phdr_index++, offset);
  412. }
  413. if (ret < 0) {
  414. return -1;
  415. }
  416. if (phdr_index >= max_index) {
  417. break;
  418. }
  419. }
  420. return 0;
  421. }
  422. /* write elf header, PT_NOTE and elf note to vmcore. */
  423. static int dump_begin(DumpState *s)
  424. {
  425. int ret;
  426. /*
  427. * the vmcore's format is:
  428. * --------------
  429. * | elf header |
  430. * --------------
  431. * | PT_NOTE |
  432. * --------------
  433. * | PT_LOAD |
  434. * --------------
  435. * | ...... |
  436. * --------------
  437. * | PT_LOAD |
  438. * --------------
  439. * | sec_hdr |
  440. * --------------
  441. * | elf note |
  442. * --------------
  443. * | memory |
  444. * --------------
  445. *
  446. * we only know where the memory is saved after we write elf note into
  447. * vmcore.
  448. */
  449. /* write elf header to vmcore */
  450. if (s->dump_info.d_class == ELFCLASS64) {
  451. ret = write_elf64_header(s);
  452. } else {
  453. ret = write_elf32_header(s);
  454. }
  455. if (ret < 0) {
  456. return -1;
  457. }
  458. if (s->dump_info.d_class == ELFCLASS64) {
  459. /* write PT_NOTE to vmcore */
  460. if (write_elf64_note(s) < 0) {
  461. return -1;
  462. }
  463. /* write all PT_LOAD to vmcore */
  464. if (write_elf_loads(s) < 0) {
  465. return -1;
  466. }
  467. /* write section to vmcore */
  468. if (s->have_section) {
  469. if (write_elf_section(s, 1) < 0) {
  470. return -1;
  471. }
  472. }
  473. /* write notes to vmcore */
  474. if (write_elf64_notes(s) < 0) {
  475. return -1;
  476. }
  477. } else {
  478. /* write PT_NOTE to vmcore */
  479. if (write_elf32_note(s) < 0) {
  480. return -1;
  481. }
  482. /* write all PT_LOAD to vmcore */
  483. if (write_elf_loads(s) < 0) {
  484. return -1;
  485. }
  486. /* write section to vmcore */
  487. if (s->have_section) {
  488. if (write_elf_section(s, 0) < 0) {
  489. return -1;
  490. }
  491. }
  492. /* write notes to vmcore */
  493. if (write_elf32_notes(s) < 0) {
  494. return -1;
  495. }
  496. }
  497. return 0;
  498. }
  499. /* write PT_LOAD to vmcore */
  500. static int dump_completed(DumpState *s)
  501. {
  502. dump_cleanup(s);
  503. return 0;
  504. }
  505. static int get_next_block(DumpState *s, RAMBlock *block)
  506. {
  507. while (1) {
  508. block = QTAILQ_NEXT(block, next);
  509. if (!block) {
  510. /* no more block */
  511. return 1;
  512. }
  513. s->start = 0;
  514. s->block = block;
  515. if (s->has_filter) {
  516. if (block->offset >= s->begin + s->length ||
  517. block->offset + block->length <= s->begin) {
  518. /* This block is out of the range */
  519. continue;
  520. }
  521. if (s->begin > block->offset) {
  522. s->start = s->begin - block->offset;
  523. }
  524. }
  525. return 0;
  526. }
  527. }
  528. /* write all memory to vmcore */
  529. static int dump_iterate(DumpState *s)
  530. {
  531. RAMBlock *block;
  532. int64_t size;
  533. int ret;
  534. while (1) {
  535. block = s->block;
  536. size = block->length;
  537. if (s->has_filter) {
  538. size -= s->start;
  539. if (s->begin + s->length < block->offset + block->length) {
  540. size -= block->offset + block->length - (s->begin + s->length);
  541. }
  542. }
  543. ret = write_memory(s, block, s->start, size);
  544. if (ret == -1) {
  545. return ret;
  546. }
  547. ret = get_next_block(s, block);
  548. if (ret == 1) {
  549. dump_completed(s);
  550. return 0;
  551. }
  552. }
  553. }
  554. static int create_vmcore(DumpState *s)
  555. {
  556. int ret;
  557. ret = dump_begin(s);
  558. if (ret < 0) {
  559. return -1;
  560. }
  561. ret = dump_iterate(s);
  562. if (ret < 0) {
  563. return -1;
  564. }
  565. return 0;
  566. }
  567. static ram_addr_t get_start_block(DumpState *s)
  568. {
  569. RAMBlock *block;
  570. if (!s->has_filter) {
  571. s->block = QTAILQ_FIRST(&ram_list.blocks);
  572. return 0;
  573. }
  574. QTAILQ_FOREACH(block, &ram_list.blocks, next) {
  575. if (block->offset >= s->begin + s->length ||
  576. block->offset + block->length <= s->begin) {
  577. /* This block is out of the range */
  578. continue;
  579. }
  580. s->block = block;
  581. if (s->begin > block->offset) {
  582. s->start = s->begin - block->offset;
  583. } else {
  584. s->start = 0;
  585. }
  586. return s->start;
  587. }
  588. return -1;
  589. }
  590. static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
  591. int64_t begin, int64_t length, Error **errp)
  592. {
  593. CPUArchState *env;
  594. int nr_cpus;
  595. int ret;
  596. if (runstate_is_running()) {
  597. vm_stop(RUN_STATE_SAVE_VM);
  598. s->resume = true;
  599. } else {
  600. s->resume = false;
  601. }
  602. s->errp = errp;
  603. s->fd = fd;
  604. s->has_filter = has_filter;
  605. s->begin = begin;
  606. s->length = length;
  607. s->start = get_start_block(s);
  608. if (s->start == -1) {
  609. error_set(errp, QERR_INVALID_PARAMETER, "begin");
  610. goto cleanup;
  611. }
  612. /*
  613. * get dump info: endian, class and architecture.
  614. * If the target architecture is not supported, cpu_get_dump_info() will
  615. * return -1.
  616. *
  617. * if we use kvm, we should synchronize the register before we get dump
  618. * info.
  619. */
  620. nr_cpus = 0;
  621. for (env = first_cpu; env != NULL; env = env->next_cpu) {
  622. cpu_synchronize_state(env);
  623. nr_cpus++;
  624. }
  625. ret = cpu_get_dump_info(&s->dump_info);
  626. if (ret < 0) {
  627. error_set(errp, QERR_UNSUPPORTED);
  628. goto cleanup;
  629. }
  630. s->note_size = cpu_get_note_size(s->dump_info.d_class,
  631. s->dump_info.d_machine, nr_cpus);
  632. if (ret < 0) {
  633. error_set(errp, QERR_UNSUPPORTED);
  634. goto cleanup;
  635. }
  636. /* get memory mapping */
  637. memory_mapping_list_init(&s->list);
  638. if (paging) {
  639. qemu_get_guest_memory_mapping(&s->list);
  640. } else {
  641. qemu_get_guest_simple_memory_mapping(&s->list);
  642. }
  643. if (s->has_filter) {
  644. memory_mapping_filter(&s->list, s->begin, s->length);
  645. }
  646. /*
  647. * calculate phdr_num
  648. *
  649. * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
  650. */
  651. s->phdr_num = 1; /* PT_NOTE */
  652. if (s->list.num < UINT16_MAX - 2) {
  653. s->phdr_num += s->list.num;
  654. s->have_section = false;
  655. } else {
  656. s->have_section = true;
  657. s->phdr_num = PN_XNUM;
  658. s->sh_info = 1; /* PT_NOTE */
  659. /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
  660. if (s->list.num <= UINT32_MAX - 1) {
  661. s->sh_info += s->list.num;
  662. } else {
  663. s->sh_info = UINT32_MAX;
  664. }
  665. }
  666. if (s->dump_info.d_class == ELFCLASS64) {
  667. if (s->have_section) {
  668. s->memory_offset = sizeof(Elf64_Ehdr) +
  669. sizeof(Elf64_Phdr) * s->sh_info +
  670. sizeof(Elf64_Shdr) + s->note_size;
  671. } else {
  672. s->memory_offset = sizeof(Elf64_Ehdr) +
  673. sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
  674. }
  675. } else {
  676. if (s->have_section) {
  677. s->memory_offset = sizeof(Elf32_Ehdr) +
  678. sizeof(Elf32_Phdr) * s->sh_info +
  679. sizeof(Elf32_Shdr) + s->note_size;
  680. } else {
  681. s->memory_offset = sizeof(Elf32_Ehdr) +
  682. sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
  683. }
  684. }
  685. return 0;
  686. cleanup:
  687. if (s->resume) {
  688. vm_start();
  689. }
  690. return -1;
  691. }
  692. void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
  693. int64_t begin, bool has_length, int64_t length,
  694. Error **errp)
  695. {
  696. const char *p;
  697. int fd = -1;
  698. DumpState *s;
  699. int ret;
  700. if (has_begin && !has_length) {
  701. error_set(errp, QERR_MISSING_PARAMETER, "length");
  702. return;
  703. }
  704. if (!has_begin && has_length) {
  705. error_set(errp, QERR_MISSING_PARAMETER, "begin");
  706. return;
  707. }
  708. #if !defined(WIN32)
  709. if (strstart(file, "fd:", &p)) {
  710. fd = monitor_get_fd(cur_mon, p, errp);
  711. if (fd == -1) {
  712. return;
  713. }
  714. }
  715. #endif
  716. if (strstart(file, "file:", &p)) {
  717. fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
  718. if (fd < 0) {
  719. error_set(errp, QERR_OPEN_FILE_FAILED, p);
  720. return;
  721. }
  722. }
  723. if (fd == -1) {
  724. error_set(errp, QERR_INVALID_PARAMETER, "protocol");
  725. return;
  726. }
  727. s = g_malloc(sizeof(DumpState));
  728. ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
  729. if (ret < 0) {
  730. g_free(s);
  731. return;
  732. }
  733. if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
  734. error_set(errp, QERR_IO_ERROR);
  735. }
  736. g_free(s);
  737. }