/*
 * gdb server stub - softmmu specific bits
 *
 * Debug integration depends on support from the individual
 * accelerators so most of this involves calling the ops helpers.
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 * Copyright (c) 2022 Linaro Ltd
 *
 * SPDX-License-Identifier: LGPL-2.0+
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "exec/gdbstub.h"
#include "gdbstub/syscalls.h"
#include "exec/hwaddr.h"
#include "exec/tb-flush.h"
#include "sysemu/cpus.h"
#include "sysemu/runstate.h"
#include "sysemu/replay.h"
#include "hw/core/cpu.h"
#include "hw/cpu/cluster.h"
#include "hw/boards.h"
#include "chardev/char.h"
#include "chardev/char-fe.h"
#include "monitor/monitor.h"
#include "trace.h"
#include "internals.h"

/* System emulation specific state */
typedef struct {
    CharBackend chr;
    Chardev *mon_chr;
} GDBSystemState;

GDBSystemState gdbserver_system_state;

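/* Drop the per-process state so a restarted stub begins with a clean table. */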
static void reset_gdbserver_state(void)
{
    g_free(gdbserver_state.processes);
    gdbserver_state.processes = NULL;
    gdbserver_state.process_num = 0;
}

/*
 * Return the GDB index for a given vCPU state.
 *
 * In system mode GDB numbers CPUs from 1 as 0 is reserved as an "any
 * cpu" index.
 */
int gdb_get_cpu_index(CPUState *cpu)
{
    return cpu->cpu_index + 1;
}

/*
 * We check the status of the last message in the chardev receive code
 */
bool gdb_got_immediate_ack(void)
{
    return true;
}

/*
 * GDB Connection management. For system emulation we do all of this
 * via our existing Chardev infrastructure which allows us to support
 * network and unix sockets.
 */
void gdb_put_buffer(const uint8_t *buf, int len)
{
    /*
     * XXX this blocks entire thread. Rewrite to use
     * qemu_chr_fe_write and background I/O callbacks
     */
    qemu_chr_fe_write_all(&gdbserver_system_state.chr, buf, len);
}

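/*
 * Chardev event hook: when a new GDB connection is opened, attach the
 * first process, pick its first CPU as the current CPU, stop the VM
 * and reset the XML negotiation state.
 */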
static void gdb_chr_event(void *opaque, QEMUChrEvent event)
{
    int i;
    GDBState *s = (GDBState *) opaque;

    switch (event) {
    case CHR_EVENT_OPENED:
        /* Start with first process attached, others detached */
        for (i = 0; i < s->process_num; i++) {
            s->processes[i].attached = !i;
        }

        s->c_cpu = gdb_first_attached_cpu();
        s->g_cpu = s->c_cpu;

        vm_stop(RUN_STATE_PAUSED);
        replay_gdb_attached();
        gdb_has_xml = false;
        break;
    default:
        break;
    }
}

/*
 * In softmmu mode we stop the VM and wait to send the syscall packet
 * until notification that the CPU has stopped. This must be done
 * because if the packet is sent now the reply from the syscall
 * request could be received while the CPU is still in the running
 * state, which can cause packets to be dropped and state transition
 * 'T' packets to be sent while the syscall is still being processed.
 */
void gdb_syscall_handling(const char *syscall_packet)
{
    vm_stop(RUN_STATE_DEBUG);
    qemu_cpu_kick(gdbserver_state.c_cpu);
}

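/*
 * VM run-state change hook: when the VM stops, map the stop reason to
 * a GDB signal and report it to the debugger with a 'T' stop-reply
 * packet (including watchpoint details if one was hit).
 */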
static void gdb_vm_state_change(void *opaque, bool running, RunState state)
{
    CPUState *cpu = gdbserver_state.c_cpu;
    g_autoptr(GString) buf = g_string_new(NULL);
    g_autoptr(GString) tid = g_string_new(NULL);
    const char *type;
    int ret;

    if (running || gdbserver_state.state == RS_INACTIVE) {
        return;
    }

    /* Is there a GDB syscall waiting to be sent? */
    if (gdb_handled_syscall()) {
        return;
    }

    if (cpu == NULL) {
        /* No process attached */
        return;
    }

    gdb_append_thread_id(cpu, tid);

    switch (state) {
    case RUN_STATE_DEBUG:
        if (cpu->watchpoint_hit) {
            switch (cpu->watchpoint_hit->flags & BP_MEM_ACCESS) {
            case BP_MEM_READ:
                type = "r";
                break;
            case BP_MEM_ACCESS:
                type = "a";
                break;
            default:
                type = "";
                break;
            }
            trace_gdbstub_hit_watchpoint(type,
                                         gdb_get_cpu_index(cpu),
                                         cpu->watchpoint_hit->vaddr);
            g_string_printf(buf, "T%02xthread:%s;%swatch:%" VADDR_PRIx ";",
                            GDB_SIGNAL_TRAP, tid->str, type,
                            cpu->watchpoint_hit->vaddr);
            cpu->watchpoint_hit = NULL;
            goto send_packet;
        } else {
            trace_gdbstub_hit_break();
        }
        tb_flush(cpu);
        ret = GDB_SIGNAL_TRAP;
        break;
    case RUN_STATE_PAUSED:
        trace_gdbstub_hit_paused();
        ret = GDB_SIGNAL_INT;
        break;
    case RUN_STATE_SHUTDOWN:
        trace_gdbstub_hit_shutdown();
        ret = GDB_SIGNAL_QUIT;
        break;
    case RUN_STATE_IO_ERROR:
        trace_gdbstub_hit_io_error();
        ret = GDB_SIGNAL_IO;
        break;
    case RUN_STATE_WATCHDOG:
        trace_gdbstub_hit_watchdog();
        ret = GDB_SIGNAL_ALRM;
        break;
    case RUN_STATE_INTERNAL_ERROR:
        trace_gdbstub_hit_internal_error();
        ret = GDB_SIGNAL_ABRT;
        break;
    case RUN_STATE_SAVE_VM:
    case RUN_STATE_RESTORE_VM:
        return;
    case RUN_STATE_FINISH_MIGRATE:
        ret = GDB_SIGNAL_XCPU;
        break;
    default:
        trace_gdbstub_hit_unknown(state);
        ret = GDB_SIGNAL_UNKNOWN;
        break;
    }
    gdb_set_stop_cpu(cpu);
    g_string_printf(buf, "T%02xthread:%s;", ret, tid->str);

send_packet:
    gdb_put_packet(buf->str);

    /* disable single step if it was enabled */
    cpu_single_step(cpu, 0);
}

#ifndef _WIN32
static void gdb_sigterm_handler(int signal)
{
    if (runstate_is_running()) {
        vm_stop(RUN_STATE_PAUSED);
    }
}
#endif

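/*
 * Forward monitor output to the debugger as hex-encoded
 * 'O' (console output) packets.
 */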
static int gdb_monitor_write(Chardev *chr, const uint8_t *buf, int len)
{
    g_autoptr(GString) hex_buf = g_string_new("O");
    gdb_memtohex(hex_buf, buf, len);
    gdb_put_packet(hex_buf->str);
    return len;
}

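/*
 * "chardev-gdb" is an internal chardev type backing the HMP monitor
 * reachable through the gdb connection; it never opens a host backend
 * and all writes go out via gdb_monitor_write().
 */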
static void gdb_monitor_open(Chardev *chr, ChardevBackend *backend,
                             bool *be_opened, Error **errp)
{
    *be_opened = false;
}

static void char_gdb_class_init(ObjectClass *oc, void *data)
{
    ChardevClass *cc = CHARDEV_CLASS(oc);

    cc->internal = true;
    cc->open = gdb_monitor_open;
    cc->chr_write = gdb_monitor_write;
}

#define TYPE_CHARDEV_GDB "chardev-gdb"

static const TypeInfo char_gdb_type_info = {
    .name = TYPE_CHARDEV_GDB,
    .parent = TYPE_CHARDEV,
    .class_init = char_gdb_class_init,
};

static int gdb_chr_can_receive(void *opaque)
{
    /*
     * We can handle an arbitrarily large amount of data.
     * Pick the maximum packet size, which is as good as anything.
     */
    return MAX_PACKET_LENGTH;
}

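/* Feed each received byte into the gdbstub packet state machine. */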
static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
{
    int i;

    for (i = 0; i < size; i++) {
        gdb_read_byte(buf[i]);
    }
}

static int find_cpu_clusters(Object *child, void *opaque)
{
    if (object_dynamic_cast(child, TYPE_CPU_CLUSTER)) {
        GDBState *s = (GDBState *) opaque;
        CPUClusterState *cluster = CPU_CLUSTER(child);
        GDBProcess *process;

        s->processes = g_renew(GDBProcess, s->processes, ++s->process_num);

        process = &s->processes[s->process_num - 1];

        /*
         * GDB process IDs -1 and 0 are reserved. To avoid subtle errors at
         * runtime, we enforce here that the machine does not use a cluster ID
         * that would lead to PID 0.
         */
        assert(cluster->cluster_id != UINT32_MAX);
        process->pid = cluster->cluster_id + 1;
        process->attached = false;
        process->target_xml[0] = '\0';

        return 0;
    }

    return object_child_foreach(child, find_cpu_clusters, opaque);
}

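/* qsort() comparator: order GDBProcess entries by ascending PID. */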
static int pid_order(const void *a, const void *b)
{
    GDBProcess *pa = (GDBProcess *) a;
    GDBProcess *pb = (GDBProcess *) b;

    if (pa->pid < pb->pid) {
        return -1;
    } else if (pa->pid > pb->pid) {
        return 1;
    } else {
        return 0;
    }
}

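/*
 * Build the process table: one process per CPU cluster found in the
 * machine, sorted by PID, plus the default process created by
 * gdb_create_default_process().
 */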
static void create_processes(GDBState *s)
{
    object_child_foreach(object_get_root(), find_cpu_clusters, s);

    if (gdbserver_state.processes) {
        /* Sort by PID */
        qsort(gdbserver_state.processes,
              gdbserver_state.process_num,
              sizeof(gdbserver_state.processes[0]),
              pid_order);
    }

    gdb_create_default_process(s);
}

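/*
 * Start (or restart) the gdbstub on the given chardev specification.
 * Returns 0 on success, -1 on failure.
 */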
int gdbserver_start(const char *device)
{
    trace_gdbstub_op_start(device);

    char gdbstub_device_name[128];
    Chardev *chr = NULL;
    Chardev *mon_chr;

    if (!first_cpu) {
        error_report("gdbstub: meaningless to attach gdb to a "
                     "machine without any CPU.");
        return -1;
    }

    if (!gdb_supports_guest_debug()) {
        error_report("gdbstub: current accelerator doesn't "
                     "support guest debugging");
        return -1;
    }

    if (!device) {
        return -1;
    }
    if (strcmp(device, "none") != 0) {
        if (strstart(device, "tcp:", NULL)) {
            /* enforce required TCP attributes */
            snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
                     "%s,wait=off,nodelay=on,server=on", device);
            device = gdbstub_device_name;
        }
#ifndef _WIN32
        else if (strcmp(device, "stdio") == 0) {
            struct sigaction act;

            memset(&act, 0, sizeof(act));
            act.sa_handler = gdb_sigterm_handler;
            sigaction(SIGINT, &act, NULL);
        }
#endif
        /*
         * FIXME: it's a bit weird to allow using a mux chardev here
         * and implicitly setup a monitor. We may want to break this.
         */
        chr = qemu_chr_new_noreplay("gdb", device, true, NULL);
        if (!chr) {
            return -1;
        }
    }

    if (!gdbserver_state.init) {
        gdb_init_gdbserver_state();

        qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);

        /* Initialize a monitor terminal for gdb */
        mon_chr = qemu_chardev_new(NULL, TYPE_CHARDEV_GDB,
                                   NULL, NULL, &error_abort);
        monitor_init_hmp(mon_chr, false, &error_abort);
    } else {
        qemu_chr_fe_deinit(&gdbserver_system_state.chr, true);
        mon_chr = gdbserver_system_state.mon_chr;
        reset_gdbserver_state();
    }

    create_processes(&gdbserver_state);

    if (chr) {
        qemu_chr_fe_init(&gdbserver_system_state.chr, chr, &error_abort);
        qemu_chr_fe_set_handlers(&gdbserver_system_state.chr,
                                 gdb_chr_can_receive,
                                 gdb_chr_receive, gdb_chr_event,
                                 NULL, &gdbserver_state, NULL, true);
    }
    gdbserver_state.state = chr ? RS_IDLE : RS_INACTIVE;
    gdbserver_system_state.mon_chr = mon_chr;
    gdb_syscall_reset();

    return 0;
}

static void register_types(void)
{
    type_register_static(&char_gdb_type_info);
}

type_init(register_types);

/* Tell the remote gdb that the process has exited. */
void gdb_exit(int code)
{
    char buf[4];

    if (!gdbserver_state.init) {
        return;
    }

    trace_gdbstub_op_exiting((uint8_t)code);

    snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
    gdb_put_packet(buf);

    qemu_chr_fe_deinit(&gdbserver_system_state.chr, true);
}

/*
 * Memory access
 */
static int phy_memory_mode;

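/*
 * Read or write guest memory on behalf of the debugger. In physical
 * memory mode we access the physical address space directly,
 * otherwise we go through the CPU's debug access path.
 */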
int gdb_target_memory_rw_debug(CPUState *cpu, hwaddr addr,
                               uint8_t *buf, int len, bool is_write)
{
    CPUClass *cc;

    if (phy_memory_mode) {
        if (is_write) {
            cpu_physical_memory_write(addr, buf, len);
        } else {
            cpu_physical_memory_read(addr, buf, len);
        }
        return 0;
    }

    cc = CPU_GET_CLASS(cpu);
    if (cc->memory_rw_debug) {
        return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
    }

    return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
}

/*
 * cpu helpers
 */
unsigned int gdb_get_max_cpus(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());

    return ms->smp.max_cpus;
}

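/* Reverse debugging is only possible while replaying a recorded execution. */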
bool gdb_can_reverse(void)
{
    return replay_mode == REPLAY_MODE_PLAY;
}

/*
 * Softmmu specific command helpers
 */
void gdb_handle_query_qemu_phy_mem_mode(GArray *params,
                                        void *user_ctx)
{
    g_string_printf(gdbserver_state.str_buf, "%d", phy_memory_mode);
    gdb_put_strbuf();
}

void gdb_handle_set_qemu_phy_mem_mode(GArray *params, void *user_ctx)
{
    if (!params->len) {
        gdb_put_packet("E22");
        return;
    }

    if (!get_param(params, 0)->val_ul) {
        phy_memory_mode = 0;
    } else {
        phy_memory_mode = 1;
    }
    gdb_put_packet("OK");
}

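/*
 * Handle 'qRcmd': hex-decode the command string and feed it to the
 * HMP monitor attached to the gdb chardev; monitor output comes back
 * to the debugger via gdb_monitor_write().
 */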
void gdb_handle_query_rcmd(GArray *params, void *user_ctx)
{
    const guint8 zero = 0;
    int len;

    if (!params->len) {
        gdb_put_packet("E22");
        return;
    }

    len = strlen(get_param(params, 0)->data);
    if (len % 2) {
        gdb_put_packet("E01");
        return;
    }

    g_assert(gdbserver_state.mem_buf->len == 0);
    len = len / 2;
    gdb_hextomem(gdbserver_state.mem_buf, get_param(params, 0)->data, len);
    g_byte_array_append(gdbserver_state.mem_buf, &zero, 1);
    qemu_chr_be_write(gdbserver_system_state.mon_chr,
                      gdbserver_state.mem_buf->data,
                      gdbserver_state.mem_buf->len);
    gdb_put_packet("OK");
}

/*
 * Execution state helpers
 */
void gdb_handle_query_attached(GArray *params, void *user_ctx)
{
    gdb_put_packet("1");
}

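/* Resume the whole VM, unless the run state requires a reset first. */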
void gdb_continue(void)
{
    if (!runstate_needs_reset()) {
        trace_gdbstub_op_continue();
        vm_start();
    }
}

/*
 * Resume execution, per CPU actions.
 */
int gdb_continue_partial(char *newstates)
{
    CPUState *cpu;
    int res = 0;
    int flag = 0;

    if (!runstate_needs_reset()) {
        bool step_requested = false;

        CPU_FOREACH(cpu) {
            if (newstates[cpu->cpu_index] == 's') {
                step_requested = true;
                break;
            }
        }

        if (vm_prepare_start(step_requested)) {
            return 0;
        }

        CPU_FOREACH(cpu) {
            switch (newstates[cpu->cpu_index]) {
            case 0:
            case 1:
                break; /* nothing to do here */
            case 's':
                trace_gdbstub_op_stepping(cpu->cpu_index);
                cpu_single_step(cpu, gdbserver_state.sstep_flags);
                cpu_resume(cpu);
                flag = 1;
                break;
            case 'c':
                trace_gdbstub_op_continue_cpu(cpu->cpu_index);
                cpu_resume(cpu);
                flag = 1;
                break;
            default:
                res = -1;
                break;
            }
        }
    }

    if (flag) {
        qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    }

    return res;
}

/*
 * Signal Handling - in system mode we only need SIGINT and SIGTRAP; other
 * signals are not yet supported.
 */
enum {
    TARGET_SIGINT = 2,
    TARGET_SIGTRAP = 5
};

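/* Map a GDB signal number to the target signal number. */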
int gdb_signal_to_target(int sig)
{
    switch (sig) {
    case 2:
        return TARGET_SIGINT;
    case 5:
        return TARGET_SIGTRAP;
    default:
        return -1;
    }
}

/*
 * Break/Watch point helpers
 */
bool gdb_supports_guest_debug(void)
{
    const AccelOpsClass *ops = cpus_get_accel();

    if (ops->supports_guest_debug) {
        return ops->supports_guest_debug();
    }
    return false;
}

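/*
 * Breakpoint insertion and removal are delegated to the accelerator;
 * accelerators that lack the hook report -ENOSYS.
 */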
int gdb_breakpoint_insert(CPUState *cs, int type, vaddr addr, vaddr len)
{
    const AccelOpsClass *ops = cpus_get_accel();

    if (ops->insert_breakpoint) {
        return ops->insert_breakpoint(cs, type, addr, len);
    }
    return -ENOSYS;
}

int gdb_breakpoint_remove(CPUState *cs, int type, vaddr addr, vaddr len)
{
    const AccelOpsClass *ops = cpus_get_accel();

    if (ops->remove_breakpoint) {
        return ops->remove_breakpoint(cs, type, addr, len);
    }
    return -ENOSYS;
}

void gdb_breakpoint_remove_all(CPUState *cs)
{
    const AccelOpsClass *ops = cpus_get_accel();

    if (ops->remove_all_breakpoints) {
        ops->remove_all_breakpoints(cs);
    }
}