/* cpus.c */
  1. /*
  2. * QEMU System Emulator
  3. *
  4. * Copyright (c) 2003-2008 Fabrice Bellard
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to deal
  8. * in the Software without restriction, including without limitation the rights
  9. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. * copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22. * THE SOFTWARE.
  23. */
  24. #include "qemu/osdep.h"
  25. #include "monitor/monitor.h"
  26. #include "qemu/coroutine-tls.h"
  27. #include "qapi/error.h"
  28. #include "qapi/qapi-commands-machine.h"
  29. #include "qapi/qapi-commands-misc.h"
  30. #include "qapi/qapi-events-run-state.h"
  31. #include "qapi/qmp/qerror.h"
  32. #include "exec/gdbstub.h"
  33. #include "system/accel-ops.h"
  34. #include "system/hw_accel.h"
  35. #include "exec/cpu-common.h"
  36. #include "qemu/thread.h"
  37. #include "qemu/main-loop.h"
  38. #include "qemu/plugin.h"
  39. #include "system/cpus.h"
  40. #include "qemu/guest-random.h"
  41. #include "hw/nmi.h"
  42. #include "system/replay.h"
  43. #include "system/runstate.h"
  44. #include "system/cpu-timers.h"
  45. #include "system/whpx.h"
  46. #include "hw/boards.h"
  47. #include "hw/hw.h"
  48. #include "trace.h"
  49. #ifdef CONFIG_LINUX
  50. #include <sys/prctl.h>
  51. #ifndef PR_MCE_KILL
  52. #define PR_MCE_KILL 33
  53. #endif
  54. #ifndef PR_MCE_KILL_SET
  55. #define PR_MCE_KILL_SET 1
  56. #endif
  57. #ifndef PR_MCE_KILL_EARLY
  58. #define PR_MCE_KILL_EARLY 1
  59. #endif
  60. #endif /* CONFIG_LINUX */
/* The Big QEMU Lock (BQL) */
static QemuMutex bql;

/*
 * The chosen accelerator is supposed to register this via
 * cpus_register_accel() before any vCPU is created.
 */
static const AccelOpsClass *cpus_accel;
/* Return true if @cpu is halted by a stop request or the VM is not running. */
bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

/* Return true if @cpu has no queued run_on_cpu() work items. */
bool cpu_work_list_empty(CPUState *cpu)
{
    return QSIMPLEQ_EMPTY_ATOMIC(&cpu->work_list);
}

/*
 * Return true if @cpu's thread has nothing to do and may block on its
 * halt condition variable.  The checks are ordered: pending stop/work
 * always wins, a stopped CPU is always idle, and a runnable CPU defers
 * the final decision to the accelerator hook, if one is provided.
 */
bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || !cpu_work_list_empty(cpu)) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu)) {
        return false;
    }
    if (cpus_accel->cpu_thread_is_idle) {
        return cpus_accel->cpu_thread_is_idle(cpu);
    }
    return true;
}

/* Return true if every vCPU thread in the machine is currently idle. */
bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}
/***********************************************************/
/*
 * Report an unrecoverable hardware-emulation error: print the formatted
 * message and the register state of every CPU to stderr, then abort().
 * Does not return.
 */
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}
/* Run cpu_synchronize_state() on every CPU. */
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

/* Run cpu_synchronize_post_reset() on every CPU. */
void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

/* Run cpu_synchronize_post_init() on every CPU. */
void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

/* Run cpu_synchronize_pre_loadvm() on every CPU. */
void cpu_synchronize_all_pre_loadvm(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_pre_loadvm(cpu);
    }
}
/* Invoke the accelerator's synchronize_state hook for @cpu, if any. */
void cpu_synchronize_state(CPUState *cpu)
{
    if (cpus_accel->synchronize_state) {
        cpus_accel->synchronize_state(cpu);
    }
}

/* Invoke the accelerator's synchronize_post_reset hook for @cpu, if any. */
void cpu_synchronize_post_reset(CPUState *cpu)
{
    if (cpus_accel->synchronize_post_reset) {
        cpus_accel->synchronize_post_reset(cpu);
    }
}

/* Invoke the accelerator's synchronize_post_init hook for @cpu, if any. */
void cpu_synchronize_post_init(CPUState *cpu)
{
    if (cpus_accel->synchronize_post_init) {
        cpus_accel->synchronize_post_init(cpu);
    }
}

/* Invoke the accelerator's synchronize_pre_loadvm hook for @cpu, if any. */
void cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    if (cpus_accel->synchronize_pre_loadvm) {
        cpus_accel->synchronize_pre_loadvm(cpu);
    }
}
/*
 * Return whether the vCPUs may be reset; true by default, unless the
 * accelerator overrides the decision.
 */
bool cpus_are_resettable(void)
{
    if (cpus_accel->cpus_are_resettable) {
        return cpus_accel->cpus_are_resettable();
    }
    return true;
}

/* Invoke the accelerator's cpu_reset_hold hook for @cpu, if any. */
void cpu_exec_reset_hold(CPUState *cpu)
{
    if (cpus_accel->cpu_reset_hold) {
        cpus_accel->cpu_reset_hold(cpu);
    }
}
/*
 * Return the current virtual clock value, from the accelerator when it
 * provides one, otherwise falling back to cpu_get_clock().
 */
int64_t cpus_get_virtual_clock(void)
{
    /*
     * XXX
     *
     * need to check that cpus_accel is not NULL, because qcow2 calls
     * qemu_get_clock_ns(CLOCK_VIRTUAL) without any accel initialized and
     * with ticks disabled in some io-tests:
     * 030 040 041 060 099 120 127 140 156 161 172 181 191 192 195 203 229 249 256 267
     *
     * is this expected?
     *
     * XXX
     */
    if (cpus_accel && cpus_accel->get_virtual_clock) {
        return cpus_accel->get_virtual_clock();
    }
    return cpu_get_clock();
}

/*
 * Signal the new virtual time to the accelerator. This is only needed
 * by accelerators that need to track the changes as we warp time.
 */
void cpus_set_virtual_clock(int64_t new_time)
{
    if (cpus_accel && cpus_accel->set_virtual_clock) {
        cpus_accel->set_virtual_clock(new_time);
    }
}

/*
 * return the time elapsed in VM between vm_start and vm_stop. Unless
 * icount is active, cpus_get_elapsed_ticks() uses units of the host CPU cycle
 * counter.
 */
int64_t cpus_get_elapsed_ticks(void)
{
    if (cpus_accel->get_elapsed_ticks) {
        return cpus_accel->get_elapsed_ticks();
    }
    return cpu_get_ticks();
}
/*
 * Default interrupt handling: raise the requested bits and kick the
 * vCPU thread unless we are already running on it.
 */
static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

/* Raise interrupt bits @mask on @cpu, via the accelerator hook if present. */
void cpu_interrupt(CPUState *cpu, int mask)
{
    if (cpus_accel->handle_interrupt) {
        cpus_accel->handle_interrupt(cpu, mask);
    } else {
        generic_handle_interrupt(cpu, mask);
    }
}
  238. /*
  239. * True if the vm was previously suspended, and has not been woken or reset.
  240. */
  241. static int vm_was_suspended;
  242. void vm_set_suspended(bool suspended)
  243. {
  244. vm_was_suspended = suspended;
  245. }
  246. bool vm_get_suspended(void)
  247. {
  248. return vm_was_suspended;
  249. }
/*
 * Common VM-stop path: remember whether we were suspended, switch the run
 * state, stop the ticks and (if running) the vCPUs, notify state-change
 * listeners and optionally QMP clients, then drain and flush all block
 * devices.  Returns the result of bdrv_flush_all().
 */
static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;
    RunState oldstate = runstate_get();

    if (runstate_is_live(oldstate)) {
        vm_was_suspended = (oldstate == RUN_STATE_SUSPENDED);
        runstate_set(state);
        cpu_disable_ticks();
        if (oldstate == RUN_STATE_RUNNING) {
            pause_all_vcpus();
        }
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop();
        }
    }

    /* Drain/flush unconditionally, even if the VM was not live. */
    bdrv_drain_all();
    ret = bdrv_flush_all();
    trace_vm_stop_flush_all(ret);
    return ret;
}

/* Special vm_stop() variant for terminating the process. Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}
  278. bool cpu_can_run(CPUState *cpu)
  279. {
  280. if (cpu->stop) {
  281. return false;
  282. }
  283. if (cpu_is_stopped(cpu)) {
  284. return false;
  285. }
  286. return true;
  287. }
/*
 * React to a guest debug event on @cpu.  During record/replay debugging
 * the breakpoint is reported and single-stepped over; otherwise the
 * debugger is notified and the vCPU is marked stopped.
 */
void cpu_handle_guest_debug(CPUState *cpu)
{
    if (replay_running_debug()) {
        if (!cpu->singlestep_enabled) {
            /*
             * Report about the breakpoint and
             * make a single step to skip it
             */
            replay_breakpoint();
            cpu_single_step(cpu, SSTEP_ENABLE);
        } else {
            /* The skip step was taken: disable single-stepping again. */
            cpu_single_step(cpu, 0);
        }
    } else {
        gdb_set_stop_cpu(cpu);
        qemu_system_debug_request();
        cpu->stopped = true;
    }
}
#ifdef CONFIG_LINUX
/*
 * Restore the default SIGBUS disposition and deliver the signal to
 * ourselves again, so the process terminates with the original signal.
 * Aborts if the handler could not be reset.
 */
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        /* Unblock SIGBUS so the re-raised signal is delivered right away. */
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!");
    abort();
}

/*
 * SIGBUS handler: forward machine-check errors (BUS_MCEERR_AO/AR) to KVM;
 * any other SIGBUS cause, or a KVM handling failure, is fatal via
 * sigbus_reraise().
 */
static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread. */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread. */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}

/* Install the SIGBUS handler and request early machine-check delivery. */
static void qemu_init_sigbus(void)
{
    struct sigaction action;

    /*
     * ALERT: when modifying this, take care that SIGBUS forwarding in
     * qemu_prealloc_mem() will continue working as expected.
     */
    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#else /* !CONFIG_LINUX */
/* No SIGBUS/machine-check handling on non-Linux hosts. */
static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */
static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;

/*
 * One-time setup for vCPU management: SIGBUS handling, the condition
 * variables above, the BQL, and a record of the main (I/O) thread.
 */
void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&bql);

    qemu_thread_get_self(&io_thread);
}
/* Run @func(@data) on @cpu's thread, synchronizing through the BQL. */
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &bql);
}

/*
 * Mark the calling vCPU as stopped and wake waiters on qemu_pause_cond;
 * optionally force it out of its execution loop via cpu_exit().
 * Must run on @cpu's own thread.
 */
static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}

/* Clear the pending kick, honor a stop request, and drain queued work. */
void qemu_wait_io_event_common(CPUState *cpu)
{
    qatomic_set_mb(&cpu->thread_kicked, false);
    if (cpu->stop) {
        qemu_cpu_stop(cpu, false);
    }
    process_queued_cpu_work(cpu);
}
/*
 * Block @cpu's thread on its halt condition variable while it is idle,
 * notifying plugins on the first sleep and again on resume, then handle
 * any pending stop request and queued work.
 */
void qemu_wait_io_event(CPUState *cpu)
{
    bool slept = false;

    while (cpu_thread_is_idle(cpu)) {
        if (!slept) {
            slept = true;
            qemu_plugin_vcpu_idle_cb(cpu);
        }
        qemu_cond_wait(cpu->halt_cond, &bql);
    }
    if (slept) {
        qemu_plugin_vcpu_resume_cb(cpu);
    }

    qemu_wait_io_event_common(cpu);
}
/*
 * Kick @cpu's thread out of a blocking state: with a SIG_IPI signal on
 * POSIX hosts, or by posting the per-CPU semaphore on Windows.  A no-op
 * if a kick is already pending (cpu->thread_kicked).
 */
void cpus_kick_thread(CPUState *cpu)
{
    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;

#ifndef _WIN32
    /* ESRCH means the thread is already gone; that is not an error here. */
    int err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err && err != ESRCH) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else
    qemu_sem_post(&cpu->sem);
#endif
}

/*
 * Wake up @cpu: broadcast its halt condition, then let the accelerator
 * deliver the kick, falling back to cpus_kick_thread().
 */
void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (cpus_accel->kick_vcpu_thread) {
        cpus_accel->kick_vcpu_thread(cpu);
    } else { /* default */
        cpus_kick_thread(cpu);
    }
}

/* Kick the vCPU thread we are currently running on. */
void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    cpus_kick_thread(current_cpu);
}

/* Return true if the calling thread is @cpu's execution thread. */
bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

/* Return true if the calling thread is any vCPU thread. */
bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}
/* Per-coroutine/thread flag: does the current context hold the BQL? */
QEMU_DEFINE_STATIC_CO_TLS(bool, bql_locked)

/* Count of outstanding requests that forbid releasing the BQL. */
static uint32_t bql_unlock_blocked;

/*
 * Increment (@increase == true) or decrement the count of requests that
 * block bql_unlock(); bql_unlock() asserts the count is zero.
 * Caller must hold the BQL.
 */
void bql_block_unlock(bool increase)
{
    uint32_t new_value;

    assert(bql_locked());

    /* check for overflow! */
    new_value = bql_unlock_blocked + increase - !increase;
    assert((new_value > bql_unlock_blocked) == increase);
    bql_unlock_blocked = new_value;
}

/* Return true if the current context holds the BQL. */
bool bql_locked(void)
{
    return get_bql_locked();
}

/*
 * NOTE(review): implemented as "holds the BQL"; the two conditions
 * coincide for the callers this is intended for.
 */
bool qemu_in_main_thread(void)
{
    return bql_locked();
}

/* Stub for Rust unit tests; must never be reached in a real QEMU process. */
void rust_bql_mock_lock(void)
{
    error_report("This function should be used only from tests");
    abort();
}

/*
 * The BQL is taken from so many places that it is worth profiling the
 * callers directly, instead of funneling them all through a single function.
 */
void bql_lock_impl(const char *file, int line)
{
    QemuMutexLockFunc bql_lock_fn = qatomic_read(&bql_mutex_lock_func);

    g_assert(!bql_locked());
    bql_lock_fn(&bql, file, line);
    set_bql_locked(true);
}

/* Release the BQL; the caller must hold it and unlocking must not be blocked. */
void bql_unlock(void)
{
    g_assert(bql_locked());
    g_assert(!bql_unlock_blocked);
    set_bql_locked(false);
    qemu_mutex_unlock(&bql);
}

/* Wait on @cond using the BQL as the associated mutex. */
void qemu_cond_wait_bql(QemuCond *cond)
{
    qemu_cond_wait(cond, &bql);
}

/* Wait on @cond under the BQL, giving up after @ms milliseconds. */
void qemu_cond_timedwait_bql(QemuCond *cond, int ms)
{
    qemu_cond_timedwait(cond, &bql, ms);
}
/* signal CPU creation: unblocks qemu_init_vcpu() waiting on qemu_cpu_cond */
void cpu_thread_signal_created(CPUState *cpu)
{
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
}

/* signal CPU destruction to anyone waiting on qemu_cpu_cond */
void cpu_thread_signal_destroyed(CPUState *cpu)
{
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
}
/*
 * Ask @cpu to pause.  From the vCPU's own thread it stops immediately;
 * otherwise the stop flag is set and the thread is kicked so it notices.
 */
void cpu_pause(CPUState *cpu)
{
    if (qemu_cpu_is_self(cpu)) {
        qemu_cpu_stop(cpu, true);
    } else {
        cpu->stop = true;
        qemu_cpu_kick(cpu);
    }
}

/* Clear @cpu's stop state and kick its thread so it resumes execution. */
void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

/* Return true once every vCPU has reached the stopped state. */
static bool all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return false;
        }
    }
    return true;
}
/*
 * Stop every vCPU and wait (under the BQL) until all have actually
 * paused.  The replay lock is dropped during the wait, then the two
 * locks are reacquired in order: replay lock first, BQL second.
 */
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        cpu_pause(cpu);
    }

    /* We need to drop the replay_lock so any vCPU threads woken up
     * can finish their replay tasks
     */
    replay_mutex_unlock();

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &bql);
        CPU_FOREACH(cpu) {
            /* Kick again in case a vCPU went back to sleep before stopping. */
            qemu_cpu_kick(cpu);
        }
    }

    bql_unlock();
    replay_mutex_lock();
    bql_lock();
}

/* Restart all vCPUs, but only if the VM is in a running state. */
void resume_all_vcpus(void)
{
    CPUState *cpu;

    if (!runstate_is_running()) {
        return;
    }
    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}
/*
 * Synchronously remove @cpu: request stop+unplug, kick the thread, then
 * drop the BQL while joining it so the vCPU thread can finish.
 */
void cpu_remove_sync(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
    bql_unlock();
    qemu_thread_join(cpu->thread);
    bql_lock();
}

/*
 * Register the accelerator's AccelOpsClass; create_vcpu_thread is the
 * only hook every accelerator must provide.
 */
void cpus_register_accel(const AccelOpsClass *ops)
{
    assert(ops != NULL);
    assert(ops->create_vcpu_thread != NULL); /* mandatory */
    cpus_accel = ops;
}

/* Return the registered accelerator ops; asserts one was registered. */
const AccelOpsClass *cpus_get_accel(void)
{
    /* broken if we call this early */
    assert(cpus_accel);
    return cpus_accel;
}
/*
 * Final per-CPU initialization: seed the guest RNG, set up a default
 * address space if the target did not, create the vCPU thread via the
 * accelerator, and wait (dropping the BQL in qemu_cond_wait) until the
 * new thread signals creation.
 */
void qemu_init_vcpu(CPUState *cpu)
{
    MachineState *ms = MACHINE(qdev_get_machine());

    cpu->nr_threads = ms->smp.threads;
    cpu->stopped = true;
    cpu->random_seed = qemu_guest_random_seed_thread_part1();

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
    }

    /* accelerators all implement the AccelOpsClass */
    g_assert(cpus_accel != NULL && cpus_accel->create_vcpu_thread != NULL);
    cpus_accel->create_vcpu_thread(cpu);

    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &bql);
    }
}

/* Request a stop of the vCPU running on the calling thread, if any. */
void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = true;
        cpu_exit(current_cpu);
    }
}
/*
 * Stop the VM and transition to @state.  From a vCPU thread only the
 * request is queued (carried out later by the main loop) and 0 is
 * returned; otherwise the stop happens immediately via do_vm_stop().
 */
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state, true);
}
/**
 * Prepare for (re)starting the VM.
 * Returns 0 if the vCPUs should be restarted, -1 on an error condition,
 * and 1 otherwise.
 */
int vm_prepare_start(bool step_pending)
{
    /* A VM waking from suspend does not restart its vCPUs (ret == 1). */
    int ret = vm_was_suspended ? 1 : 0;
    RunState state = vm_was_suspended ? RUN_STATE_SUSPENDED : RUN_STATE_RUNNING;
    RunState requested;

    qemu_vmstop_requested(&requested);
    if (runstate_is_running() && requested == RUN_STATE__MAX) {
        return -1;
    }

    /* Ensure that a STOP/RESUME pair of events is emitted if a
     * vmstop request was pending. The BLOCK_IO_ERROR event, for
     * example, according to documentation is always followed by
     * the STOP event.
     */
    if (runstate_is_running()) {
        qapi_event_send_stop();
        qapi_event_send_resume();
        return -1;
    }

    /*
     * WHPX accelerator needs to know whether we are going to step
     * any CPUs, before starting the first one.
     */
    if (cpus_accel->synchronize_pre_resume) {
        cpus_accel->synchronize_pre_resume(step_pending);
    }

    /* We are sending this now, but the CPUs will be resumed shortly later */
    qapi_event_send_resume();

    cpu_enable_ticks();
    runstate_set(state);
    vm_state_notify(1, state);
    vm_was_suspended = false;
    return ret;
}

/* Start (or restart) the VM, resuming vCPUs when vm_prepare_start() allows. */
void vm_start(void)
{
    if (!vm_prepare_start(false)) {
        resume_all_vcpus();
    }
}
/*
 * Resume the VM into @state: live states go through vm_start(), while
 * non-live states are simply recorded.
 */
void vm_resume(RunState state)
{
    if (runstate_is_live(state)) {
        vm_start();
    } else {
        runstate_set(state);
    }
}

/* does a state transition even if the VM is already stopped,
   current state is forgotten forever */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_live(runstate_get())) {
        return vm_stop(state);
    } else {
        int ret;
        runstate_set(state);

        bdrv_drain_all();
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        ret = bdrv_flush_all();
        trace_vm_stop_flush_all(ret);
        return ret;
    }
}
  697. void qmp_memsave(uint64_t addr, uint64_t size, const char *filename,
  698. bool has_cpu, int64_t cpu_index, Error **errp)
  699. {
  700. FILE *f;
  701. uint64_t l;
  702. CPUState *cpu;
  703. uint8_t buf[1024];
  704. uint64_t orig_addr = addr, orig_size = size;
  705. if (!has_cpu) {
  706. cpu_index = 0;
  707. }
  708. cpu = qemu_get_cpu(cpu_index);
  709. if (cpu == NULL) {
  710. error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
  711. "a CPU number");
  712. return;
  713. }
  714. f = fopen(filename, "wb");
  715. if (!f) {
  716. error_setg_file_open(errp, errno, filename);
  717. return;
  718. }
  719. while (size != 0) {
  720. l = sizeof(buf);
  721. if (l > size)
  722. l = size;
  723. if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
  724. error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRIu64
  725. " specified", orig_addr, orig_size);
  726. goto exit;
  727. }
  728. if (fwrite(buf, 1, l, f) != l) {
  729. error_setg(errp, "writing memory to '%s' failed",
  730. filename);
  731. goto exit;
  732. }
  733. addr += l;
  734. size -= l;
  735. }
  736. exit:
  737. fclose(f);
  738. }
  739. void qmp_pmemsave(uint64_t addr, uint64_t size, const char *filename,
  740. Error **errp)
  741. {
  742. FILE *f;
  743. uint64_t l;
  744. uint8_t buf[1024];
  745. f = fopen(filename, "wb");
  746. if (!f) {
  747. error_setg_file_open(errp, errno, filename);
  748. return;
  749. }
  750. while (size != 0) {
  751. l = sizeof(buf);
  752. if (l > size)
  753. l = size;
  754. cpu_physical_memory_read(addr, buf, l);
  755. if (fwrite(buf, 1, l, f) != l) {
  756. error_setg(errp, "writing memory to '%s' failed",
  757. filename);
  758. goto exit;
  759. }
  760. addr += l;
  761. size -= l;
  762. }
  763. exit:
  764. fclose(f);
  765. }
/* QMP inject-nmi: deliver an NMI targeting the current monitor's CPU. */
void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(monitor_cur()), errp);
}