  1. /*
  2. * QEMU System Emulator
  3. *
  4. * Copyright (c) 2003-2008 Fabrice Bellard
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to deal
  8. * in the Software without restriction, including without limitation the rights
  9. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. * copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22. * THE SOFTWARE.
  23. */
  24. #include "qemu/osdep.h"
  25. #include "qapi/error.h"
  26. #include "qemu/cutils.h"
  27. #include "qemu/timer.h"
  28. #include "sysemu/cpu-timers.h"
  29. #include "sysemu/replay.h"
  30. #include "qemu/main-loop.h"
  31. #include "block/aio.h"
  32. #include "block/thread-pool.h"
  33. #include "qemu/error-report.h"
  34. #include "qemu/queue.h"
  35. #include "qom/object.h"
  36. #ifndef _WIN32
  37. #include <sys/wait.h>
  38. #endif
  39. #ifndef _WIN32
  40. /* If we have signalfd, we mask out the signals we want to handle and then
  41. * use signalfd to listen for them. We rely on whatever the current signal
  42. * handler is to dispatch the signals when we receive them.
  43. */
  44. /*
  45. * Disable CFI checks.
  46. * We are going to call a signal hander directly. Such handler may or may not
  47. * have been defined in our binary, so there's no guarantee that the pointer
  48. * used to set the handler is a cfi-valid pointer. Since the handlers are
  49. * stored in kernel memory, changing the handler to an attacker-defined
  50. * function requires being able to call a sigaction() syscall,
  51. * which is not as easy as overwriting a pointer in memory.
  52. */
  53. QEMU_DISABLE_CFI
  54. static void sigfd_handler(void *opaque)
  55. {
  56. int fd = (intptr_t)opaque;
  57. struct qemu_signalfd_siginfo info;
  58. struct sigaction action;
  59. ssize_t len;
  60. while (1) {
  61. len = RETRY_ON_EINTR(read(fd, &info, sizeof(info)));
  62. if (len == -1 && errno == EAGAIN) {
  63. break;
  64. }
  65. if (len != sizeof(info)) {
  66. error_report("read from sigfd returned %zd: %s", len,
  67. g_strerror(errno));
  68. return;
  69. }
  70. sigaction(info.ssi_signo, NULL, &action);
  71. if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
  72. sigaction_invoke(&action, &info);
  73. } else if (action.sa_handler) {
  74. action.sa_handler(info.ssi_signo);
  75. }
  76. }
  77. }
  78. static int qemu_signal_init(Error **errp)
  79. {
  80. int sigfd;
  81. sigset_t set;
  82. /*
  83. * SIG_IPI must be blocked in the main thread and must not be caught
  84. * by sigwait() in the signal thread. Otherwise, the cpu thread will
  85. * not catch it reliably.
  86. */
  87. sigemptyset(&set);
  88. sigaddset(&set, SIG_IPI);
  89. sigaddset(&set, SIGIO);
  90. sigaddset(&set, SIGALRM);
  91. sigaddset(&set, SIGBUS);
  92. /* SIGINT cannot be handled via signalfd, so that ^C can be used
  93. * to interrupt QEMU when it is being run under gdb. SIGHUP and
  94. * SIGTERM are also handled asynchronously, even though it is not
  95. * strictly necessary, because they use the same handler as SIGINT.
  96. */
  97. pthread_sigmask(SIG_BLOCK, &set, NULL);
  98. sigdelset(&set, SIG_IPI);
  99. sigfd = qemu_signalfd(&set);
  100. if (sigfd == -1) {
  101. error_setg_errno(errp, errno, "failed to create signalfd");
  102. return -errno;
  103. }
  104. g_unix_set_fd_nonblocking(sigfd, true, NULL);
  105. qemu_set_fd_handler(sigfd, sigfd_handler, NULL, (void *)(intptr_t)sigfd);
  106. return 0;
  107. }
  108. #else /* _WIN32 */
  109. static int qemu_signal_init(Error **errp)
  110. {
  111. return 0;
  112. }
  113. #endif
  114. static AioContext *qemu_aio_context;
  115. static QEMUBH *qemu_notify_bh;
  116. static void notify_event_cb(void *opaque)
  117. {
  118. /* No need to do anything; this bottom half is only used to
  119. * kick the kernel out of ppoll/poll/WaitForMultipleObjects.
  120. */
  121. }
  122. AioContext *qemu_get_aio_context(void)
  123. {
  124. return qemu_aio_context;
  125. }
  126. void qemu_notify_event(void)
  127. {
  128. if (!qemu_aio_context) {
  129. return;
  130. }
  131. qemu_bh_schedule(qemu_notify_bh);
  132. }
  133. static GArray *gpollfds;
  134. int qemu_init_main_loop(Error **errp)
  135. {
  136. int ret;
  137. GSource *src;
  138. init_clocks(qemu_timer_notify_cb);
  139. ret = qemu_signal_init(errp);
  140. if (ret) {
  141. return ret;
  142. }
  143. qemu_aio_context = aio_context_new(errp);
  144. if (!qemu_aio_context) {
  145. return -EMFILE;
  146. }
  147. qemu_set_current_aio_context(qemu_aio_context);
  148. qemu_notify_bh = qemu_bh_new(notify_event_cb, NULL);
  149. gpollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
  150. src = aio_get_g_source(qemu_aio_context);
  151. g_source_set_name(src, "aio-context");
  152. g_source_attach(src, NULL);
  153. g_source_unref(src);
  154. src = iohandler_get_g_source();
  155. g_source_set_name(src, "io-handler");
  156. g_source_attach(src, NULL);
  157. g_source_unref(src);
  158. return 0;
  159. }
  160. static void main_loop_update_params(EventLoopBase *base, Error **errp)
  161. {
  162. ERRP_GUARD();
  163. if (!qemu_aio_context) {
  164. error_setg(errp, "qemu aio context not ready");
  165. return;
  166. }
  167. aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp);
  168. if (*errp) {
  169. return;
  170. }
  171. aio_context_set_thread_pool_params(qemu_aio_context, base->thread_pool_min,
  172. base->thread_pool_max, errp);
  173. }
  174. MainLoop *mloop;
  175. static void main_loop_init(EventLoopBase *base, Error **errp)
  176. {
  177. MainLoop *m = MAIN_LOOP(base);
  178. if (mloop) {
  179. error_setg(errp, "only one main-loop instance allowed");
  180. return;
  181. }
  182. main_loop_update_params(base, errp);
  183. mloop = m;
  184. return;
  185. }
  186. static bool main_loop_can_be_deleted(EventLoopBase *base)
  187. {
  188. return false;
  189. }
  190. static void main_loop_class_init(ObjectClass *oc, void *class_data)
  191. {
  192. EventLoopBaseClass *bc = EVENT_LOOP_BASE_CLASS(oc);
  193. bc->init = main_loop_init;
  194. bc->update_params = main_loop_update_params;
  195. bc->can_be_deleted = main_loop_can_be_deleted;
  196. }
  197. static const TypeInfo main_loop_info = {
  198. .name = TYPE_MAIN_LOOP,
  199. .parent = TYPE_EVENT_LOOP_BASE,
  200. .class_init = main_loop_class_init,
  201. .instance_size = sizeof(MainLoop),
  202. };
  203. static void main_loop_register_types(void)
  204. {
  205. type_register_static(&main_loop_info);
  206. }
  207. type_init(main_loop_register_types)
/* Highest GLib source priority reported by g_main_context_prepare(). */
static int max_priority;
  209. #ifndef _WIN32
/* Offset and count of GLib's fds within the shared gpollfds array. */
static int glib_pollfds_idx;
static int glib_n_poll_fds;

/*
 * Append the default GLib context's poll fds to gpollfds and fold
 * GLib's requested timeout into *cur_timeout (nanoseconds; -1 means
 * "block indefinitely").
 */
static void glib_pollfds_fill(int64_t *cur_timeout)
{
    GMainContext *context = g_main_context_default();
    int timeout = 0;            /* GLib timeout, in milliseconds */
    int64_t timeout_ns;
    int n;

    g_main_context_prepare(context, &max_priority);

    /* GLib's fds go after whatever callers already put in gpollfds. */
    glib_pollfds_idx = gpollfds->len;
    n = glib_n_poll_fds;
    do {
        GPollFD *pfds;
        glib_n_poll_fds = n;
        g_array_set_size(gpollfds, glib_pollfds_idx + glib_n_poll_fds);
        pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);
        /*
         * g_main_context_query() returns how many fds it actually
         * needs; retry until the array we offered was big enough.
         */
        n = g_main_context_query(context, max_priority, &timeout, pfds,
                                 glib_n_poll_fds);
    } while (n != glib_n_poll_fds);

    if (timeout < 0) {
        timeout_ns = -1;
    } else {
        /* Convert GLib's milliseconds to nanoseconds. */
        timeout_ns = (int64_t)timeout * (int64_t)SCALE_MS;
    }
    /* Keep whichever deadline comes first. */
    *cur_timeout = qemu_soonest_timeout(timeout_ns, *cur_timeout);
}
  236. static void glib_pollfds_poll(void)
  237. {
  238. GMainContext *context = g_main_context_default();
  239. GPollFD *pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);
  240. if (g_main_context_check(context, max_priority, pfds, glib_n_poll_fds)) {
  241. g_main_context_dispatch(context);
  242. }
  243. }
/* NOTE(review): appears unused in this file — confirm before removing. */
#define MAX_MAIN_LOOP_SPIN (1000)

/*
 * POSIX: wait up to @timeout ns (-1 = forever) for activity on
 * gpollfds plus the default GLib context, then dispatch GLib sources.
 * The iothread lock and replay mutex are dropped around the blocking
 * poll and reacquired in the reverse order afterwards.
 */
static int os_host_main_loop_wait(int64_t timeout)
{
    GMainContext *context = g_main_context_default();
    int ret;

    g_main_context_acquire(context);

    glib_pollfds_fill(&timeout);

    qemu_mutex_unlock_iothread();
    replay_mutex_unlock();

    /* Blocking point of the main loop. */
    ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);

    replay_mutex_lock();
    qemu_mutex_lock_iothread();

    glib_pollfds_poll();

    g_main_context_release(context);

    return ret;
}
  260. #else
  261. /***********************************************************/
  262. /* Polling handling */
/* Node in the singly linked list of registered polling callbacks. */
typedef struct PollingEntry {
    PollingFunc *func;          /* called every main-loop iteration */
    void *opaque;               /* argument passed to func */
    struct PollingEntry *next;
} PollingEntry;

/* Head of the polling-callback list. */
static PollingEntry *first_polling_entry;
  269. int qemu_add_polling_cb(PollingFunc *func, void *opaque)
  270. {
  271. PollingEntry **ppe, *pe;
  272. pe = g_new0(PollingEntry, 1);
  273. pe->func = func;
  274. pe->opaque = opaque;
  275. for(ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next);
  276. *ppe = pe;
  277. return 0;
  278. }
  279. void qemu_del_polling_cb(PollingFunc *func, void *opaque)
  280. {
  281. PollingEntry **ppe, *pe;
  282. for(ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next) {
  283. pe = *ppe;
  284. if (pe->func == func && pe->opaque == opaque) {
  285. *ppe = pe->next;
  286. g_free(pe);
  287. break;
  288. }
  289. }
  290. }
  291. /***********************************************************/
  292. /* Wait objects support */
/*
 * Win32 wait handles polled alongside the fds in
 * os_host_main_loop_wait().  Parallel arrays, valid entries 0..num-1.
 */
typedef struct WaitObjects {
    int num;                                    /* valid entry count */
    int revents[MAXIMUM_WAIT_OBJECTS];          /* last poll result */
    HANDLE events[MAXIMUM_WAIT_OBJECTS];        /* the wait handles */
    WaitObjectFunc *func[MAXIMUM_WAIT_OBJECTS]; /* per-handle callback */
    void *opaque[MAXIMUM_WAIT_OBJECTS];         /* callback argument */
} WaitObjects;

static WaitObjects wait_objects = {0};
  301. int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
  302. {
  303. int i;
  304. WaitObjects *w = &wait_objects;
  305. if (w->num >= MAXIMUM_WAIT_OBJECTS) {
  306. return -1;
  307. }
  308. for (i = 0; i < w->num; i++) {
  309. /* check if the same handle is added twice */
  310. if (w->events[i] == handle) {
  311. return -1;
  312. }
  313. }
  314. w->events[w->num] = handle;
  315. w->func[w->num] = func;
  316. w->opaque[w->num] = opaque;
  317. w->revents[w->num] = 0;
  318. w->num++;
  319. return 0;
  320. }
  321. void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
  322. {
  323. int i, found;
  324. WaitObjects *w = &wait_objects;
  325. found = 0;
  326. for (i = 0; i < w->num; i++) {
  327. if (w->events[i] == handle) {
  328. found = 1;
  329. }
  330. if (found && i < (MAXIMUM_WAIT_OBJECTS - 1)) {
  331. w->events[i] = w->events[i + 1];
  332. w->func[i] = w->func[i + 1];
  333. w->opaque[i] = w->opaque[i + 1];
  334. w->revents[i] = w->revents[i + 1];
  335. }
  336. }
  337. if (found) {
  338. w->num--;
  339. }
  340. }
  341. static int pollfds_fill(GArray *pollfds, fd_set *rfds, fd_set *wfds,
  342. fd_set *xfds)
  343. {
  344. int nfds = -1;
  345. int i;
  346. for (i = 0; i < pollfds->len; i++) {
  347. GPollFD *pfd = &g_array_index(pollfds, GPollFD, i);
  348. int fd = pfd->fd;
  349. int events = pfd->events;
  350. if (events & G_IO_IN) {
  351. FD_SET(fd, rfds);
  352. nfds = MAX(nfds, fd);
  353. }
  354. if (events & G_IO_OUT) {
  355. FD_SET(fd, wfds);
  356. nfds = MAX(nfds, fd);
  357. }
  358. if (events & G_IO_PRI) {
  359. FD_SET(fd, xfds);
  360. nfds = MAX(nfds, fd);
  361. }
  362. }
  363. return nfds;
  364. }
  365. static void pollfds_poll(GArray *pollfds, int nfds, fd_set *rfds,
  366. fd_set *wfds, fd_set *xfds)
  367. {
  368. int i;
  369. for (i = 0; i < pollfds->len; i++) {
  370. GPollFD *pfd = &g_array_index(pollfds, GPollFD, i);
  371. int fd = pfd->fd;
  372. int revents = 0;
  373. if (FD_ISSET(fd, rfds)) {
  374. revents |= G_IO_IN;
  375. }
  376. if (FD_ISSET(fd, wfds)) {
  377. revents |= G_IO_OUT;
  378. }
  379. if (FD_ISSET(fd, xfds)) {
  380. revents |= G_IO_PRI;
  381. }
  382. pfd->revents = revents & pfd->events;
  383. }
  384. }
/*
 * Win32: one wait iteration combining polling callbacks, a zero-timeout
 * select() over gpollfds, and a qemu_poll_ns() over the GLib fds plus
 * the registered Win32 wait handles.  @timeout is in nanoseconds
 * (-1 = block indefinitely).
 */
static int os_host_main_loop_wait(int64_t timeout)
{
    GMainContext *context = g_main_context_default();
    GPollFD poll_fds[1024 * 2]; /* this is probably overkill */
    int select_ret = 0;
    int g_poll_ret, ret, i, n_poll_fds;
    PollingEntry *pe;
    WaitObjects *w = &wait_objects;
    gint poll_timeout;
    int64_t poll_timeout_ns;
    static struct timeval tv0;  /* zero timeout: select() only peeks */
    fd_set rfds, wfds, xfds;
    int nfds;

    g_main_context_acquire(context);

    /* XXX: need to suppress polling by better using win32 events */
    ret = 0;
    for (pe = first_polling_entry; pe != NULL; pe = pe->next) {
        ret |= pe->func(pe->opaque);
    }
    if (ret != 0) {
        /* A polling callback reported activity: skip the wait entirely. */
        g_main_context_release(context);
        return ret;
    }

    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    FD_ZERO(&xfds);
    nfds = pollfds_fill(gpollfds, &rfds, &wfds, &xfds);
    if (nfds >= 0) {
        select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
        if (select_ret != 0) {
            /* Sockets are ready (or select failed): don't block below. */
            timeout = 0;
        }
        if (select_ret > 0) {
            pollfds_poll(gpollfds, nfds, &rfds, &wfds, &xfds);
        }
    }

    /* Gather GLib's fds, then append one pseudo-fd per wait handle. */
    g_main_context_prepare(context, &max_priority);
    n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout,
                                      poll_fds, ARRAY_SIZE(poll_fds));
    g_assert(n_poll_fds + w->num <= ARRAY_SIZE(poll_fds));

    for (i = 0; i < w->num; i++) {
        poll_fds[n_poll_fds + i].fd = (DWORD_PTR)w->events[i];
        poll_fds[n_poll_fds + i].events = G_IO_IN;
    }

    if (poll_timeout < 0) {
        poll_timeout_ns = -1;
    } else {
        /* GLib reports milliseconds; qemu_poll_ns wants nanoseconds. */
        poll_timeout_ns = (int64_t)poll_timeout * (int64_t)SCALE_MS;
    }
    poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout);

    /* Drop the locks around the blocking wait, as on the POSIX path. */
    qemu_mutex_unlock_iothread();

    replay_mutex_unlock();

    g_poll_ret = qemu_poll_ns(poll_fds, n_poll_fds + w->num, poll_timeout_ns);

    replay_mutex_lock();

    qemu_mutex_lock_iothread();
    if (g_poll_ret > 0) {
        /* Record results first, then run callbacks for fired handles. */
        for (i = 0; i < w->num; i++) {
            w->revents[i] = poll_fds[n_poll_fds + i].revents;
        }
        for (i = 0; i < w->num; i++) {
            if (w->revents[i] && w->func[i]) {
                w->func[i](w->opaque[i]);
            }
        }
    }

    if (g_main_context_check(context, max_priority, poll_fds, n_poll_fds)) {
        g_main_context_dispatch(context);
    }

    g_main_context_release(context);

    return select_ret || g_poll_ret;
}
  456. #endif
/* Notifiers invoked before (FILL) and after (OK/ERR) each poll. */
static NotifierList main_loop_poll_notifiers =
    NOTIFIER_LIST_INITIALIZER(main_loop_poll_notifiers);

/* Register a notifier to run around every main-loop poll. */
void main_loop_poll_add_notifier(Notifier *notify)
{
    notifier_list_add(&main_loop_poll_notifiers, notify);
}

/* Unregister a notifier added with main_loop_poll_add_notifier(). */
void main_loop_poll_remove_notifier(Notifier *notify)
{
    notifier_remove(notify);
}
/*
 * Run one iteration of the main loop: let notifiers fill gpollfds,
 * wait for events (at most until the next timer deadline), notify the
 * poll result, and run expired timers.  @nonblocking forces a zero
 * timeout.
 */
void main_loop_wait(int nonblocking)
{
    MainLoopPoll mlpoll = {
        .state = MAIN_LOOP_POLL_FILL,
        .timeout = UINT32_MAX,   /* sentinel: no timeout requested yet */
        .pollfds = gpollfds,
    };
    int ret;
    int64_t timeout_ns;

    if (nonblocking) {
        mlpoll.timeout = 0;
    }

    /* poll any events */
    g_array_set_size(gpollfds, 0); /* reset for new iteration */
    /* XXX: separate device handlers from system ones */
    notifier_list_notify(&main_loop_poll_notifiers, &mlpoll);

    if (mlpoll.timeout == UINT32_MAX) {
        /* No notifier asked for a timeout: block indefinitely. */
        timeout_ns = -1;
    } else {
        /* Notifier timeouts are in milliseconds. */
        timeout_ns = (uint64_t)mlpoll.timeout * (int64_t)(SCALE_MS);
    }

    /* Never sleep past the soonest pending timer deadline. */
    timeout_ns = qemu_soonest_timeout(timeout_ns,
                                      timerlistgroup_deadline_ns(
                                          &main_loop_tlg));

    ret = os_host_main_loop_wait(timeout_ns);
    mlpoll.state = ret < 0 ? MAIN_LOOP_POLL_ERR : MAIN_LOOP_POLL_OK;
    notifier_list_notify(&main_loop_poll_notifiers, &mlpoll);

    if (icount_enabled()) {
        /*
         * CPU thread can infinitely wait for event after
         * missing the warp
         */
        icount_start_warp_timer();
    }
    qemu_clock_run_all_timers();
}
  503. /* Functions to operate on the main QEMU AioContext. */
/* Create a named bottom half attached to the main loop's AioContext. */
QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name)
{
    return aio_bh_new_full(qemu_aio_context, cb, opaque, name);
}
  508. /*
  509. * Functions to operate on the I/O handler AioContext.
  510. * This context runs on top of main loop. We can't reuse qemu_aio_context
  511. * because iohandlers mustn't be polled by aio_poll(qemu_aio_context).
  512. */
  513. static AioContext *iohandler_ctx;
  514. static void iohandler_init(void)
  515. {
  516. if (!iohandler_ctx) {
  517. iohandler_ctx = aio_context_new(&error_abort);
  518. }
  519. }
  520. AioContext *iohandler_get_aio_context(void)
  521. {
  522. iohandler_init();
  523. return iohandler_ctx;
  524. }
  525. GSource *iohandler_get_g_source(void)
  526. {
  527. iohandler_init();
  528. return aio_get_g_source(iohandler_ctx);
  529. }
  530. void qemu_set_fd_handler(int fd,
  531. IOHandler *fd_read,
  532. IOHandler *fd_write,
  533. void *opaque)
  534. {
  535. iohandler_init();
  536. aio_set_fd_handler(iohandler_ctx, fd, false,
  537. fd_read, fd_write, NULL, NULL, opaque);
  538. }
  539. void event_notifier_set_handler(EventNotifier *e,
  540. EventNotifierHandler *handler)
  541. {
  542. iohandler_init();
  543. aio_set_event_notifier(iohandler_ctx, e, false,
  544. handler, NULL, NULL);
  545. }