main-loop.c (13 KB)
  1. /*
  2. * QEMU System Emulator
  3. *
  4. * Copyright (c) 2003-2008 Fabrice Bellard
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to deal
  8. * in the Software without restriction, including without limitation the rights
  9. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. * copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22. * THE SOFTWARE.
  23. */
  24. #include "qemu-common.h"
  25. #include "qemu-timer.h"
  26. #include "slirp/slirp.h"
  27. #include "main-loop.h"
  28. #ifndef _WIN32
  29. #include "compatfd.h"
  30. static int io_thread_fd = -1;
  31. void qemu_notify_event(void)
  32. {
  33. /* Write 8 bytes to be compatible with eventfd. */
  34. static const uint64_t val = 1;
  35. ssize_t ret;
  36. if (io_thread_fd == -1) {
  37. return;
  38. }
  39. do {
  40. ret = write(io_thread_fd, &val, sizeof(val));
  41. } while (ret < 0 && errno == EINTR);
  42. /* EAGAIN is fine, a read must be pending. */
  43. if (ret < 0 && errno != EAGAIN) {
  44. fprintf(stderr, "qemu_notify_event: write() failed: %s\n",
  45. strerror(errno));
  46. exit(1);
  47. }
  48. }
  49. static void qemu_event_read(void *opaque)
  50. {
  51. int fd = (intptr_t)opaque;
  52. ssize_t len;
  53. char buffer[512];
  54. /* Drain the notify pipe. For eventfd, only 8 bytes will be read. */
  55. do {
  56. len = read(fd, buffer, sizeof(buffer));
  57. } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
  58. }
  59. static int qemu_event_init(void)
  60. {
  61. int err;
  62. int fds[2];
  63. err = qemu_eventfd(fds);
  64. if (err == -1) {
  65. return -errno;
  66. }
  67. err = fcntl_setfl(fds[0], O_NONBLOCK);
  68. if (err < 0) {
  69. goto fail;
  70. }
  71. err = fcntl_setfl(fds[1], O_NONBLOCK);
  72. if (err < 0) {
  73. goto fail;
  74. }
  75. qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
  76. (void *)(intptr_t)fds[0]);
  77. io_thread_fd = fds[1];
  78. return 0;
  79. fail:
  80. close(fds[0]);
  81. close(fds[1]);
  82. return err;
  83. }
  84. /* If we have signalfd, we mask out the signals we want to handle and then
  85. * use signalfd to listen for them. We rely on whatever the current signal
  86. * handler is to dispatch the signals when we receive them.
  87. */
  88. static void sigfd_handler(void *opaque)
  89. {
  90. int fd = (intptr_t)opaque;
  91. struct qemu_signalfd_siginfo info;
  92. struct sigaction action;
  93. ssize_t len;
  94. while (1) {
  95. do {
  96. len = read(fd, &info, sizeof(info));
  97. } while (len == -1 && errno == EINTR);
  98. if (len == -1 && errno == EAGAIN) {
  99. break;
  100. }
  101. if (len != sizeof(info)) {
  102. printf("read from sigfd returned %zd: %m\n", len);
  103. return;
  104. }
  105. sigaction(info.ssi_signo, NULL, &action);
  106. if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
  107. action.sa_sigaction(info.ssi_signo,
  108. (siginfo_t *)&info, NULL);
  109. } else if (action.sa_handler) {
  110. action.sa_handler(info.ssi_signo);
  111. }
  112. }
  113. }
  114. static int qemu_signal_init(void)
  115. {
  116. int sigfd;
  117. sigset_t set;
  118. /*
  119. * SIG_IPI must be blocked in the main thread and must not be caught
  120. * by sigwait() in the signal thread. Otherwise, the cpu thread will
  121. * not catch it reliably.
  122. */
  123. sigemptyset(&set);
  124. sigaddset(&set, SIG_IPI);
  125. sigaddset(&set, SIGIO);
  126. sigaddset(&set, SIGALRM);
  127. sigaddset(&set, SIGBUS);
  128. pthread_sigmask(SIG_BLOCK, &set, NULL);
  129. sigdelset(&set, SIG_IPI);
  130. sigfd = qemu_signalfd(&set);
  131. if (sigfd == -1) {
  132. fprintf(stderr, "failed to create signalfd\n");
  133. return -errno;
  134. }
  135. fcntl_setfl(sigfd, O_NONBLOCK);
  136. qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
  137. (void *)(intptr_t)sigfd);
  138. return 0;
  139. }
  140. #else /* _WIN32 */
  141. static HANDLE qemu_event_handle = NULL;
  142. static void dummy_event_handler(void *opaque)
  143. {
  144. }
  145. static int qemu_event_init(void)
  146. {
  147. qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
  148. if (!qemu_event_handle) {
  149. fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
  150. return -1;
  151. }
  152. qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
  153. return 0;
  154. }
  155. void qemu_notify_event(void)
  156. {
  157. if (!qemu_event_handle) {
  158. return;
  159. }
  160. if (!SetEvent(qemu_event_handle)) {
  161. fprintf(stderr, "qemu_notify_event: SetEvent failed: %ld\n",
  162. GetLastError());
  163. exit(1);
  164. }
  165. }
/* Windows has no POSIX signals to route through a signalfd; nothing to
 * set up, so always succeed. */
static int qemu_signal_init(void)
{
    return 0;
}
  170. #endif
  171. int main_loop_init(void)
  172. {
  173. int ret;
  174. qemu_mutex_lock_iothread();
  175. ret = qemu_signal_init();
  176. if (ret) {
  177. return ret;
  178. }
  179. /* Note eventfd must be drained before signalfd handlers run */
  180. ret = qemu_event_init();
  181. if (ret) {
  182. return ret;
  183. }
  184. return 0;
  185. }
/* Scratch fd_sets and highest-fd counter shared between the fill and poll
 * phases of one main-loop iteration (filled in main_loop_wait, consumed in
 * os_host_main_loop_wait). */
static fd_set rfds, wfds, xfds;
static int nfds;
/* GLib sources queried from the default context and polled alongside
 * QEMU's own fds; n_poll_fds entries are valid after each query. */
static GPollFD poll_fds[1024 * 2]; /* this is probably overkill */
static int n_poll_fds;
static int max_priority;
  191. #ifndef _WIN32
  192. static void glib_select_fill(int *max_fd, fd_set *rfds, fd_set *wfds,
  193. fd_set *xfds, uint32_t *cur_timeout)
  194. {
  195. GMainContext *context = g_main_context_default();
  196. int i;
  197. int timeout = 0;
  198. g_main_context_prepare(context, &max_priority);
  199. n_poll_fds = g_main_context_query(context, max_priority, &timeout,
  200. poll_fds, ARRAY_SIZE(poll_fds));
  201. g_assert(n_poll_fds <= ARRAY_SIZE(poll_fds));
  202. for (i = 0; i < n_poll_fds; i++) {
  203. GPollFD *p = &poll_fds[i];
  204. if ((p->events & G_IO_IN)) {
  205. FD_SET(p->fd, rfds);
  206. *max_fd = MAX(*max_fd, p->fd);
  207. }
  208. if ((p->events & G_IO_OUT)) {
  209. FD_SET(p->fd, wfds);
  210. *max_fd = MAX(*max_fd, p->fd);
  211. }
  212. if ((p->events & G_IO_ERR)) {
  213. FD_SET(p->fd, xfds);
  214. *max_fd = MAX(*max_fd, p->fd);
  215. }
  216. }
  217. if (timeout >= 0 && timeout < *cur_timeout) {
  218. *cur_timeout = timeout;
  219. }
  220. }
  221. static void glib_select_poll(fd_set *rfds, fd_set *wfds, fd_set *xfds,
  222. bool err)
  223. {
  224. GMainContext *context = g_main_context_default();
  225. if (!err) {
  226. int i;
  227. for (i = 0; i < n_poll_fds; i++) {
  228. GPollFD *p = &poll_fds[i];
  229. if ((p->events & G_IO_IN) && FD_ISSET(p->fd, rfds)) {
  230. p->revents |= G_IO_IN;
  231. }
  232. if ((p->events & G_IO_OUT) && FD_ISSET(p->fd, wfds)) {
  233. p->revents |= G_IO_OUT;
  234. }
  235. if ((p->events & G_IO_ERR) && FD_ISSET(p->fd, xfds)) {
  236. p->revents |= G_IO_ERR;
  237. }
  238. }
  239. }
  240. if (g_main_context_check(context, max_priority, poll_fds, n_poll_fds)) {
  241. g_main_context_dispatch(context);
  242. }
  243. }
  244. static int os_host_main_loop_wait(uint32_t timeout)
  245. {
  246. struct timeval tv, *tvarg = NULL;
  247. int ret;
  248. glib_select_fill(&nfds, &rfds, &wfds, &xfds, &timeout);
  249. if (timeout < UINT32_MAX) {
  250. tvarg = &tv;
  251. tv.tv_sec = timeout / 1000;
  252. tv.tv_usec = (timeout % 1000) * 1000;
  253. }
  254. if (timeout > 0) {
  255. qemu_mutex_unlock_iothread();
  256. }
  257. ret = select(nfds + 1, &rfds, &wfds, &xfds, tvarg);
  258. if (timeout > 0) {
  259. qemu_mutex_lock_iothread();
  260. }
  261. glib_select_poll(&rfds, &wfds, &xfds, (ret < 0));
  262. return ret;
  263. }
  264. #else
  265. /***********************************************************/
  266. /* Polling handling */
/* Singly-linked list node for callbacks polled on every Windows
 * main-loop iteration (see os_host_main_loop_wait). */
typedef struct PollingEntry {
    PollingFunc *func;          /* callback; non-zero return means it did work */
    void *opaque;               /* passed back to func */
    struct PollingEntry *next;
} PollingEntry;
/* Head of the registered polling callbacks, in registration order. */
static PollingEntry *first_polling_entry;
  273. int qemu_add_polling_cb(PollingFunc *func, void *opaque)
  274. {
  275. PollingEntry **ppe, *pe;
  276. pe = g_malloc0(sizeof(PollingEntry));
  277. pe->func = func;
  278. pe->opaque = opaque;
  279. for(ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next);
  280. *ppe = pe;
  281. return 0;
  282. }
  283. void qemu_del_polling_cb(PollingFunc *func, void *opaque)
  284. {
  285. PollingEntry **ppe, *pe;
  286. for(ppe = &first_polling_entry; *ppe != NULL; ppe = &(*ppe)->next) {
  287. pe = *ppe;
  288. if (pe->func == func && pe->opaque == opaque) {
  289. *ppe = pe->next;
  290. g_free(pe);
  291. break;
  292. }
  293. }
  294. }
  295. /***********************************************************/
  296. /* Wait objects support */
/* Parallel-array table of Windows handles the main loop waits on.
 * The arrays are one slot larger than MAXIMUM_WAIT_OBJECTS so the
 * shift-down in qemu_del_wait_object may read index i + 1 without
 * going out of bounds. */
typedef struct WaitObjects {
    int num;                                    /* number of live entries */
    int revents[MAXIMUM_WAIT_OBJECTS + 1];      /* last g_poll results */
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    WaitObjectFunc *func[MAXIMUM_WAIT_OBJECTS + 1];
    void *opaque[MAXIMUM_WAIT_OBJECTS + 1];
} WaitObjects;
/* Single global instance; the main loop is single-threaded here. */
static WaitObjects wait_objects = {0};
  305. int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
  306. {
  307. WaitObjects *w = &wait_objects;
  308. if (w->num >= MAXIMUM_WAIT_OBJECTS) {
  309. return -1;
  310. }
  311. w->events[w->num] = handle;
  312. w->func[w->num] = func;
  313. w->opaque[w->num] = opaque;
  314. w->revents[w->num] = 0;
  315. w->num++;
  316. return 0;
  317. }
/* Unregister the first entry whose handle matches, compacting the table
 * by shifting all following entries down one slot.  func and opaque are
 * accepted for interface symmetry but are not compared. */
void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
{
    int i, found;
    WaitObjects *w = &wait_objects;

    found = 0;
    for (i = 0; i < w->num; i++) {
        if (w->events[i] == handle) {
            found = 1;
        }
        /* From the match onward, pull each entry down by one.  Reading
         * index i + 1 on the last iteration is in bounds: the arrays
         * have one spare slot beyond MAXIMUM_WAIT_OBJECTS. */
        if (found) {
            w->events[i] = w->events[i + 1];
            w->func[i] = w->func[i + 1];
            w->opaque[i] = w->opaque[i + 1];
            w->revents[i] = w->revents[i + 1];
        }
    }
    if (found) {
        w->num--;
    }
}
/* Associate a socket with the main-loop notification event so that any
 * network readiness (read/accept/close/connect/write/OOB) signals it and
 * wakes the main loop. */
void qemu_fd_register(int fd)
{
    WSAEventSelect(fd, qemu_event_handle, FD_READ | FD_ACCEPT | FD_CLOSE |
                   FD_CONNECT | FD_WRITE | FD_OOB);
}
/* Windows event wait: run polling callbacks first, do a non-blocking
 * select() on any prepared sockets, then g_poll() the GLib sources plus
 * the registered wait-object handles with the iothread lock dropped.
 * timeout is in milliseconds.  Returns non-zero when something was
 * handled (polling callback result or g_poll result). */
static int os_host_main_loop_wait(uint32_t timeout)
{
    GMainContext *context = g_main_context_default();
    int ret, i;
    PollingEntry *pe;
    WaitObjects *w = &wait_objects;
    gint poll_timeout;
    static struct timeval tv0;  /* zeroed: select() below only polls */

    /* XXX: need to suppress polling by better using win32 events */
    ret = 0;
    for (pe = first_polling_entry; pe != NULL; pe = pe->next) {
        ret |= pe->func(pe->opaque);
    }
    if (ret != 0) {
        /* A polling callback did work; skip the blocking wait entirely. */
        return ret;
    }

    if (nfds >= 0) {
        ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
        if (ret != 0) {
            /* Socket activity pending: make the g_poll below non-blocking. */
            timeout = 0;
        }
    }

    g_main_context_prepare(context, &max_priority);
    n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout,
                                      poll_fds, ARRAY_SIZE(poll_fds));
    g_assert(n_poll_fds <= ARRAY_SIZE(poll_fds));

    /* Append the wait-object handles after the GLib fds; g_poll on
     * Windows accepts event handles in the fd field. */
    for (i = 0; i < w->num; i++) {
        poll_fds[n_poll_fds + i].fd = (DWORD_PTR)w->events[i];
        poll_fds[n_poll_fds + i].events = G_IO_IN;
    }

    /* A negative GLib timeout means "wait forever"; cap at our timeout. */
    if (poll_timeout < 0 || timeout < poll_timeout) {
        poll_timeout = timeout;
    }

    qemu_mutex_unlock_iothread();
    ret = g_poll(poll_fds, n_poll_fds + w->num, poll_timeout);
    qemu_mutex_lock_iothread();
    if (ret > 0) {
        /* Record results first, then dispatch: a callback may add or
         * remove wait objects while we iterate. */
        for (i = 0; i < w->num; i++) {
            w->revents[i] = poll_fds[n_poll_fds + i].revents;
        }
        for (i = 0; i < w->num; i++) {
            if (w->revents[i] && w->func[i]) {
                w->func[i](w->opaque[i]);
            }
        }
    }

    if (g_main_context_check(context, max_priority, poll_fds, n_poll_fds)) {
        g_main_context_dispatch(context);
    }

    /* If an edge-triggered socket event occurred, select will return a
     * positive result on the next iteration.  We do not need to do anything
     * here.
     */
    return ret;
}
  398. #endif
/* Run one iteration of the main loop: gather fds from slirp and the
 * registered iohandlers, wait for events (not at all when nonblocking is
 * set), then dispatch handlers, timers and bottom halves.
 * Returns os_host_main_loop_wait()'s result. */
int main_loop_wait(int nonblocking)
{
    int ret;
    uint32_t timeout = UINT32_MAX;      /* UINT32_MAX: block indefinitely */

    if (nonblocking) {
        timeout = 0;
    } else {
        /* Let pending bottom halves shorten the wait. */
        qemu_bh_update_timeout(&timeout);
    }

    /* poll any events */
    /* XXX: separate device handlers from system ones */
    nfds = -1;
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    FD_ZERO(&xfds);

#ifdef CONFIG_SLIRP
    slirp_update_timeout(&timeout);
    slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
#endif
    qemu_iohandler_fill(&nfds, &rfds, &wfds, &xfds);
    ret = os_host_main_loop_wait(timeout);
    qemu_iohandler_poll(&rfds, &wfds, &xfds, ret);
#ifdef CONFIG_SLIRP
    slirp_select_poll(&rfds, &wfds, &xfds, (ret < 0));
#endif

    qemu_run_all_timers();

    /* Check bottom-halves last in case any of the earlier events triggered
       them. */
    qemu_bh_poll();

    return ret;
}