/* aio-win32.c */
/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
/* Per-notifier bookkeeping for the Win32 AioContext event loop. */
struct AioHandler {
    EventNotifier *e;                /* the event notifier being watched */
    EventNotifierHandler *io_notify; /* callback run when e is signaled */
    GPollFD pfd;                     /* handle/events/revents exposed to the GSource */
    int deleted;                     /* set while the list is being walked; node
                                      * is freed later by the walker */
    QLIST_ENTRY(AioHandler) node;    /* link in ctx->aio_handlers */
};
/*
 * Install, replace, or remove the handler for event notifier @e on @ctx.
 *
 * A NULL @io_notify removes the handler.  Removal is deferred when
 * ctx->walking_handlers is nonzero (someone is iterating the list): the
 * node is only marked ->deleted and the walker frees it later.  Otherwise
 * the node is unlinked and freed immediately.
 *
 * A non-NULL @io_notify installs or updates the callback, allocating a
 * node and registering its GPollFD with the GSource on first use.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            EventNotifierHandler *io_notify)
{
    AioHandler *node;

    /* Find the live (non-deleted) node for this notifier, if any. */
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                /* Clear revents so a pending event is not dispatched to a
                 * handler that is being torn down. */
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->e = e;
            /* On Win32 the pollable "fd" is the notifier's HANDLE. */
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);
            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    /* Wake any blocked aio_poll() so it re-reads the handler list. */
    aio_notify(ctx);
}
  70. bool aio_pending(AioContext *ctx)
  71. {
  72. AioHandler *node;
  73. QLIST_FOREACH(node, &ctx->aio_handlers, node) {
  74. if (node->pfd.revents && node->io_notify) {
  75. return true;
  76. }
  77. }
  78. return false;
  79. }
/*
 * Run one iteration of the AioContext event loop on Win32.
 *
 * Dispatches queued bottom halves, expired timers, and pending event
 * notifier callbacks; if @blocking and no progress was made, waits (via
 * WaitForMultipleObjects) until an event fires or the next timer deadline.
 *
 * Returns true if any callback other than the context's own notifier ran
 * ("progress"), false otherwise.
 */
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool progress;
    int count;
    int timeout;

    progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call
     * them.  Do not call select in this case, because it is possible that
     * the caller does not need a complete flush (as is the case for
     * qemu_aio_wait loops).
     */
    if (aio_bh_poll(ctx)) {
        blocking = false;
        progress = true;
    }

    /* Run timers */
    progress |= timerlistgroup_run_timers(&ctx->tlg);

    /*
     * Then dispatch any pending callbacks from the GSource.
     *
     * We have to walk very carefully in case qemu_aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;

        /* Bump the walk counter so concurrent removal only marks nodes
         * deleted instead of freeing them under our feet. */
        ctx->walking_handlers++;

        if (node->pfd.revents && node->io_notify) {
            /* Clear revents before the callback: it may re-arm itself. */
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        /* Reap nodes deferred for deletion once no one is walking. */
        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    if (progress && !blocking) {
        return true;
    }

    ctx->walking_handlers++;

    /* fill fd sets: collect the HANDLE of every live handler */
    count = 0;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    ctx->walking_handlers--;

    /* wait until next event */
    while (count > 0) {
        int ret;

        /* Only the first pass may block; later passes use timeout 0
         * because `blocking` is cleared below after the first wakeup. */
        timeout = blocking ?
            qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg)) : 0;
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);

        /* if we have any signaled events, dispatch event; the unsigned
         * comparison also catches WAIT_TIMEOUT and WAIT_FAILED */
        if ((DWORD) (ret - WAIT_OBJECT_0) >= count) {
            break;
        }

        blocking = false;

        /* we have to walk very carefully in case
         * qemu_aio_set_fd_handler is called while we're walking */
        node = QLIST_FIRST(&ctx->aio_handlers);
        while (node) {
            AioHandler *tmp;

            ctx->walking_handlers++;

            /* Dispatch only the handler whose HANDLE was signaled. */
            if (!node->deleted &&
                event_notifier_get_handle(node->e) == events[ret - WAIT_OBJECT_0] &&
                node->io_notify) {
                node->io_notify(node->e);

                /* aio_notify() does not count as progress */
                if (node->e != &ctx->notifier) {
                    progress = true;
                }
            }

            tmp = node;
            node = QLIST_NEXT(node, node);

            ctx->walking_handlers--;

            /* Reap nodes deferred for deletion once no one is walking. */
            if (!ctx->walking_handlers && tmp->deleted) {
                QLIST_REMOVE(tmp, node);
                g_free(tmp);
            }
        }

        /* Try again, but only call each handler once: drop the signaled
         * HANDLE from the wait set by swapping in the last entry. */
        events[ret - WAIT_OBJECT_0] = events[--count];
    }

    if (blocking) {
        /* Run the timers a second time. We do this because otherwise
         * aio_wait will not note progress - and will stop a drain early -
         * if we have a timer that was not ready to run entering the wait
         * above but is ready after it. This will only do anything if a
         * timer has expired.
         */
        progress |= timerlistgroup_run_timers(&ctx->tlg);
    }

    return progress;
}