/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "block.h"
#include "qemu-queue.h"
#include "qemu_socket.h"

typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers. */
static QLIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.  While it is non-zero, removals are deferred by
 * marking the node deleted instead of unlinking it.
 */
static int walking_handlers;
/* Per-fd bookkeeping for one registered AIO handler. */
struct AioHandler
{
    int fd;                            /* file descriptor being watched */
    IOHandler *io_read;                /* invoked when fd is readable */
    IOHandler *io_write;               /* invoked when fd is writable */
    AioFlushHandler *io_flush;         /* returns non-zero while requests are
                                        * pending on this fd (see qemu_aio_wait) */
    AioProcessQueue *io_process_queue; /* drains queued completions; returns
                                        * non-zero when it made progress */
    int deleted;                       /* deferred-removal flag: node is only
                                        * marked while walking_handlers is set,
                                        * and reaped after the walk finishes */
    void *opaque;                      /* user data passed to all callbacks */
    QLIST_ENTRY(AioHandler) node;      /* linkage in the aio_handlers list */
};
  36. static AioHandler *find_aio_handler(int fd)
  37. {
  38. AioHandler *node;
  39. QLIST_FOREACH(node, &aio_handlers, node) {
  40. if (node->fd == fd)
  41. if (!node->deleted)
  42. return node;
  43. }
  44. return NULL;
  45. }
  46. int qemu_aio_set_fd_handler(int fd,
  47. IOHandler *io_read,
  48. IOHandler *io_write,
  49. AioFlushHandler *io_flush,
  50. AioProcessQueue *io_process_queue,
  51. void *opaque)
  52. {
  53. AioHandler *node;
  54. node = find_aio_handler(fd);
  55. /* Are we deleting the fd handler? */
  56. if (!io_read && !io_write) {
  57. if (node) {
  58. /* If the lock is held, just mark the node as deleted */
  59. if (walking_handlers)
  60. node->deleted = 1;
  61. else {
  62. /* Otherwise, delete it for real. We can't just mark it as
  63. * deleted because deleted nodes are only cleaned up after
  64. * releasing the walking_handlers lock.
  65. */
  66. QLIST_REMOVE(node, node);
  67. g_free(node);
  68. }
  69. }
  70. } else {
  71. if (node == NULL) {
  72. /* Alloc and insert if it's not already there */
  73. node = g_malloc0(sizeof(AioHandler));
  74. node->fd = fd;
  75. QLIST_INSERT_HEAD(&aio_handlers, node, node);
  76. }
  77. /* Update handler with latest information */
  78. node->io_read = io_read;
  79. node->io_write = io_write;
  80. node->io_flush = io_flush;
  81. node->io_process_queue = io_process_queue;
  82. node->opaque = opaque;
  83. }
  84. qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);
  85. return 0;
  86. }
  87. void qemu_aio_flush(void)
  88. {
  89. AioHandler *node;
  90. int ret;
  91. do {
  92. ret = 0;
  93. /*
  94. * If there are pending emulated aio start them now so flush
  95. * will be able to return 1.
  96. */
  97. qemu_aio_wait();
  98. QLIST_FOREACH(node, &aio_handlers, node) {
  99. if (node->io_flush) {
  100. ret |= node->io_flush(node->opaque);
  101. }
  102. }
  103. } while (qemu_bh_poll() || ret > 0);
  104. }
  105. int qemu_aio_process_queue(void)
  106. {
  107. AioHandler *node;
  108. int ret = 0;
  109. walking_handlers = 1;
  110. QLIST_FOREACH(node, &aio_handlers, node) {
  111. if (node->io_process_queue) {
  112. if (node->io_process_queue(node->opaque)) {
  113. ret = 1;
  114. }
  115. }
  116. }
  117. walking_handlers = 0;
  118. return ret;
  119. }
/*
 * Run one iteration of the AIO event loop.
 *
 * Bottom halves and queued completions are given a chance to run
 * first; if either made progress, return immediately so the caller
 * does not block in select() while work is already available.
 * Otherwise block in select() on every fd that still has requests in
 * flight and dispatch the ready read/write callbacks.
 */
void qemu_aio_wait(void)
{
    int ret;

    if (qemu_bh_poll())
        return;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Return afterwards to avoid waiting needlessly in select().
     */
    if (qemu_aio_process_queue())
        return;

    do {
        AioHandler *node;
        fd_set rdfds, wrfds;
        int max_fd = -1;

        /* Hold the "lock" while building the fd sets: io_flush callbacks
         * below may trigger deferred (marked-only) handler removal. */
        walking_handlers = 1;

        FD_ZERO(&rdfds);
        FD_ZERO(&wrfds);

        /* fill fd sets */
        QLIST_FOREACH(node, &aio_handlers, node) {
            /* If there aren't pending AIO operations, don't invoke callbacks.
             * Otherwise, if there are no AIO requests, qemu_aio_wait() would
             * wait indefinitely.
             */
            if (node->io_flush && node->io_flush(node->opaque) == 0)
                continue;

            if (!node->deleted && node->io_read) {
                FD_SET(node->fd, &rdfds);
                max_fd = MAX(max_fd, node->fd + 1);  /* select() nfds is max fd + 1 */
            }
            if (!node->deleted && node->io_write) {
                FD_SET(node->fd, &wrfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
        }

        walking_handlers = 0;

        /* No AIO operations?  Get us out of here */
        if (max_fd == -1)
            break;

        /* wait until next event */
        ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
        /* NOTE(review): this `continue` jumps to the do-while condition,
         * which is false for ret == -1, so an EINTR ends this
         * qemu_aio_wait() call instead of retrying select(). */
        if (ret == -1 && errno == EINTR)
            continue;

        /* if we have any readable fds, dispatch event */
        if (ret > 0) {
            walking_handlers = 1;

            /* we have to walk very carefully in case
             * qemu_aio_set_fd_handler is called while we're walking */
            node = QLIST_FIRST(&aio_handlers);
            while (node) {
                AioHandler *tmp;

                if (!node->deleted &&
                    FD_ISSET(node->fd, &rdfds) &&
                    node->io_read) {
                    node->io_read(node->opaque);
                }
                if (!node->deleted &&
                    FD_ISSET(node->fd, &wrfds) &&
                    node->io_write) {
                    node->io_write(node->opaque);
                }

                /* Advance before reaping: tmp may be freed just below. */
                tmp = node;
                node = QLIST_NEXT(node, node);

                if (tmp->deleted) {
                    QLIST_REMOVE(tmp, node);
                    g_free(tmp);
                }
            }

            walking_handlers = 0;
        }
    } while (ret == 0);
}