2
0

aio.c 4.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194
  1. /*
  2. * QEMU aio implementation
  3. *
  4. * Copyright IBM, Corp. 2008
  5. *
  6. * Authors:
  7. * Anthony Liguori <aliguori@us.ibm.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2. See
  10. * the COPYING file in the top-level directory.
  11. *
  12. * Contributions after 2012-01-13 are licensed under the terms of the
  13. * GNU GPL, version 2 or (at your option) any later version.
  14. */
  15. #include "qemu-common.h"
  16. #include "block.h"
  17. #include "qemu-queue.h"
  18. #include "qemu_socket.h"
typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static QLIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 *
 * While nonzero, removals are deferred: nodes are only flagged `deleted`
 * and reaped after the walk finishes (see qemu_aio_set_fd_handler and the
 * dispatch loop in qemu_aio_wait).
 */
static int walking_handlers;

/* One monitored file descriptor together with its callbacks. */
struct AioHandler
{
    int fd;                    /* file descriptor being monitored */
    IOHandler *io_read;        /* called when fd is readable (may be NULL) */
    IOHandler *io_write;       /* called when fd is writable (may be NULL) */
    AioFlushHandler *io_flush; /* returns 0 when no requests are pending
                                * for this handler (see qemu_aio_wait) */
    int deleted;               /* flagged for deferred removal while the
                                * list is being walked */
    void *opaque;              /* user data passed to all three callbacks */
    QLIST_ENTRY(AioHandler) node;
};
  37. static AioHandler *find_aio_handler(int fd)
  38. {
  39. AioHandler *node;
  40. QLIST_FOREACH(node, &aio_handlers, node) {
  41. if (node->fd == fd)
  42. if (!node->deleted)
  43. return node;
  44. }
  45. return NULL;
  46. }
  47. int qemu_aio_set_fd_handler(int fd,
  48. IOHandler *io_read,
  49. IOHandler *io_write,
  50. AioFlushHandler *io_flush,
  51. void *opaque)
  52. {
  53. AioHandler *node;
  54. node = find_aio_handler(fd);
  55. /* Are we deleting the fd handler? */
  56. if (!io_read && !io_write) {
  57. if (node) {
  58. /* If the lock is held, just mark the node as deleted */
  59. if (walking_handlers)
  60. node->deleted = 1;
  61. else {
  62. /* Otherwise, delete it for real. We can't just mark it as
  63. * deleted because deleted nodes are only cleaned up after
  64. * releasing the walking_handlers lock.
  65. */
  66. QLIST_REMOVE(node, node);
  67. g_free(node);
  68. }
  69. }
  70. } else {
  71. if (node == NULL) {
  72. /* Alloc and insert if it's not already there */
  73. node = g_malloc0(sizeof(AioHandler));
  74. node->fd = fd;
  75. QLIST_INSERT_HEAD(&aio_handlers, node, node);
  76. }
  77. /* Update handler with latest information */
  78. node->io_read = io_read;
  79. node->io_write = io_write;
  80. node->io_flush = io_flush;
  81. node->opaque = opaque;
  82. }
  83. qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);
  84. return 0;
  85. }
  86. void qemu_aio_flush(void)
  87. {
  88. while (qemu_aio_wait());
  89. }
  90. bool qemu_aio_wait(void)
  91. {
  92. AioHandler *node;
  93. fd_set rdfds, wrfds;
  94. int max_fd = -1;
  95. int ret;
  96. bool busy;
  97. /*
  98. * If there are callbacks left that have been queued, we need to call then.
  99. * Do not call select in this case, because it is possible that the caller
  100. * does not need a complete flush (as is the case for qemu_aio_wait loops).
  101. */
  102. if (qemu_bh_poll()) {
  103. return true;
  104. }
  105. walking_handlers = 1;
  106. FD_ZERO(&rdfds);
  107. FD_ZERO(&wrfds);
  108. /* fill fd sets */
  109. busy = false;
  110. QLIST_FOREACH(node, &aio_handlers, node) {
  111. /* If there aren't pending AIO operations, don't invoke callbacks.
  112. * Otherwise, if there are no AIO requests, qemu_aio_wait() would
  113. * wait indefinitely.
  114. */
  115. if (node->io_flush) {
  116. if (node->io_flush(node->opaque) == 0) {
  117. continue;
  118. }
  119. busy = true;
  120. }
  121. if (!node->deleted && node->io_read) {
  122. FD_SET(node->fd, &rdfds);
  123. max_fd = MAX(max_fd, node->fd + 1);
  124. }
  125. if (!node->deleted && node->io_write) {
  126. FD_SET(node->fd, &wrfds);
  127. max_fd = MAX(max_fd, node->fd + 1);
  128. }
  129. }
  130. walking_handlers = 0;
  131. /* No AIO operations? Get us out of here */
  132. if (!busy) {
  133. return false;
  134. }
  135. /* wait until next event */
  136. ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
  137. /* if we have any readable fds, dispatch event */
  138. if (ret > 0) {
  139. walking_handlers = 1;
  140. /* we have to walk very carefully in case
  141. * qemu_aio_set_fd_handler is called while we're walking */
  142. node = QLIST_FIRST(&aio_handlers);
  143. while (node) {
  144. AioHandler *tmp;
  145. if (!node->deleted &&
  146. FD_ISSET(node->fd, &rdfds) &&
  147. node->io_read) {
  148. node->io_read(node->opaque);
  149. }
  150. if (!node->deleted &&
  151. FD_ISSET(node->fd, &wrfds) &&
  152. node->io_write) {
  153. node->io_write(node->opaque);
  154. }
  155. tmp = node;
  156. node = QLIST_NEXT(node, node);
  157. if (tmp->deleted) {
  158. QLIST_REMOVE(tmp, node);
  159. g_free(tmp);
  160. }
  161. }
  162. walking_handlers = 0;
  163. }
  164. return true;
  165. }