2
0

aio.c 5.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198
  1. /*
  2. * QEMU aio implementation
  3. *
  4. * Copyright IBM, Corp. 2008
  5. *
  6. * Authors:
  7. * Anthony Liguori <aliguori@us.ibm.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2. See
  10. * the COPYING file in the top-level directory.
  11. *
  12. */
  13. #include "qemu-common.h"
  14. #include "block.h"
  15. #include "sys-queue.h"
  16. #include "qemu_socket.h"
typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static LIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.  It is a plain flag, not a mutex: set while the list
 * is being walked, cleared afterwards.
 */
static int walking_handlers;

/* One registered file descriptor and its AIO callbacks. */
struct AioHandler
{
    int fd;                     /* file descriptor being monitored */
    IOHandler *io_read;         /* invoked when fd is readable (may be NULL) */
    IOHandler *io_write;        /* invoked when fd is writable (may be NULL) */
    AioFlushHandler *io_flush;  /* returns 0 when no requests are pending for
                                 * this handler (see qemu_aio_wait); may be NULL */
    int deleted;                /* set instead of unlinking while the list is
                                 * being walked; node is freed after the walk */
    void *opaque;               /* caller context passed to every callback */
    LIST_ENTRY(AioHandler) node;
};
  35. static AioHandler *find_aio_handler(int fd)
  36. {
  37. AioHandler *node;
  38. LIST_FOREACH(node, &aio_handlers, node) {
  39. if (node->fd == fd)
  40. if (!node->deleted)
  41. return node;
  42. }
  43. return NULL;
  44. }
  45. int qemu_aio_set_fd_handler(int fd,
  46. IOHandler *io_read,
  47. IOHandler *io_write,
  48. AioFlushHandler *io_flush,
  49. void *opaque)
  50. {
  51. AioHandler *node;
  52. node = find_aio_handler(fd);
  53. /* Are we deleting the fd handler? */
  54. if (!io_read && !io_write) {
  55. if (node) {
  56. /* If the lock is held, just mark the node as deleted */
  57. if (walking_handlers)
  58. node->deleted = 1;
  59. else {
  60. /* Otherwise, delete it for real. We can't just mark it as
  61. * deleted because deleted nodes are only cleaned up after
  62. * releasing the walking_handlers lock.
  63. */
  64. LIST_REMOVE(node, node);
  65. qemu_free(node);
  66. }
  67. }
  68. } else {
  69. if (node == NULL) {
  70. /* Alloc and insert if it's not already there */
  71. node = qemu_mallocz(sizeof(AioHandler));
  72. node->fd = fd;
  73. LIST_INSERT_HEAD(&aio_handlers, node, node);
  74. }
  75. /* Update handler with latest information */
  76. node->io_read = io_read;
  77. node->io_write = io_write;
  78. node->io_flush = io_flush;
  79. node->opaque = opaque;
  80. }
  81. qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);
  82. return 0;
  83. }
  84. void qemu_aio_flush(void)
  85. {
  86. AioHandler *node;
  87. int ret;
  88. do {
  89. ret = 0;
  90. /*
  91. * If there are pending emulated aio start them now so flush
  92. * will be able to return 1.
  93. */
  94. qemu_aio_wait();
  95. LIST_FOREACH(node, &aio_handlers, node) {
  96. ret |= node->io_flush(node->opaque);
  97. }
  98. } while (qemu_bh_poll() || ret > 0);
  99. }
/*
 * Run one iteration of the AIO event loop: build fd sets from handlers
 * that still have pending requests, block in select() until one becomes
 * ready, and dispatch its read/write callbacks.  Returns immediately if
 * a bottom half was run instead.
 */
void qemu_aio_wait(void)
{
    int ret;

    /* Bottom halves take priority; running one counts as progress. */
    if (qemu_bh_poll())
        return;

    do {
        AioHandler *node;
        fd_set rdfds, wrfds;
        int max_fd = -1;

        /* Flag the walk so concurrent removals defer to node->deleted. */
        walking_handlers = 1;

        FD_ZERO(&rdfds);
        FD_ZERO(&wrfds);

        /* fill fd sets */
        LIST_FOREACH(node, &aio_handlers, node) {
            /* If there aren't pending AIO operations, don't invoke callbacks.
             * Otherwise, if there are no AIO requests, qemu_aio_wait() would
             * wait indefinitely.
             */
            if (node->io_flush && node->io_flush(node->opaque) == 0)
                continue;

            if (!node->deleted && node->io_read) {
                FD_SET(node->fd, &rdfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
            if (!node->deleted && node->io_write) {
                FD_SET(node->fd, &wrfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
        }

        walking_handlers = 0;

        /* No AIO operations?  Get us out of here */
        if (max_fd == -1)
            break;

        /* wait until next event */
        ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
        /* NOTE(review): on EINTR this `continue` jumps to the do-while
         * condition with ret == -1, so the loop actually EXITS rather
         * than re-entering select().  Callers such as qemu_aio_flush
         * loop around this function, so it appears benign — confirm. */
        if (ret == -1 && errno == EINTR)
            continue;

        /* if we have any readable fds, dispatch event */
        if (ret > 0) {
            walking_handlers = 1;

            /* we have to walk very carefully in case
             * qemu_aio_set_fd_handler is called while we're walking */
            node = LIST_FIRST(&aio_handlers);
            while (node) {
                AioHandler *tmp;

                if (!node->deleted &&
                    FD_ISSET(node->fd, &rdfds) &&
                    node->io_read) {
                    node->io_read(node->opaque);
                }
                if (!node->deleted &&
                    FD_ISSET(node->fd, &wrfds) &&
                    node->io_write) {
                    node->io_write(node->opaque);
                }

                /* Advance before possibly freeing the current node. */
                tmp = node;
                node = LIST_NEXT(node, node);

                /* Reclaim deletions deferred while walking_handlers was
                 * set (see qemu_aio_set_fd_handler). */
                if (tmp->deleted) {
                    LIST_REMOVE(tmp, node);
                    qemu_free(tmp);
                }
            }

            walking_handlers = 0;
        }
    } while (ret == 0);
}