소스 검색

aio: Get rid of qemu_aio_flush()

There are no remaining users, and new users should probably be
using bdrv_drain_all() in the first place.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Kevin Wolf 12 년 전
부모
커밋
c57b6656c3
6개의 변경된 파일에 5개의 추가 작업 그리고 20개의 삭제 작업
  1. 0 5
      async.c
  2. 1 1
      block/commit.c
  3. 1 1
      block/mirror.c
  4. 1 1
      block/stream.c
  5. 0 5
      main-loop.c
  6. 2 7
      qemu-aio.h

+ 0 - 5
async.c

@@ -215,8 +215,3 @@ void aio_context_unref(AioContext *ctx)
 {
     g_source_unref(&ctx->source);
 }
-
-void aio_flush(AioContext *ctx)
-{
-    while (aio_poll(ctx, true));
-}

+ 1 - 1
block/commit.c

@@ -103,7 +103,7 @@ static void coroutine_fn commit_run(void *opaque)
 
 
 wait:
         /* Note that even when no rate limit is applied we need to yield
-         * with no pending I/O here so that qemu_aio_flush() returns.
+         * with no pending I/O here so that bdrv_drain_all() returns.
          */
         block_job_sleep_ns(&s->common, rt_clock, delay_ns);
         if (block_job_is_cancelled(&s->common)) {

+ 1 - 1
block/mirror.c

@@ -205,7 +205,7 @@ static void coroutine_fn mirror_run(void *opaque)
             }
 
             /* Note that even when no rate limit is applied we need to yield
-             * with no pending I/O here so that qemu_aio_flush() returns.
+             * with no pending I/O here so that bdrv_drain_all() returns.
              */
             block_job_sleep_ns(&s->common, rt_clock, delay_ns);
             if (block_job_is_cancelled(&s->common)) {

+ 1 - 1
block/stream.c

@@ -108,7 +108,7 @@ static void coroutine_fn stream_run(void *opaque)
 
 
 wait:
         /* Note that even when no rate limit is applied we need to yield
-         * with no pending I/O here so that qemu_aio_flush() returns.
+         * with no pending I/O here so that bdrv_drain_all() returns.
          */
         block_job_sleep_ns(&s->common, rt_clock, delay_ns);
         if (block_job_is_cancelled(&s->common)) {

+ 0 - 5
main-loop.c

@@ -432,11 +432,6 @@ QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
     return aio_bh_new(qemu_aio_context, cb, opaque);
 }
 
-void qemu_aio_flush(void)
-{
-    aio_flush(qemu_aio_context);
-}
-
 bool qemu_aio_wait(void)
 {
     return aio_poll(qemu_aio_context, true);

+ 2 - 7
qemu-aio.h

@@ -162,10 +162,6 @@ void qemu_bh_cancel(QEMUBH *bh);
  */
 void qemu_bh_delete(QEMUBH *bh);
 
-/* Flush any pending AIO operation. This function will block until all
- * outstanding AIO operations have been completed or cancelled. */
-void aio_flush(AioContext *ctx);
-
 /* Return whether there are any pending callbacks from the GSource
  * attached to the AioContext.
  *
@@ -196,7 +192,7 @@ typedef int (AioFlushHandler)(void *opaque);
 
 
 /* Register a file descriptor and associated callbacks.  Behaves very similarly
  * to qemu_set_fd_handler2.  Unlike qemu_set_fd_handler2, these callbacks will
- * be invoked when using either qemu_aio_wait() or qemu_aio_flush().
+ * be invoked when using qemu_aio_wait().
  *
  * Code that invokes AIO completion functions should rely on this function
  * instead of qemu_set_fd_handler[2].
@@ -211,7 +207,7 @@ void aio_set_fd_handler(AioContext *ctx,
 
 
 /* Register an event notifier and associated callbacks.  Behaves very similarly
  * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these callbacks
- * will be invoked when using either qemu_aio_wait() or qemu_aio_flush().
+ * will be invoked when using qemu_aio_wait().
  *
  * Code that invokes AIO completion functions should rely on this function
  * instead of event_notifier_set_handler.
@@ -228,7 +224,6 @@ GSource *aio_get_g_source(AioContext *ctx);
 
 
 /* Functions to operate on the main QEMU AioContext.  */
 
-void qemu_aio_flush(void);
 bool qemu_aio_wait(void);
 void qemu_aio_set_event_notifier(EventNotifier *notifier,
                                  EventNotifierHandler *io_read,