
Merge remote-tracking branch 'remotes/ericb/tags/pull-qapi-2018-04-10' into staging

qapi patches for 2018-04-10

- Peter Xu: iotests: fix wait_until_completed()
- Peter Xu: iothread: workaround glib bug which hangs qmp-test
- Peter Xu: monitor: bind dispatch bh to iohandler context

# gpg: Signature made Tue 10 Apr 2018 14:15:09 BST
# gpg:                using RSA key A7A16B4A2527436A
# gpg: Good signature from "Eric Blake <eblake@redhat.com>"
# gpg:                 aka "Eric Blake (Free Software Programmer) <ebb9@byu.net>"
# gpg:                 aka "[jpeg image of size 6874]"
# Primary key fingerprint: 71C2 CC22 B1C4 6029 27D2  F3AA A7A1 6B4A 2527 436A

* remotes/ericb/tags/pull-qapi-2018-04-10:
  monitor: bind dispatch bh to iohandler context
  iothread: workaround glib bug which hangs qmp-test
  iotests: fix wait_until_completed()

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell, 7 years ago
commit 26d6a7c87b
3 files changed, 18 insertions, 11 deletions
  1. iothread.c: 14 added, 4 deleted
  2. monitor.c: 1 added, 1 deleted
  3. tests/qemu-iotests/iotests.py: 3 added, 6 deleted

+ 14 - 4
iothread.c

@@ -117,16 +117,26 @@ static void iothread_instance_finalize(Object *obj)
     IOThread *iothread = IOTHREAD(obj);
 
     iothread_stop(iothread);
+    /*
+     * Before glib2 2.33.10, glib2 has a bug where a GSource's context
+     * pointer may not be cleared even after the context has been
+     * destroyed (although it should be).  Free the AIO context earlier
+     * here to bypass that glib bug.
+     *
+     * We can remove this comment once the minimum supported glib2
+     * version is bumped to 2.33.10.  Until then, free the GSources
+     * before destroying any GMainContext.
+     */
+    if (iothread->ctx) {
+        aio_context_unref(iothread->ctx);
+        iothread->ctx = NULL;
+    }
     if (iothread->worker_context) {
         g_main_context_unref(iothread->worker_context);
         iothread->worker_context = NULL;
     }
     qemu_cond_destroy(&iothread->init_done_cond);
     qemu_mutex_destroy(&iothread->init_done_lock);
-    if (!iothread->ctx) {
-        return;
-    }
-    aio_context_unref(iothread->ctx);
 }
 
 static void iothread_complete(UserCreatable *obj, Error **errp)
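
The patch works by freeing the GSource-backed AioContext before the
GMainContext it was attached to, so that glib2 older than 2.33.10 never
sees a destroyed context with sources still pointing at it. As a purely
illustrative, glib-only sketch of that ordering (standalone, not QEMU
code, using only the public glib API):

/*
 * Hedged sketch: tear down the GSource before unreffing the GMainContext
 * it was attached to, mirroring the order iothread_instance_finalize()
 * now uses for the AioContext and worker_context.
 */
#include <glib.h>

int main(void)
{
    GMainContext *ctx = g_main_context_new();
    GSource *src = g_idle_source_new();

    g_source_attach(src, ctx);

    /* Drop the source first ... */
    g_source_destroy(src);
    g_source_unref(src);

    /* ... and only then the context it was attached to. */
    g_main_context_unref(ctx);
    return 0;
}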

+ 1 - 1
monitor.c

@@ -4467,7 +4467,7 @@ static void monitor_iothread_init(void)
      * have assumption to be run on main loop thread.  It would be
      * nice that one day we can remove this assumption in the future.
      */
-    mon_global.qmp_dispatcher_bh = aio_bh_new(qemu_get_aio_context(),
+    mon_global.qmp_dispatcher_bh = aio_bh_new(iohandler_get_aio_context(),
                                               monitor_qmp_bh_dispatcher,
                                               NULL);
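
The one-line change above moves the QMP dispatcher bottom half from the
default main-loop AioContext to the iohandler context, so the dispatcher
runs in the main loop's iohandler phase. A hedged sketch of that pattern,
compilable only inside the QEMU tree and using a hypothetical callback
name (my_dispatcher_bh) for illustration:

/*
 * Hedged sketch (QEMU-internal APIs): create a bottom half bound to the
 * iohandler AioContext and schedule it; the callback then runs in the
 * main thread's iohandler dispatch phase.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "block/aio.h"

static void my_dispatcher_bh(void *opaque)
{
    /* hypothetical placeholder: dispatch queued work here */
}

static QEMUBH *create_dispatcher_bh(void)
{
    QEMUBH *bh = aio_bh_new(iohandler_get_aio_context(),
                            my_dispatcher_bh, NULL);

    qemu_bh_schedule(bh);
    return bh;
}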
 

+ 3 - 6
tests/qemu-iotests/iotests.py

@@ -470,18 +470,15 @@ def cancel_and_wait(self, drive='drive0', force=False, resume=False):
 
     def wait_until_completed(self, drive='drive0', check_offset=True):
         '''Wait for a block job to finish, returning the event'''
-        completed = False
-        while not completed:
+        while True:
             for event in self.vm.get_qmp_events(wait=True):
                 if event['event'] == 'BLOCK_JOB_COMPLETED':
                     self.assert_qmp(event, 'data/device', drive)
                     self.assert_qmp_absent(event, 'data/error')
                     if check_offset:
                         self.assert_qmp(event, 'data/offset', event['data']['len'])
-                    completed = True
-
-        self.assert_no_active_block_jobs()
-        return event
+                    self.assert_no_active_block_jobs()
+                    return event
 
     def wait_ready(self, drive='drive0'):
         '''Wait until a block job BLOCK_JOB_READY event'''