@@ -133,6 +133,10 @@ static void monitor_qmp_respond(MonitorQMP *mon, QDict *rsp)
     }
 }
 
+/*
+ * Runs outside of coroutine context for OOB commands, but in
+ * coroutine context for everything else.
+ */
 static void monitor_qmp_dispatch(MonitorQMP *mon, QObject *req)
 {
     QDict *rsp;
@@ -206,43 +210,99 @@ static QMPRequest *monitor_qmp_requests_pop_any_with_lock(void)
     return req_obj;
 }
 
-void monitor_qmp_bh_dispatcher(void *data)
+void coroutine_fn monitor_qmp_dispatcher_co(void *data)
 {
-    QMPRequest *req_obj = monitor_qmp_requests_pop_any_with_lock();
+    QMPRequest *req_obj = NULL;
     QDict *rsp;
     bool need_resume;
     MonitorQMP *mon;
 
-    if (!req_obj) {
-        return;
-    }
+    while (true) {
+        assert(qatomic_mb_read(&qmp_dispatcher_co_busy) == true);
 
-    mon = req_obj->mon;
-    /* qmp_oob_enabled() might change after "qmp_capabilities" */
-    need_resume = !qmp_oob_enabled(mon) ||
-        mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1;
-    qemu_mutex_unlock(&mon->qmp_queue_lock);
-    if (req_obj->req) {
-        QDict *qdict = qobject_to(QDict, req_obj->req);
-        QObject *id = qdict ? qdict_get(qdict, "id") : NULL;
-        trace_monitor_qmp_cmd_in_band(qobject_get_try_str(id) ?: "");
-        monitor_qmp_dispatch(mon, req_obj->req);
-    } else {
-        assert(req_obj->err);
-        rsp = qmp_error_response(req_obj->err);
-        req_obj->err = NULL;
-        monitor_qmp_respond(mon, rsp);
-        qobject_unref(rsp);
-    }
+        /*
+         * Mark the dispatcher as not busy already here so that we
+         * don't miss any new requests coming in the middle of our
+         * processing.
+         */
+        qatomic_mb_set(&qmp_dispatcher_co_busy, false);
+
+        while (!(req_obj = monitor_qmp_requests_pop_any_with_lock())) {
+            /*
+             * No more requests to process. Wait to be reentered from
+             * handle_qmp_command() when it pushes more requests, or
+             * from monitor_cleanup() when it requests shutdown.
+             */
+            if (!qmp_dispatcher_co_shutdown) {
+                qemu_coroutine_yield();
+
+                /*
+                 * busy must be set to true again by whoever
+                 * rescheduled us to avoid double scheduling
+                 */
+                assert(qatomic_xchg(&qmp_dispatcher_co_busy, false) == true);
+            }
+
+            /*
+             * qmp_dispatcher_co_shutdown may have changed if we
+             * yielded and were reentered from monitor_cleanup()
+             */
+            if (qmp_dispatcher_co_shutdown) {
+                return;
+            }
+        }
 
-    if (need_resume) {
-        /* Pairs with the monitor_suspend() in handle_qmp_command() */
-        monitor_resume(&mon->common);
-    }
-    qmp_request_free(req_obj);
+        if (qatomic_xchg(&qmp_dispatcher_co_busy, true) == true) {
+            /*
+             * Someone rescheduled us (probably because a new request
+             * came in), but we didn't actually yield. Do that now,
+             * only to be immediately reentered and removed from the
+             * list of scheduled coroutines.
+             */
+            qemu_coroutine_yield();
+        }
 
-    /* Reschedule instead of looping so the main loop stays responsive */
-    qemu_bh_schedule(qmp_dispatcher_bh);
+        /*
+         * Move the coroutine from iohandler_ctx to qemu_aio_context for
+         * executing the command handler so that it can make progress if it
+         * involves an AIO_WAIT_WHILE().
+         */
+        aio_co_schedule(qemu_get_aio_context(), qmp_dispatcher_co);
+        qemu_coroutine_yield();
+
+        mon = req_obj->mon;
+        /* qmp_oob_enabled() might change after "qmp_capabilities" */
+        need_resume = !qmp_oob_enabled(mon) ||
+            mon->qmp_requests->length == QMP_REQ_QUEUE_LEN_MAX - 1;
+        qemu_mutex_unlock(&mon->qmp_queue_lock);
+        if (req_obj->req) {
+            QDict *qdict = qobject_to(QDict, req_obj->req);
+            QObject *id = qdict ? qdict_get(qdict, "id") : NULL;
+            trace_monitor_qmp_cmd_in_band(qobject_get_try_str(id) ?: "");
+            monitor_qmp_dispatch(mon, req_obj->req);
+        } else {
+            assert(req_obj->err);
+            rsp = qmp_error_response(req_obj->err);
+            req_obj->err = NULL;
+            monitor_qmp_respond(mon, rsp);
+            qobject_unref(rsp);
+        }
+
+        if (need_resume) {
+            /* Pairs with the monitor_suspend() in handle_qmp_command() */
+            monitor_resume(&mon->common);
+        }
+        qmp_request_free(req_obj);
+
+        /*
+         * Yield and reschedule so the main loop stays responsive.
+         *
+         * Move back to iohandler_ctx so that nested event loops for
+         * qemu_aio_context don't start new monitor commands.
+         */
+        aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co);
+        qemu_coroutine_yield();
+    }
 }
 
 static void handle_qmp_command(void *opaque, QObject *req, Error *err)
@@ -303,7 +363,9 @@ static void handle_qmp_command(void *opaque, QObject *req, Error *err)
     qemu_mutex_unlock(&mon->qmp_queue_lock);
 
     /* Kick the dispatcher routine */
-    qemu_bh_schedule(qmp_dispatcher_bh);
+    if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
+        aio_co_wake(qmp_dispatcher_co);
+    }
 }
 
 static void monitor_qmp_read(void *opaque, const uint8_t *buf, int size)
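
The crux of this change is the handshake on qmp_dispatcher_co_busy: handle_qmp_command() wakes the coroutine only on a false-to-true transition of the flag, and the dispatcher clears the flag before it drains the queue, so a request pushed mid-drain still produces exactly one wake-up. Below is a standalone C11/pthreads sketch of that pattern, not QEMU code: a condition variable stands in for aio_co_wake()/qemu_coroutine_yield(), a counter stands in for the request queue, and the names (busy, queued, shutdown_req, push_request, dispatcher) are all hypothetical.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static atomic_bool busy;         /* models qmp_dispatcher_co_busy */
static int queued;               /* models the per-monitor request queue */
static bool shutdown_req;        /* models qmp_dispatcher_co_shutdown */

/* Producer side, mirroring the "Kick the dispatcher routine" hunk. */
static void push_request(void)
{
    pthread_mutex_lock(&lock);
    queued++;
    pthread_mutex_unlock(&lock);
    /*
     * Wake the dispatcher only on a false -> true transition of busy;
     * if it was already true, the dispatcher is running (or already
     * scheduled) and will pick the request up on its own.
     */
    if (!atomic_exchange(&busy, true)) {
        pthread_cond_signal(&wake);   /* stands in for aio_co_wake() */
    }
}

/* Consumer side, mirroring the outer loop of monitor_qmp_dispatcher_co(). */
static void *dispatcher(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (true) {
        /*
         * Clear busy before looking at the queue, so any request pushed
         * after this point is guaranteed to signal us again.
         */
        atomic_store(&busy, false);
        while (queued == 0 && !shutdown_req) {
            pthread_cond_wait(&wake, &lock);  /* the coroutine's "yield" */
        }
        if (shutdown_req) {
            break;
        }
        queued--;
        printf("dispatched one request, %d left\n", queued);
    }
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, dispatcher, NULL);
    push_request();
    push_request();
    pthread_mutex_lock(&lock);
    shutdown_req = true;
    pthread_mutex_unlock(&lock);
    pthread_cond_signal(&wake);
    pthread_join(t, NULL);
    return 0;
}

In this sketch, as in the patch, every wake-up corresponds to exactly one false-to-true flip of busy; that is the invariant behind the assert(qatomic_xchg(&qmp_dispatcher_co_busy, false) == true) after the yield in monitor_qmp_dispatcher_co().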