@@ -155,10 +155,11 @@ static int max_priority;
 static int glib_pollfds_idx;
 static int glib_n_poll_fds;
 
-static void glib_pollfds_fill(uint32_t *cur_timeout)
+static void glib_pollfds_fill(int64_t *cur_timeout)
 {
     GMainContext *context = g_main_context_default();
     int timeout = 0;
+    int64_t timeout_ns;
     int n;
 
     g_main_context_prepare(context, &max_priority);
@@ -174,9 +175,13 @@ static void glib_pollfds_fill(uint32_t *cur_timeout)
                                  glib_n_poll_fds);
     } while (n != glib_n_poll_fds);
 
-    if (timeout >= 0 && timeout < *cur_timeout) {
-        *cur_timeout = timeout;
+    if (timeout < 0) {
+        timeout_ns = -1;
+    } else {
+        timeout_ns = (int64_t)timeout * (int64_t)SCALE_MS;
     }
+
+    *cur_timeout = qemu_soonest_timeout(timeout_ns, *cur_timeout);
 }
 
 static void glib_pollfds_poll(void)
@@ -191,7 +196,7 @@ static void glib_pollfds_poll(void)
 
 #define MAX_MAIN_LOOP_SPIN (1000)
 
-static int os_host_main_loop_wait(uint32_t timeout)
+static int os_host_main_loop_wait(int64_t timeout)
 {
     int ret;
     static int spin_counter;
@@ -204,7 +209,7 @@ static int os_host_main_loop_wait(uint32_t timeout)
      * print a message to the screen. If we run into this condition, create
      * a fake timeout in order to give the VCPU threads a chance to run.
      */
-    if (spin_counter > MAX_MAIN_LOOP_SPIN) {
+    if (!timeout && (spin_counter > MAX_MAIN_LOOP_SPIN)) {
         static bool notified;
 
         if (!notified) {
@@ -214,19 +219,19 @@ static int os_host_main_loop_wait(uint32_t timeout)
             notified = true;
         }
 
-        timeout = 1;
+        timeout = SCALE_MS;
     }
 
-    if (timeout > 0) {
+    if (timeout) {
         spin_counter = 0;
         qemu_mutex_unlock_iothread();
     } else {
         spin_counter++;
     }
 
-    ret = g_poll((GPollFD *)gpollfds->data, gpollfds->len, timeout);
+    ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);
 
-    if (timeout > 0) {
+    if (timeout) {
         qemu_mutex_lock_iothread();
     }
 
@@ -373,7 +378,7 @@ static void pollfds_poll(GArray *pollfds, int nfds, fd_set *rfds,
     }
 }
 
-static int os_host_main_loop_wait(uint32_t timeout)
+static int os_host_main_loop_wait(int64_t timeout)
 {
     GMainContext *context = g_main_context_default();
     GPollFD poll_fds[1024 * 2]; /* this is probably overkill */
@@ -382,6 +387,7 @@ static int os_host_main_loop_wait(uint32_t timeout)
     PollingEntry *pe;
     WaitObjects *w = &wait_objects;
    gint poll_timeout;
+    int64_t poll_timeout_ns;
     static struct timeval tv0;
     fd_set rfds, wfds, xfds;
     int nfds;
@@ -419,12 +425,17 @@ static int os_host_main_loop_wait(uint32_t timeout)
         poll_fds[n_poll_fds + i].events = G_IO_IN;
     }
 
-    if (poll_timeout < 0 || timeout < poll_timeout) {
-        poll_timeout = timeout;
+    if (poll_timeout < 0) {
+        poll_timeout_ns = -1;
+    } else {
+        poll_timeout_ns = (int64_t)poll_timeout * (int64_t)SCALE_MS;
     }
 
+    poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout);
+
     qemu_mutex_unlock_iothread();
-    g_poll_ret = g_poll(poll_fds, n_poll_fds + w->num, poll_timeout);
+    g_poll_ret = qemu_poll_ns(poll_fds, n_poll_fds + w->num, poll_timeout_ns);
+
     qemu_mutex_lock_iothread();
     if (g_poll_ret > 0) {
         for (i = 0; i < w->num; i++) {
@@ -449,6 +460,7 @@ int main_loop_wait(int nonblocking)
 {
     int ret;
     uint32_t timeout = UINT32_MAX;
+    int64_t timeout_ns;
 
     if (nonblocking) {
         timeout = 0;
@@ -462,7 +474,18 @@ int main_loop_wait(int nonblocking)
     slirp_pollfds_fill(gpollfds);
 #endif
     qemu_iohandler_fill(gpollfds);
-    ret = os_host_main_loop_wait(timeout);
+
+    if (timeout == UINT32_MAX) {
+        timeout_ns = -1;
+    } else {
+        timeout_ns = (uint64_t)timeout * (int64_t)(SCALE_MS);
+    }
+
+    timeout_ns = qemu_soonest_timeout(timeout_ns,
+                                      timerlistgroup_deadline_ns(
+                                          &main_loop_tlg));
+
+    ret = os_host_main_loop_wait(timeout_ns);
     qemu_iohandler_poll(gpollfds, ret);
 #ifdef CONFIG_SLIRP
     slirp_pollfds_poll(gpollfds, (ret < 0));
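
Note: the helpers this patch leans on, qemu_soonest_timeout() and qemu_poll_ns(), come from elsewhere in the same series rather than from this hunk. The sketch below is only an approximation of the semantics assumed here, not the exact upstream code: SCALE_MS is the nanoseconds-per-millisecond scale factor (1000000 in QEMU's timer header), a timeout of -1 means "block forever", and qemu_soonest_timeout() picks the nearer of two nanosecond deadlines, with -1 losing to any finite value. qemu_poll_ns() is a g_poll()-compatible wrapper that takes its timeout in nanoseconds (using ppoll() where the host supports it).

/* Rough sketch of the assumed helper semantics; not the exact code
 * from the series. */
#include <stdint.h>

#define SCALE_MS 1000000LL          /* nanoseconds per millisecond */

/* Return the soonest of two nanosecond timeouts, where -1 means "no
 * timeout".  Cast to uint64_t, -1 becomes the largest possible value,
 * so it only wins when both arguments are -1. */
static inline int64_t qemu_soonest_timeout(int64_t timeout1, int64_t timeout2)
{
    return ((uint64_t)timeout1 < (uint64_t)timeout2) ? timeout1 : timeout2;
}

This is why the converted call sites above first normalise the glib millisecond timeout to nanoseconds (or -1), then fold in the caller's deadline and the timer-list deadline with qemu_soonest_timeout() before handing the result to qemu_poll_ns().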