/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "aio-posix.h"

/* Stop userspace polling on a handler if it isn't active for some time */
#define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND)

bool aio_poll_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->poll_disable_cnt);
}
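
/*
 * Added commentary: poll_disable_cnt tracks how many registered handlers
 * lack an io_poll() callback.  aio_set_fd_handler() adjusts it via
 * poll_disable_change, deliberately without memory ordering (see the
 * comment there); a non-zero value means userspace polling cannot make
 * progress on every handler, so a real blocking wait is needed.
 */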

void aio_add_ready_handler(AioHandlerList *ready_list,
                           AioHandler *node,
                           int revents)
{
    QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
    node->pfd.revents = revents;
    QLIST_INSERT_HEAD(ready_list, node, node_ready);
}

static void aio_add_poll_ready_handler(AioHandlerList *ready_list,
                                       AioHandler *node)
{
    QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
    node->poll_ready = true;
    QLIST_INSERT_HEAD(ready_list, node, node_ready);
}

static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd) {
            if (!QLIST_IS_INSERTED(node, node_deleted)) {
                return node;
            }
        }
    }

    return NULL;
}

static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /* If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    node->pfd.revents = 0;
    node->poll_ready = false;

    /* If the fd monitor has already marked it deleted, leave it alone */
    if (QLIST_IS_INSERTED(node, node_deleted)) {
        return false;
    }

    /* If a read is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted);
        return false;
    }
    /* Otherwise, delete it for real.  We can't just mark it as
     * deleted because deleted nodes are only cleaned up while
     * no one is walking the handlers list.
     */
    QLIST_SAFE_REMOVE(node, node_poll);
    QLIST_REMOVE(node, node);
    return true;
}
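
/*
 * Added commentary: handler updates are copy-based.  aio_set_fd_handler()
 * always allocates a fresh AioHandler and publishes it with
 * QLIST_INSERT_HEAD_RCU() while the old node, if any, is unlinked or parked
 * on the deleted list.  Concurrent walkers holding the ctx->list_lock count
 * therefore never observe a half-updated handler.
 */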

void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque)
{
    AioHandler *node;
    AioHandler *new_node = NULL;
    bool is_new = false;
    bool deleted = false;
    int poll_disable_change;

    if (io_poll && !io_poll_ready) {
        io_poll = NULL; /* polling only makes sense if there is a handler */
    }

    qemu_lockcnt_lock(&ctx->list_lock);

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            qemu_lockcnt_unlock(&ctx->list_lock);
            return;
        }
        /* Clean events in order to unregister fd from the ctx epoll. */
        node->pfd.events = 0;

        poll_disable_change = -!node->io_poll;
    } else {
        poll_disable_change = !io_poll - (node && !node->io_poll);
        if (node == NULL) {
            is_new = true;
        }
        /* Alloc and insert if it's not already there */
        new_node = g_new0(AioHandler, 1);

        /* Update handler with latest information */
        new_node->io_read = io_read;
        new_node->io_write = io_write;
        new_node->io_poll = io_poll;
        new_node->io_poll_ready = io_poll_ready;
        new_node->opaque = opaque;

        if (is_new) {
            new_node->pfd.fd = fd;
        } else {
            new_node->pfd = node->pfd;
        }
        g_source_add_poll(&ctx->source, &new_node->pfd);

        new_node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        new_node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node);
    }

    /* No need to order poll_disable_cnt writes against other updates;
     * the counter is only used to avoid wasting time and latency on
     * iterated polling when the system call will be ultimately necessary.
     * Changing handlers is a rare event, and a little wasted polling until
     * the aio_notify below is not an issue.
     */
    qatomic_set(&ctx->poll_disable_cnt,
                qatomic_read(&ctx->poll_disable_cnt) + poll_disable_change);

    ctx->fdmon_ops->update(ctx, node, new_node);
    if (node) {
        deleted = aio_remove_fd_handler(ctx, node);
    }
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);

    if (deleted) {
        g_free(node);
    }
}
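
/*
 * Added usage sketch (hypothetical caller names): register a read handler,
 * then later unregister it by passing NULL for every callback, which the
 * deletion branch above handles.
 *
 *     aio_set_fd_handler(ctx, fd, my_read_cb, NULL, NULL, NULL, my_state);
 *     ...
 *     aio_set_fd_handler(ctx, fd, NULL, NULL, NULL, NULL, NULL);
 */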

static void aio_set_fd_poll(AioContext *ctx, int fd,
                            IOHandler *io_poll_begin,
                            IOHandler *io_poll_end)
{
    AioHandler *node = find_aio_handler(ctx, fd);

    if (!node) {
        return;
    }

    node->io_poll_begin = io_poll_begin;
    node->io_poll_end = io_poll_end;
}

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                       (IOHandler *)io_read, NULL, io_poll,
                       (IOHandler *)io_poll_ready, notifier);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    aio_set_fd_poll(ctx, event_notifier_get_fd(notifier),
                    (IOHandler *)io_poll_begin,
                    (IOHandler *)io_poll_end);
}
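
/*
 * Added commentary: io_poll_begin()/io_poll_end() bracket a busy-polling
 * phase.  Handlers can use them to suppress their event notification
 * mechanism while the event loop polls in userspace anyway, then re-enable
 * it before blocking; hence the final io_poll() call below, which catches
 * an event that raced with re-enabling notifications.
 */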

static bool poll_set_started(AioContext *ctx, AioHandlerList *ready_list,
                             bool started)
{
    AioHandler *node;
    bool progress = false;

    if (started == ctx->poll_started) {
        return false;
    }

    ctx->poll_started = started;

    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->poll_aio_handlers, node_poll) {
        IOHandler *fn;

        if (QLIST_IS_INSERTED(node, node_deleted)) {
            continue;
        }

        if (started) {
            fn = node->io_poll_begin;
        } else {
            fn = node->io_poll_end;
        }

        if (fn) {
            fn(node->opaque);
        }

        /* Poll one last time in case ->io_poll_end() raced with the event */
        if (!started && node->io_poll(node->opaque)) {
            aio_add_poll_ready_handler(ready_list, node);
            progress = true;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return progress;
}

bool aio_prepare(AioContext *ctx)
{
    AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);

    /* Poll mode cannot be used with glib's event loop, disable it. */
    poll_set_started(ctx, &ready_list, false);
    /* TODO what to do with this list? */

    return false;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int revents;

        /* TODO should this check poll ready? */
        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
            result = true;
            break;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
            result = true;
            break;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return result;
}
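
/*
 * Added commentary: deleted handlers may only be freed once no walker holds
 * a reference to the handler list.  qemu_lockcnt_dec_if_lock() succeeds only
 * for the outermost user, atomically converting its count into the exclusive
 * lock; nested callers simply back off and leave the freeing to their
 * parent, as the comment below notes.
 */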

static void aio_free_deleted_handlers(AioContext *ctx)
{
    AioHandler *node;

    if (QLIST_EMPTY_RCU(&ctx->deleted_aio_handlers)) {
        return;
    }
    if (!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
        return; /* we are nested, let the parent do the freeing */
    }

    while ((node = QLIST_FIRST_RCU(&ctx->deleted_aio_handlers))) {
        QLIST_REMOVE(node, node);
        QLIST_REMOVE(node, node_deleted);
        QLIST_SAFE_REMOVE(node, node_poll);
        g_free(node);
    }

    qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
}

static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
{
    bool progress = false;
    bool poll_ready;
    int revents;

    revents = node->pfd.revents & node->pfd.events;
    node->pfd.revents = 0;

    poll_ready = node->poll_ready;
    node->poll_ready = false;

    /*
     * Start polling AioHandlers when they become ready because activity is
     * likely to continue.  Note that starvation is theoretically possible when
     * fdmon_supports_polling(), but only until the fd fires for the first
     * time.
     */
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        !QLIST_IS_INSERTED(node, node_poll) &&
        node->io_poll) {
        trace_poll_add(ctx, node, node->pfd.fd, revents);
        if (ctx->poll_started && node->io_poll_begin) {
            node->io_poll_begin(node->opaque);
        }
        QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
    }
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        poll_ready && revents == 0 && node->io_poll_ready) {
        /*
         * Remove temporarily to avoid infinite loops when ->io_poll_ready()
         * calls aio_poll() before clearing the condition that made the poll
         * handler become ready.
         */
        QLIST_SAFE_REMOVE(node, node_poll);

        node->io_poll_ready(node->opaque);

        if (!QLIST_IS_INSERTED(node, node_poll)) {
            QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
        }

        /*
         * Return early since revents was zero.  aio_notify() does not count as
         * progress.
         */
        return node->opaque != &ctx->notifier;
    }

    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
        node->io_read) {
        node->io_read(node->opaque);

        /* aio_notify() does not count as progress */
        if (node->opaque != &ctx->notifier) {
            progress = true;
        }
    }
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        (revents & (G_IO_OUT | G_IO_ERR)) &&
        node->io_write) {
        node->io_write(node->opaque);
        progress = true;
    }

    return progress;
}

/*
 * If we have a list of ready handlers then this is more efficient than
 * scanning all handlers with aio_dispatch_handlers().
 */
static bool aio_dispatch_ready_handlers(AioContext *ctx,
                                        AioHandlerList *ready_list)
{
    bool progress = false;
    AioHandler *node;

    while ((node = QLIST_FIRST(ready_list))) {
        QLIST_REMOVE(node, node_ready);
        progress = aio_dispatch_handler(ctx, node) || progress;
    }

    return progress;
}

/* Slower than aio_dispatch_ready_handlers() but only used via glib */
static bool aio_dispatch_handlers(AioContext *ctx)
{
    AioHandler *node, *tmp;
    bool progress = false;

    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        progress = aio_dispatch_handler(ctx, node) || progress;
    }

    return progress;
}

void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx);
    aio_free_deleted_handlers(ctx);
    qemu_lockcnt_dec(&ctx->list_lock);

    timerlistgroup_run_timers(&ctx->tlg);
}

static bool run_poll_handlers_once(AioContext *ctx,
                                   AioHandlerList *ready_list,
                                   int64_t now,
                                   int64_t *timeout)
{
    bool progress = false;
    AioHandler *node;
    AioHandler *tmp;

    QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
        if (node->io_poll(node->opaque)) {
            aio_add_poll_ready_handler(ready_list, node);

            node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;

            /*
             * Polling was successful, exit try_poll_mode immediately
             * to adjust the next polling time.
             */
            *timeout = 0;
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }

        /* Caller handles freeing deleted nodes.  Don't do it here. */
    }

    return progress;
}

static bool fdmon_supports_polling(AioContext *ctx)
{
    return ctx->fdmon_ops->need_wait != aio_poll_disabled;
}
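
/*
 * Added commentary: this works by identity comparison.  A monitoring
 * backend with nothing smarter to report installs aio_poll_disabled()
 * itself as its ->need_wait() callback, and that choice is taken here to
 * mean the backend cannot safely poll only a subset of handlers in
 * userspace (see remove_idle_poll_handlers() below).
 */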

static bool remove_idle_poll_handlers(AioContext *ctx,
                                      AioHandlerList *ready_list,
                                      int64_t now)
{
    AioHandler *node;
    AioHandler *tmp;
    bool progress = false;

    /*
     * File descriptor monitoring implementations without userspace polling
     * support suffer from starvation when a subset of handlers is polled
     * because fds will not be processed in a timely fashion.  Don't remove
     * idle poll handlers.
     */
    if (!fdmon_supports_polling(ctx)) {
        return false;
    }

    QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
        if (node->poll_idle_timeout == 0LL) {
            node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;
        } else if (now >= node->poll_idle_timeout) {
            trace_poll_remove(ctx, node, node->pfd.fd);
            node->poll_idle_timeout = 0LL;
            QLIST_SAFE_REMOVE(node, node_poll);
            if (ctx->poll_started && node->io_poll_end) {
                node->io_poll_end(node->opaque);

                /*
                 * Final poll in case ->io_poll_end() races with an event.
                 * Nevermind about re-adding the handler in the rare case where
                 * this causes progress.
                 */
                if (node->io_poll(node->opaque)) {
                    aio_add_poll_ready_handler(ready_list, node);
                    progress = true;
                }
            }
        }
    }

    return progress;
}

/* run_poll_handlers:
 * @ctx: the AioContext
 * @ready_list: the list to place ready handlers on
 * @max_ns: maximum time to poll for, in nanoseconds
 *
 * Polls for a given time.
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool run_poll_handlers(AioContext *ctx, AioHandlerList *ready_list,
                              int64_t max_ns, int64_t *timeout)
{
    bool progress;
    int64_t start_time, elapsed_time;

    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);

    trace_run_poll_handlers_begin(ctx, max_ns, *timeout);

    /*
     * Optimization: ->io_poll() handlers often contain RCU read critical
     * sections and we therefore see many rcu_read_lock() -> rcu_read_unlock()
     * -> rcu_read_lock() -> ... sequences with expensive memory
     * synchronization primitives.  Make the entire polling loop an RCU
     * critical section because nested rcu_read_lock()/rcu_read_unlock() calls
     * are cheap.
     */
    RCU_READ_LOCK_GUARD();

    start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    do {
        progress = run_poll_handlers_once(ctx, ready_list,
                                          start_time, timeout);
        elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
        max_ns = qemu_soonest_timeout(*timeout, max_ns);
        assert(!(max_ns && progress));
    } while (elapsed_time < max_ns && !ctx->fdmon_ops->need_wait(ctx));

    if (remove_idle_poll_handlers(ctx, ready_list,
                                  start_time + elapsed_time)) {
        *timeout = 0;
        progress = true;
    }

    /* If time has passed with no successful polling, adjust *timeout to
     * keep the same ending time.
     */
    if (*timeout != -1) {
        *timeout -= MIN(*timeout, elapsed_time);
    }

    trace_run_poll_handlers_end(ctx, progress, *timeout);

    return progress;
}

/* try_poll_mode:
 * @ctx: the AioContext
 * @ready_list: list to add handlers that need to be run
 * @timeout: timeout for blocking wait, computed by the caller and updated if
 *           polling succeeds.
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
                          int64_t *timeout)
{
    int64_t max_ns;

    if (QLIST_EMPTY_RCU(&ctx->poll_aio_handlers)) {
        return false;
    }

    max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
    if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
        /*
         * Enable poll mode.  It pairs with the poll_set_started() in
         * aio_poll() which disables poll mode.
         */
        poll_set_started(ctx, ready_list, true);

        if (run_poll_handlers(ctx, ready_list, max_ns, timeout)) {
            return true;
        }
    }
    return false;
}
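
/*
 * Added commentary on the overall flow of aio_poll(): first try userspace
 * polling (try_poll_mode), then arm notify_me so aio_notify() knows a
 * blocking wait may be in progress, then call the fdmon backend's wait()
 * unless polling already produced work, and finally dispatch bottom halves,
 * ready fd handlers and timers while adaptively resizing the polling window.
 */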

bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
    bool progress;
    bool use_notify_me;
    int64_t timeout;
    int64_t start = 0;

    /*
     * There cannot be two concurrent aio_poll calls for the same AioContext (or
     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
     *
     * aio_poll() may only be called in the AioContext's thread.  iohandler_ctx
     * is special in that it runs in the main thread, but that thread's context
     * is qemu_aio_context.
     */
    assert(in_aio_context_home_thread(ctx == iohandler_get_aio_context() ?
                                      qemu_get_aio_context() : ctx));

    qemu_lockcnt_inc(&ctx->list_lock);

    if (ctx->poll_max_ns) {
        start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

    timeout = blocking ? aio_compute_timeout(ctx) : 0;
    progress = try_poll_mode(ctx, &ready_list, &timeout);
    assert(!(timeout && progress));

    /*
     * aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    use_notify_me = timeout != 0;
    if (use_notify_me) {
        qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2);
        /*
         * Write ctx->notify_me before reading ctx->notified.  Pairs with
         * smp_mb in aio_notify().
         */
        smp_mb();

        /* Don't block if aio_notify() was called */
        if (qatomic_read(&ctx->notified)) {
            timeout = 0;
        }
    }

    /* If polling is allowed, non-blocking aio_poll does not need the
     * system call---a single round of run_poll_handlers_once suffices.
     */
    if (timeout || ctx->fdmon_ops->need_wait(ctx)) {
        /*
         * Disable poll mode.  Poll mode should be disabled before the call
         * to ctx->fdmon_ops->wait() so that the guest's notification can
         * wake up IO threads when some work becomes pending.  It is
         * essential to avoid hangs or unnecessary latency.
         */
        if (poll_set_started(ctx, &ready_list, false)) {
            timeout = 0;
            progress = true;
        }

        ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
    }

    if (use_notify_me) {
        /* Finish the poll before clearing the flag.  */
        qatomic_store_release(&ctx->notify_me,
                              qatomic_read(&ctx->notify_me) - 2);
    }

    aio_notify_accept(ctx);
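
    /*
     * Added commentary: the block below is a multiplicative grow/shrink
     * controller for the polling window.  Worked example, assuming the
     * hypothetical setting poll_max_ns=32000 with poll_grow and poll_shrink
     * left at 0 (grow defaults to 2, shrink to giving up): poll_ns steps
     * 0 -> 4000 -> 8000 -> 16000 -> 32000 while blocking stays short, and
     * drops straight back to 0 the first time block_ns exceeds poll_max_ns.
     */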

    /* Adjust polling time */
    if (ctx->poll_max_ns) {
        int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;

        if (block_ns <= ctx->poll_ns) {
            /* This is the sweet spot, no adjustment needed */
        } else if (block_ns > ctx->poll_max_ns) {
            /* We'd have to poll for too long, poll less */
            int64_t old = ctx->poll_ns;

            if (ctx->poll_shrink) {
                ctx->poll_ns /= ctx->poll_shrink;
            } else {
                ctx->poll_ns = 0;
            }

            trace_poll_shrink(ctx, old, ctx->poll_ns);
        } else if (ctx->poll_ns < ctx->poll_max_ns &&
                   block_ns < ctx->poll_max_ns) {
            /* There is room to grow, poll longer */
            int64_t old = ctx->poll_ns;
            int64_t grow = ctx->poll_grow;

            if (grow == 0) {
                grow = 2;
            }

            if (ctx->poll_ns) {
                ctx->poll_ns *= grow;
            } else {
                ctx->poll_ns = 4000; /* start polling at 4 microseconds */
            }

            if (ctx->poll_ns > ctx->poll_max_ns) {
                ctx->poll_ns = ctx->poll_max_ns;
            }

            trace_poll_grow(ctx, old, ctx->poll_ns);
        }
    }

    progress |= aio_bh_poll(ctx);
    progress |= aio_dispatch_ready_handlers(ctx, &ready_list);

    aio_free_deleted_handlers(ctx);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}
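
/*
 * Added commentary: fdmon_poll_ops is installed first so that it remains
 * the fallback if neither io_uring nor epoll can be set up; the setup
 * helpers below replace ctx->fdmon_ops on success.
 */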

void aio_context_setup(AioContext *ctx)
{
    ctx->fdmon_ops = &fdmon_poll_ops;
    ctx->epollfd = -1;

    /* Use the fastest fd monitoring implementation if available */
    if (fdmon_io_uring_setup(ctx)) {
        return;
    }

    fdmon_epoll_setup(ctx);
}

void aio_context_destroy(AioContext *ctx)
{
    fdmon_io_uring_destroy(ctx);
    fdmon_epoll_disable(ctx);
    aio_free_deleted_handlers(ctx);
}

void aio_context_use_g_source(AioContext *ctx)
{
    /*
     * Disable io_uring when the glib main loop is used because it doesn't
     * support mixed glib/aio_poll() usage.  It relies on aio_poll() being
     * called regularly so that changes to the monitored file descriptors are
     * submitted, otherwise a list of pending fd handlers builds up.
     */
    fdmon_io_uring_destroy(ctx);
    aio_free_deleted_handlers(ctx);
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    /* No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->poll_max_ns = max_ns;
    ctx->poll_ns = 0;
    ctx->poll_grow = grow;
    ctx->poll_shrink = shrink;

    aio_notify(ctx);
}

void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch)
{
    /*
     * No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->aio_max_batch = max_batch;

    aio_notify(ctx);
}