@@ -30,7 +30,7 @@
 #include "sysemu/qtest.h"
 
 /* The ThrottleGroup structure (with its ThrottleState) is shared
- * among different BlockBackends and it's independent from
+ * among different ThrottleGroupMembers and it's independent from
  * AioContext, so in order to use it from different threads it needs
  * its own locking.
  *
@@ -40,26 +40,26 @@
  * The whole ThrottleGroup structure is private and invisible to
  * outside users, that only use it through its ThrottleState.
  *
- * In addition to the ThrottleGroup structure, BlockBackendPublic has
+ * In addition to the ThrottleGroup structure, ThrottleGroupMember has
  * fields that need to be accessed by other members of the group and
  * therefore also need to be protected by this lock. Once a
- * BlockBackend is registered in a group those fields can be accessed
+ * ThrottleGroupMember is registered in a group those fields can be accessed
  * by other threads any time.
  *
  * Again, all this is handled internally and is mostly transparent to
  * the outside. The 'throttle_timers' field however has an additional
  * constraint because it may be temporarily invalid (see for example
  * blk_set_aio_context()). Therefore in this file a thread will
- * access some other BlockBackend's timers only after verifying that
- * that BlockBackend has throttled requests in the queue.
+ * access some other ThrottleGroupMember's timers only after verifying that
+ * that ThrottleGroupMember has throttled requests in the queue.
  */
 typedef struct ThrottleGroup {
     char *name; /* This is constant during the lifetime of the group */
 
     QemuMutex lock; /* This lock protects the following four fields */
     ThrottleState ts;
-    QLIST_HEAD(, BlockBackendPublic) head;
-    BlockBackend *tokens[2];
+    QLIST_HEAD(, ThrottleGroupMember) head;
+    ThrottleGroupMember *tokens[2];
     bool any_timer_armed[2];
     QEMUClockType clock_type;
 
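
The container_of() calls added by this patch rely on ThrottleGroupMember being
embedded by value in BlockBackendPublic (as the throttle_group_member field
referenced by timer_cb() and throttle_group_register_tgm() below), so a pointer
to the member is enough to recover its owner. A minimal standalone sketch of
that pattern, using stand-in types rather than the real QEMU headers:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for the QEMU types; only the layout matters here. */
    typedef struct ThrottleGroupMember {
        void *throttle_state;
    } ThrottleGroupMember;

    typedef struct BlockBackendPublic {
        int some_other_field;
        ThrottleGroupMember throttle_group_member;  /* embedded by value */
    } BlockBackendPublic;

    /* Simplified form of QEMU's container_of() macro */
    #define container_of(ptr, type, member) \
        ((type *) ((char *) (ptr) - offsetof(type, member)))

    int main(void)
    {
        BlockBackendPublic blkp = { .some_other_field = 42 };
        ThrottleGroupMember *tgm = &blkp.throttle_group_member;

        /* Recover the owner from the embedded member, the same step
         * throttle_group_restart_queue() and throttle_group_register_tgm()
         * perform below */
        BlockBackendPublic *owner =
            container_of(tgm, BlockBackendPublic, throttle_group_member);

        printf("%d\n", owner->some_other_field);  /* prints 42 */
        return 0;
    }
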
@@ -140,114 +140,112 @@ void throttle_group_unref(ThrottleState *ts)
     qemu_mutex_unlock(&throttle_groups_lock);
 }
 
-/* Get the name from a BlockBackend's ThrottleGroup. The name (and the pointer)
+/* Get the name from a ThrottleGroupMember's group. The name (and the pointer)
  * is guaranteed to remain constant during the lifetime of the group.
  *
- * @blk: a BlockBackend that is member of a throttling group
+ * @tgm: a ThrottleGroupMember
  * @ret: the name of the group.
  */
-const char *throttle_group_get_name(BlockBackend *blk)
+const char *throttle_group_get_name(ThrottleGroupMember *tgm)
 {
-    BlockBackendPublic *blkp = blk_get_public(blk);
-    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
+    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
     return tg->name;
 }
 
-/* Return the next BlockBackend in the round-robin sequence, simulating a
- * circular list.
+/* Return the next ThrottleGroupMember in the round-robin sequence, simulating
+ * a circular list.
  *
  * This assumes that tg->lock is held.
  *
- * @blk: the current BlockBackend
- * @ret: the next BlockBackend in the sequence
+ * @tgm: the current ThrottleGroupMember
+ * @ret: the next ThrottleGroupMember in the sequence
  */
-static BlockBackend *throttle_group_next_blk(BlockBackend *blk)
+static ThrottleGroupMember *throttle_group_next_tgm(ThrottleGroupMember *tgm)
 {
-    BlockBackendPublic *blkp = blk_get_public(blk);
-    ThrottleState *ts = blkp->throttle_state;
+    ThrottleState *ts = tgm->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
-    BlockBackendPublic *next = QLIST_NEXT(blkp, round_robin);
+    ThrottleGroupMember *next = QLIST_NEXT(tgm, round_robin);
 
     if (!next) {
         next = QLIST_FIRST(&tg->head);
     }
 
-    return blk_by_public(next);
+    return next;
 }
 
 /*
- * Return whether a BlockBackend has pending requests.
+ * Return whether a ThrottleGroupMember has pending requests.
  *
  * This assumes that tg->lock is held.
  *
- * @blk: the BlockBackend
- * @is_write: the type of operation (read/write)
- * @ret: whether the BlockBackend has pending requests.
+ * @tgm: the ThrottleGroupMember
+ * @is_write: the type of operation (read/write)
+ * @ret: whether the ThrottleGroupMember has pending requests.
  */
-static inline bool blk_has_pending_reqs(BlockBackend *blk,
+static inline bool tgm_has_pending_reqs(ThrottleGroupMember *tgm,
                                         bool is_write)
 {
-    const BlockBackendPublic *blkp = blk_get_public(blk);
-    return blkp->pending_reqs[is_write];
+    return tgm->pending_reqs[is_write];
 }
 
-/* Return the next BlockBackend in the round-robin sequence with pending I/O
- * requests.
+/* Return the next ThrottleGroupMember in the round-robin sequence with pending
+ * I/O requests.
  *
  * This assumes that tg->lock is held.
  *
- * @blk: the current BlockBackend
+ * @tgm: the current ThrottleGroupMember
  * @is_write: the type of operation (read/write)
- * @ret: the next BlockBackend with pending requests, or blk if there is
- *       none.
+ * @ret: the next ThrottleGroupMember with pending requests, or tgm if
+ *       there is none.
  */
-static BlockBackend *next_throttle_token(BlockBackend *blk, bool is_write)
+static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm,
+                                                bool is_write)
 {
-    BlockBackendPublic *blkp = blk_get_public(blk);
-    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
-    BlockBackend *token, *start;
+    ThrottleState *ts = tgm->throttle_state;
+    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
+    ThrottleGroupMember *token, *start;
 
     start = token = tg->tokens[is_write];
 
     /* get next bs round in round robin style */
-    token = throttle_group_next_blk(token);
-    while (token != start && !blk_has_pending_reqs(token, is_write)) {
-        token = throttle_group_next_blk(token);
+    token = throttle_group_next_tgm(token);
+    while (token != start && !tgm_has_pending_reqs(token, is_write)) {
+        token = throttle_group_next_tgm(token);
     }
 
     /* If no IO are queued for scheduling on the next round robin token
-     * then decide the token is the current bs because chances are
-     * the current bs get the current request queued.
+     * then decide the token is the current tgm because chances are
+     * the current tgm got the current request queued.
      */
-    if (token == start && !blk_has_pending_reqs(token, is_write)) {
-        token = blk;
+    if (token == start && !tgm_has_pending_reqs(token, is_write)) {
+        token = tgm;
     }
 
-    /* Either we return the original BB, or one with pending requests */
-    assert(token == blk || blk_has_pending_reqs(token, is_write));
+    /* Either we return the original TGM, or one with pending requests */
+    assert(token == tgm || tgm_has_pending_reqs(token, is_write));
 
     return token;
 }
 
-/* Check if the next I/O request for a BlockBackend needs to be throttled or
- * not. If there's no timer set in this group, set one and update the token
- * accordingly.
+/* Check if the next I/O request for a ThrottleGroupMember needs to be
+ * throttled or not. If there's no timer set in this group, set one and update
+ * the token accordingly.
  *
  * This assumes that tg->lock is held.
  *
- * @blk: the current BlockBackend
+ * @tgm: the current ThrottleGroupMember
  * @is_write: the type of operation (read/write)
 * @ret: whether the I/O request needs to be throttled or not
 */
-static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
+static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
+                                          bool is_write)
 {
-    BlockBackendPublic *blkp = blk_get_public(blk);
-    ThrottleState *ts = blkp->throttle_state;
-    ThrottleTimers *tt = &blkp->throttle_timers;
+    ThrottleState *ts = tgm->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
+    ThrottleTimers *tt = &tgm->throttle_timers;
     bool must_wait;
 
-    if (atomic_read(&blkp->io_limits_disabled)) {
+    if (atomic_read(&tgm->io_limits_disabled)) {
         return false;
     }
 
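
throttle_group_next_tgm() and the scan in next_throttle_token() above simulate
a circular list: QLIST_NEXT() walks forward, QLIST_FIRST() wraps around at the
tail, and the walk stops either at the first member with pending requests or
after one full lap. A self-contained model of the same token scan, assuming a
plain array of members in place of the intrusive QLIST:

    #include <stdbool.h>
    #include <stdio.h>

    enum { N_MEMBERS = 4 };

    typedef struct {
        int id;
        unsigned pending_reqs;
    } Member;

    static Member members[N_MEMBERS];

    /* Wrap-around successor, like QLIST_NEXT() falling back to QLIST_FIRST() */
    static Member *next_member(Member *m)
    {
        return &members[(m - members + 1) % N_MEMBERS];
    }

    /* Mirrors next_throttle_token(): scan from the current token and return
     * the first member with pending requests, or fall back to the caller */
    static Member *next_token(Member *token, Member *caller)
    {
        Member *start = token;

        token = next_member(token);
        while (token != start && token->pending_reqs == 0) {
            token = next_member(token);
        }
        if (token == start && token->pending_reqs == 0) {
            token = caller;
        }
        return token;
    }

    int main(void)
    {
        for (int i = 0; i < N_MEMBERS; i++) {
            members[i].id = i;
        }
        members[2].pending_reqs = 1;

        /* prints "token -> member 2" */
        printf("token -> member %d\n", next_token(&members[0], &members[0])->id);
        return 0;
    }
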
@@ -258,30 +256,29 @@ static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
 
     must_wait = throttle_schedule_timer(ts, tt, is_write);
 
-    /* If a timer just got armed, set blk as the current token */
+    /* If a timer just got armed, set tgm as the current token */
     if (must_wait) {
-        tg->tokens[is_write] = blk;
+        tg->tokens[is_write] = tgm;
         tg->any_timer_armed[is_write] = true;
     }
 
     return must_wait;
 }
 
-/* Start the next pending I/O request for a BlockBackend. Return whether
+/* Start the next pending I/O request for a ThrottleGroupMember. Return whether
  * any request was actually pending.
  *
- * @blk: the current BlockBackend
+ * @tgm: the current ThrottleGroupMember
  * @is_write: the type of operation (read/write)
 */
-static bool coroutine_fn throttle_group_co_restart_queue(BlockBackend *blk,
+static bool coroutine_fn throttle_group_co_restart_queue(ThrottleGroupMember *tgm,
                                                          bool is_write)
 {
-    BlockBackendPublic *blkp = blk_get_public(blk);
     bool ret;
 
-    qemu_co_mutex_lock(&blkp->throttled_reqs_lock);
-    ret = qemu_co_queue_next(&blkp->throttled_reqs[is_write]);
-    qemu_co_mutex_unlock(&blkp->throttled_reqs_lock);
+    qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
+    ret = qemu_co_queue_next(&tgm->throttled_reqs[is_write]);
+    qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
 
     return ret;
 }
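
The throttled_reqs[] CoQueues above (one per direction, guarded by
throttled_reqs_lock) are where waiting requests park: qemu_co_queue_wait()
suspends the calling coroutine and releases the CoMutex while it sleeps, and
qemu_co_queue_next() wakes at most one waiter and reports whether anyone was
queued. For intuition only, a rough pthread analogue of that pairing; QEMU
coroutines are cooperative, so this approximates the pattern, not the exact
semantics:

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
        pthread_mutex_t lock;    /* plays the role of throttled_reqs_lock */
        pthread_cond_t reqs[2];  /* plays the role of throttled_reqs[] */
        unsigned waiters[2];
    } ThrottleQueue;

    /* Analogue of qemu_co_queue_wait(): the lock is dropped while sleeping.
     * A real implementation would recheck a predicate here to cope with
     * spurious wakeups. */
    void throttled_wait(ThrottleQueue *q, bool is_write)
    {
        pthread_mutex_lock(&q->lock);
        q->waiters[is_write]++;
        pthread_cond_wait(&q->reqs[is_write], &q->lock);
        q->waiters[is_write]--;
        pthread_mutex_unlock(&q->lock);
    }

    /* Analogue of throttle_group_co_restart_queue(): wake one waiter and
     * report whether anyone was actually queued */
    bool restart_one(ThrottleQueue *q, bool is_write)
    {
        pthread_mutex_lock(&q->lock);
        bool had_waiter = q->waiters[is_write] > 0;
        pthread_cond_signal(&q->reqs[is_write]);
        pthread_mutex_unlock(&q->lock);
        return had_waiter;
    }
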
@@ -290,19 +287,19 @@ static bool coroutine_fn throttle_group_co_restart_queue(BlockBackend *blk,
  *
  * This assumes that tg->lock is held.
  *
- * @blk: the current BlockBackend
+ * @tgm: the current ThrottleGroupMember
  * @is_write: the type of operation (read/write)
 */
-static void schedule_next_request(BlockBackend *blk, bool is_write)
+static void schedule_next_request(ThrottleGroupMember *tgm, bool is_write)
 {
-    BlockBackendPublic *blkp = blk_get_public(blk);
-    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
+    ThrottleState *ts = tgm->throttle_state;
+    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     bool must_wait;
-    BlockBackend *token;
+    ThrottleGroupMember *token;
 
     /* Check if there's any pending request to schedule next */
-    token = next_throttle_token(blk, is_write);
-    if (!blk_has_pending_reqs(token, is_write)) {
+    token = next_throttle_token(tgm, is_write);
+    if (!tgm_has_pending_reqs(token, is_write)) {
         return;
     }
 
@@ -311,12 +308,12 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
 
     /* If it doesn't have to wait, queue it for immediate execution */
     if (!must_wait) {
-        /* Give preference to requests from the current blk */
+        /* Give preference to requests from the current tgm */
         if (qemu_in_coroutine() &&
-            throttle_group_co_restart_queue(blk, is_write)) {
-            token = blk;
+            throttle_group_co_restart_queue(tgm, is_write)) {
+            token = tgm;
         } else {
-            ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
+            ThrottleTimers *tt = &token->throttle_timers;
             int64_t now = qemu_clock_get_ns(tg->clock_type);
             timer_mod(tt->timers[is_write], now);
             tg->any_timer_armed[is_write] = true;
@@ -329,76 +326,77 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
  * if necessary, and schedule the next request using a round robin
  * algorithm.
  *
- * @blk: the current BlockBackend
+ * @tgm: the current ThrottleGroupMember
  * @bytes: the number of bytes for this I/O
  * @is_write: the type of operation (read/write)
 */
-void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
+void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm,
                                                         unsigned int bytes,
                                                         bool is_write)
 {
     bool must_wait;
-    BlockBackend *token;
-
-    BlockBackendPublic *blkp = blk_get_public(blk);
-    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
+    ThrottleGroupMember *token;
+    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
     qemu_mutex_lock(&tg->lock);
 
     /* First we check if this I/O has to be throttled. */
-    token = next_throttle_token(blk, is_write);
+    token = next_throttle_token(tgm, is_write);
     must_wait = throttle_group_schedule_timer(token, is_write);
 
     /* Wait if there's a timer set or queued requests of this type */
-    if (must_wait || blkp->pending_reqs[is_write]) {
-        blkp->pending_reqs[is_write]++;
+    if (must_wait || tgm->pending_reqs[is_write]) {
+        tgm->pending_reqs[is_write]++;
         qemu_mutex_unlock(&tg->lock);
-        qemu_co_mutex_lock(&blkp->throttled_reqs_lock);
-        qemu_co_queue_wait(&blkp->throttled_reqs[is_write],
-                           &blkp->throttled_reqs_lock);
-        qemu_co_mutex_unlock(&blkp->throttled_reqs_lock);
+        qemu_co_mutex_lock(&tgm->throttled_reqs_lock);
+        qemu_co_queue_wait(&tgm->throttled_reqs[is_write],
+                           &tgm->throttled_reqs_lock);
+        qemu_co_mutex_unlock(&tgm->throttled_reqs_lock);
         qemu_mutex_lock(&tg->lock);
-        blkp->pending_reqs[is_write]--;
+        tgm->pending_reqs[is_write]--;
     }
 
     /* The I/O will be executed, so do the accounting */
-    throttle_account(blkp->throttle_state, is_write, bytes);
+    throttle_account(tgm->throttle_state, is_write, bytes);
 
     /* Schedule the next request */
-    schedule_next_request(blk, is_write);
+    schedule_next_request(tgm, is_write);
 
     qemu_mutex_unlock(&tg->lock);
 }
 
 typedef struct {
-    BlockBackend *blk;
+    ThrottleGroupMember *tgm;
     bool is_write;
 } RestartData;
 
 static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
 {
     RestartData *data = opaque;
-    BlockBackend *blk = data->blk;
+    ThrottleGroupMember *tgm = data->tgm;
+    ThrottleState *ts = tgm->throttle_state;
+    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     bool is_write = data->is_write;
-    BlockBackendPublic *blkp = blk_get_public(blk);
-    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     bool empty_queue;
 
-    empty_queue = !throttle_group_co_restart_queue(blk, is_write);
+    empty_queue = !throttle_group_co_restart_queue(tgm, is_write);
 
     /* If the request queue was empty then we have to take care of
      * scheduling the next one */
     if (empty_queue) {
         qemu_mutex_lock(&tg->lock);
-        schedule_next_request(blk, is_write);
+        schedule_next_request(tgm, is_write);
         qemu_mutex_unlock(&tg->lock);
     }
 }
 
-static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
+static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write)
 {
+    BlockBackendPublic *blkp = container_of(tgm, BlockBackendPublic,
+                                            throttle_group_member);
+    BlockBackend *blk = blk_by_public(blkp);
     Coroutine *co;
     RestartData rd = {
-        .blk = blk,
+        .tgm = tgm,
         .is_write = is_write
     };
 
@@ -406,13 +404,11 @@ static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
     aio_co_enter(blk_get_aio_context(blk), co);
 }
 
-void throttle_group_restart_blk(BlockBackend *blk)
+void throttle_group_restart_tgm(ThrottleGroupMember *tgm)
 {
-    BlockBackendPublic *blkp = blk_get_public(blk);
-
-    if (blkp->throttle_state) {
-        throttle_group_restart_queue(blk, 0);
-        throttle_group_restart_queue(blk, 1);
+    if (tgm->throttle_state) {
+        throttle_group_restart_queue(tgm, 0);
+        throttle_group_restart_queue(tgm, 1);
     }
 }
 
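
throttle_group_co_io_limits_intercept() is meant to sit at the front of a
BlockBackend's coroutine I/O path: throttle first, then issue the request. A
hypothetical caller, loosely modeled on the read/write paths in
block/block-backend.c (the function name and body here are illustrative, not
part of this series):

    static int coroutine_fn example_co_pwritev(BlockBackend *blk, int64_t offset,
                                               unsigned int bytes)
    {
        ThrottleGroupMember *tgm = &blk_get_public(blk)->throttle_group_member;

        if (tgm->throttle_state) {
            /* May yield here until the group's limits allow this request */
            throttle_group_co_io_limits_intercept(tgm, bytes, true);
        }

        /* ... issue the actual write covering [offset, offset + bytes) ... */
        return 0;
    }
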
@@ -420,32 +416,30 @@ void throttle_group_restart_blk(BlockBackend *blk)
  * to throttle_config(), but guarantees atomicity within the
  * throttling group.
  *
- * @blk: a BlockBackend that is a member of the group
+ * @tgm: a ThrottleGroupMember that is a member of the group
  * @cfg: the configuration to set
 */
-void throttle_group_config(BlockBackend *blk, ThrottleConfig *cfg)
+void throttle_group_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
 {
-    BlockBackendPublic *blkp = blk_get_public(blk);
-    ThrottleState *ts = blkp->throttle_state;
+    ThrottleState *ts = tgm->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     qemu_mutex_lock(&tg->lock);
     throttle_config(ts, tg->clock_type, cfg);
     qemu_mutex_unlock(&tg->lock);
 
-    throttle_group_restart_blk(blk);
+    throttle_group_restart_tgm(tgm);
 }
 
 /* Get the throttle configuration from a particular group. Similar to
  * throttle_get_config(), but guarantees atomicity within the
  * throttling group.
  *
- * @blk: a BlockBackend that is a member of the group
+ * @tgm: a ThrottleGroupMember that is a member of the group
  * @cfg: the configuration will be written here
 */
-void throttle_group_get_config(BlockBackend *blk, ThrottleConfig *cfg)
+void throttle_group_get_config(ThrottleGroupMember *tgm, ThrottleConfig *cfg)
 {
-    BlockBackendPublic *blkp = blk_get_public(blk);
-    ThrottleState *ts = blkp->throttle_state;
+    ThrottleState *ts = tgm->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     qemu_mutex_lock(&tg->lock);
     throttle_get_config(ts, cfg);
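
A hypothetical snippet showing how a limit would be applied through
throttle_group_config(). throttle_config_init(), ThrottleConfig and
THROTTLE_BPS_TOTAL come from include/qemu/throttle.h; the helper itself and
the 10 MB/s figure are illustrative:

    void example_set_limit(ThrottleGroupMember *tgm)
    {
        ThrottleConfig cfg;

        throttle_config_init(&cfg);
        cfg.buckets[THROTTLE_BPS_TOTAL].avg = 10 * 1024 * 1024;  /* 10 MB/s */

        /* Takes tg->lock internally, then restarts both request queues */
        throttle_group_config(tgm, &cfg);
    }
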
@@ -461,7 +455,8 @@ void throttle_group_get_config(BlockBackend *blk, ThrottleConfig *cfg)
 static void timer_cb(BlockBackend *blk, bool is_write)
 {
     BlockBackendPublic *blkp = blk_get_public(blk);
-    ThrottleState *ts = blkp->throttle_state;
+    ThrottleGroupMember *tgm = &blkp->throttle_group_member;
+    ThrottleState *ts = tgm->throttle_state;
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
 
     /* The timer has just been fired, so we can update the flag */
@@ -470,7 +465,7 @@ static void timer_cb(BlockBackend *blk, bool is_write)
     qemu_mutex_unlock(&tg->lock);
 
     /* Run the request that was waiting for this timer */
-    throttle_group_restart_queue(blk, is_write);
+    throttle_group_restart_queue(tgm, is_write);
 }
 
 static void read_timer_cb(void *opaque)
@@ -483,32 +478,36 @@ static void write_timer_cb(void *opaque)
     timer_cb(opaque, true);
 }
 
-/* Register a BlockBackend in the throttling group, also initializing its
- * timers and updating its throttle_state pointer to point to it. If a
+/* Register a ThrottleGroupMember in the throttling group, also initializing
+ * its timers and updating its throttle_state pointer to point to it. If a
  * throttling group with that name does not exist yet, it will be created.
  *
- * @blk: the BlockBackend to insert
+ * @tgm: the ThrottleGroupMember to insert
  * @groupname: the name of the group
 */
-void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
+void throttle_group_register_tgm(ThrottleGroupMember *tgm,
+                                 const char *groupname)
 {
     int i;
-    BlockBackendPublic *blkp = blk_get_public(blk);
+    BlockBackendPublic *blkp = container_of(tgm, BlockBackendPublic,
+                                            throttle_group_member);
+    BlockBackend *blk = blk_by_public(blkp);
     ThrottleState *ts = throttle_group_incref(groupname);
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
-    blkp->throttle_state = ts;
+
+    tgm->throttle_state = ts;
 
     qemu_mutex_lock(&tg->lock);
-    /* If the ThrottleGroup is new set this BlockBackend as the token */
+    /* If the ThrottleGroup is new set this ThrottleGroupMember as the token */
     for (i = 0; i < 2; i++) {
         if (!tg->tokens[i]) {
-            tg->tokens[i] = blk;
+            tg->tokens[i] = tgm;
         }
     }
 
-    QLIST_INSERT_HEAD(&tg->head, blkp, round_robin);
+    QLIST_INSERT_HEAD(&tg->head, tgm, round_robin);
 
-    throttle_timers_init(&blkp->throttle_timers,
+    throttle_timers_init(&tgm->throttle_timers,
                          blk_get_aio_context(blk),
                          tg->clock_type,
                          read_timer_cb,
@@ -518,45 +517,46 @@ void throttle_group_register_blk(BlockBackend *blk, const char *groupname)
     qemu_mutex_unlock(&tg->lock);
 }
 
-/* Unregister a BlockBackend from its group, removing it from the list,
+/* Unregister a ThrottleGroupMember from its group, removing it from the list,
  * destroying the timers and setting the throttle_state pointer to NULL.
  *
- * The BlockBackend must not have pending throttled requests, so the caller has
- * to drain them first.
+ * The ThrottleGroupMember must not have pending throttled requests, so the
+ * caller has to drain them first.
  *
  * The group will be destroyed if it's empty after this operation.
 *
- * @blk: the BlockBackend to remove
+ * @tgm: the ThrottleGroupMember to remove
 */
-void throttle_group_unregister_blk(BlockBackend *blk)
+void throttle_group_unregister_tgm(ThrottleGroupMember *tgm)
 {
-    BlockBackendPublic *blkp = blk_get_public(blk);
-    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
+    ThrottleState *ts = tgm->throttle_state;
+    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
+    ThrottleGroupMember *token;
     int i;
 
-    assert(blkp->pending_reqs[0] == 0 && blkp->pending_reqs[1] == 0);
-    assert(qemu_co_queue_empty(&blkp->throttled_reqs[0]));
-    assert(qemu_co_queue_empty(&blkp->throttled_reqs[1]));
+    assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
+    assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
+    assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));
 
     qemu_mutex_lock(&tg->lock);
     for (i = 0; i < 2; i++) {
-        if (tg->tokens[i] == blk) {
-            BlockBackend *token = throttle_group_next_blk(blk);
-            /* Take care of the case where this is the last blk in the group */
-            if (token == blk) {
+        if (tg->tokens[i] == tgm) {
+            token = throttle_group_next_tgm(tgm);
+            /* Take care of the case where this is the last tgm in the group */
+            if (token == tgm) {
                 token = NULL;
             }
             tg->tokens[i] = token;
         }
     }
 
-    /* remove the current blk from the list */
-    QLIST_REMOVE(blkp, round_robin);
-    throttle_timers_destroy(&blkp->throttle_timers);
+    /* remove the current tgm from the list */
+    QLIST_REMOVE(tgm, round_robin);
+    throttle_timers_destroy(&tgm->throttle_timers);
     qemu_mutex_unlock(&tg->lock);
 
     throttle_group_unref(&tg->ts);
-    blkp->throttle_state = NULL;
+    tgm->throttle_state = NULL;
 }
 
 static void throttle_groups_init(void)
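
Putting the two halves together, a hypothetical lifecycle for one member under
the post-patch API; the group name and helper function are illustrative, and
the drain step reflects the requirement documented above
throttle_group_unregister_tgm() (it is roughly what blk_io_limits_disable()
does in block/block-backend.c):

    void example_member_lifecycle(BlockBackend *blk)
    {
        ThrottleGroupMember *tgm = &blk_get_public(blk)->throttle_group_member;

        /* Joins "group0", creating the group if it does not exist yet */
        throttle_group_register_tgm(tgm, "group0");

        /* ... I/O now flows through throttle_group_co_io_limits_intercept() ... */

        /* No throttled requests may be pending at unregister time */
        bdrv_drained_begin(blk_bs(blk));
        throttle_group_unregister_tgm(tgm);
        bdrv_drained_end(blk_bs(blk));
    }
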