@@ -37,6 +37,8 @@
 #include "hw/virtio/virtio-blk-common.h"
 #include "qemu/coroutine.h"
 
+static void virtio_blk_ioeventfd_attach(VirtIOBlock *s);
+
 static void virtio_blk_init_request(VirtIOBlock *s, VirtQueue *vq,
                                     VirtIOBlockReq *req)
 {
@@ -64,7 +66,7 @@ static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
     iov_discard_undo(&req->inhdr_undo);
     iov_discard_undo(&req->outhdr_undo);
     virtqueue_push(req->vq, &req->elem, req->in_len);
-    if (s->ioeventfd_started && !s->ioeventfd_disabled) {
+    if (qemu_in_iothread()) {
         virtio_notify_irqfd(vdev, req->vq);
     } else {
         virtio_notify(vdev, req->vq);
@@ -661,6 +663,9 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret)
     int64_t zrp_size, n, j = 0;
     int64_t nz = data->zone_report_data.nr_zones;
     int8_t err_status = VIRTIO_BLK_S_OK;
+    struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) {
+        .nr_zones = cpu_to_le64(nz),
+    };
 
     trace_virtio_blk_zone_report_complete(vdev, req, nz, ret);
     if (ret) {
@@ -668,9 +673,6 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret)
         goto out;
     }
 
-    struct virtio_blk_zone_report zrp_hdr = (struct virtio_blk_zone_report) {
-        .nr_zones = cpu_to_le64(nz),
-    };
     zrp_size = sizeof(struct virtio_blk_zone_report)
                + sizeof(struct virtio_blk_zone_descriptor) * nz;
     n = iov_from_buf(in_iov, in_num, 0, &zrp_hdr, sizeof(zrp_hdr));
@@ -898,13 +900,14 @@ static int virtio_blk_handle_zone_append(VirtIOBlockReq *req,
 
     int64_t offset = virtio_ldq_p(vdev, &req->out.sector) << BDRV_SECTOR_BITS;
     int64_t len = iov_size(out_iov, out_num);
+    ZoneCmdData *data;
 
     trace_virtio_blk_handle_zone_append(vdev, req, offset >> BDRV_SECTOR_BITS);
     if (!check_zoned_request(s, offset, len, true, &err_status)) {
         goto out;
     }
 
-    ZoneCmdData *data = g_malloc(sizeof(ZoneCmdData));
+    data = g_malloc(sizeof(ZoneCmdData));
     data->req = req;
     data->in_iov = in_iov;
     data->in_num = in_num;
@@ -1191,14 +1194,15 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running,
 {
     VirtIOBlock *s = opaque;
     uint16_t num_queues = s->conf.num_queues;
+    g_autofree VirtIOBlockReq **vq_rq = NULL;
+    VirtIOBlockReq *rq;
 
     if (!running) {
         return;
     }
 
     /* Split the device-wide s->rq request list into per-vq request lists */
-    g_autofree VirtIOBlockReq **vq_rq = g_new0(VirtIOBlockReq *, num_queues);
-    VirtIOBlockReq *rq;
+    vq_rq = g_new0(VirtIOBlockReq *, num_queues);
 
     WITH_QEMU_LOCK_GUARD(&s->rq_lock) {
         rq = s->rq;
@@ -1209,6 +1213,8 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running,
         VirtIOBlockReq *next = rq->next;
         uint16_t idx = virtio_get_queue_index(rq->vq);
 
+        /* Only num_queues vqs were created so vq_rq[idx] is within bounds */
+        assert(idx < num_queues);
         rq->next = vq_rq[idx];
         vq_rq[idx] = rq;
         rq = next;
@@ -1485,68 +1491,6 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
     return 0;
 }
 
-static bool
-validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
-                                  uint16_t num_queues, Error **errp)
-{
-    g_autofree unsigned long *vqs = bitmap_new(num_queues);
-    g_autoptr(GHashTable) iothreads =
-        g_hash_table_new(g_str_hash, g_str_equal);
-
-    for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
-        const char *name = node->value->iothread;
-        uint16List *vq;
-
-        if (!iothread_by_id(name)) {
-            error_setg(errp, "IOThread \"%s\" object does not exist", name);
-            return false;
-        }
-
-        if (!g_hash_table_add(iothreads, (gpointer)name)) {
-            error_setg(errp,
-                       "duplicate IOThread name \"%s\" in iothread-vq-mapping",
-                       name);
-            return false;
-        }
-
-        if (node != list) {
-            if (!!node->value->vqs != !!list->value->vqs) {
-                error_setg(errp, "either all items in iothread-vq-mapping "
-                                 "must have vqs or none of them must have it");
-                return false;
-            }
-        }
-
-        for (vq = node->value->vqs; vq; vq = vq->next) {
-            if (vq->value >= num_queues) {
-                error_setg(errp, "vq index %u for IOThread \"%s\" must be "
-                           "less than num_queues %u in iothread-vq-mapping",
-                           vq->value, name, num_queues);
-                return false;
-            }
-
-            if (test_and_set_bit(vq->value, vqs)) {
-                error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
-                           "because it is already assigned", vq->value, name);
-                return false;
-            }
-        }
-    }
-
-    if (list->value->vqs) {
-        for (uint16_t i = 0; i < num_queues; i++) {
-            if (!test_bit(i, vqs)) {
-                error_setg(errp,
-                        "missing vq %u IOThread assignment in iothread-vq-mapping",
-                        i);
-                return false;
-            }
-        }
-    }
-
-    return true;
-}
-
 static void virtio_resize_cb(void *opaque)
 {
     VirtIODevice *vdev = opaque;
@@ -1613,15 +1557,95 @@ static const BlockDevOps virtio_block_ops = {
     .drained_end = virtio_blk_drained_end,
 };
 
-/* Generate vq:AioContext mappings from a validated iothread-vq-mapping list */
-static void
-apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
-                 AioContext **vq_aio_context, uint16_t num_queues)
+static bool
+validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
+                                  uint16_t num_queues, Error **errp)
+{
+    g_autofree unsigned long *vqs = bitmap_new(num_queues);
+    g_autoptr(GHashTable) iothreads =
+        g_hash_table_new(g_str_hash, g_str_equal);
+
+    for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) {
+        const char *name = node->value->iothread;
+        uint16List *vq;
+
+        if (!iothread_by_id(name)) {
+            error_setg(errp, "IOThread \"%s\" object does not exist", name);
+            return false;
+        }
+
+        if (!g_hash_table_add(iothreads, (gpointer)name)) {
+            error_setg(errp,
+                       "duplicate IOThread name \"%s\" in iothread-vq-mapping",
+                       name);
+            return false;
+        }
+
+        if (node != list) {
+            if (!!node->value->vqs != !!list->value->vqs) {
+                error_setg(errp, "either all items in iothread-vq-mapping "
+                                 "must have vqs or none of them must have it");
+                return false;
+            }
+        }
+
+        for (vq = node->value->vqs; vq; vq = vq->next) {
+            if (vq->value >= num_queues) {
+                error_setg(errp, "vq index %u for IOThread \"%s\" must be "
+                           "less than num_queues %u in iothread-vq-mapping",
+                           vq->value, name, num_queues);
+                return false;
+            }
+
+            if (test_and_set_bit(vq->value, vqs)) {
+                error_setg(errp, "cannot assign vq %u to IOThread \"%s\" "
+                           "because it is already assigned", vq->value, name);
+                return false;
+            }
+        }
+    }
+
+    if (list->value->vqs) {
+        for (uint16_t i = 0; i < num_queues; i++) {
+            if (!test_bit(i, vqs)) {
+                error_setg(errp,
+                        "missing vq %u IOThread assignment in iothread-vq-mapping",
+                        i);
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
+
+/**
+ * apply_iothread_vq_mapping:
+ * @iothread_vq_mapping_list: The mapping of virtqueues to IOThreads.
+ * @vq_aio_context: The array of AioContext pointers to fill in.
+ * @num_queues: The length of @vq_aio_context.
+ * @errp: If an error occurs, a pointer to the area to store the error.
+ *
+ * Fill in the AioContext for each virtqueue in the @vq_aio_context array given
+ * the iothread-vq-mapping parameter in @iothread_vq_mapping_list.
+ *
+ * Returns: %true on success, %false on failure.
+ **/
+static bool apply_iothread_vq_mapping(
+        IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
+        AioContext **vq_aio_context,
+        uint16_t num_queues,
+        Error **errp)
 {
     IOThreadVirtQueueMappingList *node;
     size_t num_iothreads = 0;
     size_t cur_iothread = 0;
 
+    if (!validate_iothread_vq_mapping_list(iothread_vq_mapping_list,
+                                           num_queues, errp)) {
+        return false;
+    }
+
     for (node = iothread_vq_mapping_list; node; node = node->next) {
         num_iothreads++;
     }
@@ -1638,6 +1662,7 @@ apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
 
             /* Explicit vq:IOThread assignment */
             for (vq = node->value->vqs; vq; vq = vq->next) {
+                assert(vq->value < num_queues);
                 vq_aio_context[vq->value] = ctx;
             }
         } else {
@@ -1650,6 +1675,8 @@ apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
 
         cur_iothread++;
     }
+
+    return true;
 }
 
 /* Context: BQL held */
@@ -1660,6 +1687,13 @@ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
 
+    if (conf->iothread && conf->iothread_vq_mapping_list) {
+        error_setg(errp,
+                   "iothread and iothread-vq-mapping properties cannot be set "
+                   "at the same time");
+        return false;
+    }
+
     if (conf->iothread || conf->iothread_vq_mapping_list) {
         if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
             error_setg(errp,
@@ -1685,8 +1719,14 @@ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
     s->vq_aio_context = g_new(AioContext *, conf->num_queues);
 
     if (conf->iothread_vq_mapping_list) {
-        apply_vq_mapping(conf->iothread_vq_mapping_list, s->vq_aio_context,
-                         conf->num_queues);
+        if (!apply_iothread_vq_mapping(conf->iothread_vq_mapping_list,
+                                       s->vq_aio_context,
+                                       conf->num_queues,
+                                       errp)) {
+            g_free(s->vq_aio_context);
+            s->vq_aio_context = NULL;
+            return false;
+        }
     } else if (conf->iothread) {
         AioContext *ctx = iothread_get_aio_context(conf->iothread);
         for (unsigned i = 0; i < conf->num_queues; i++) {
@@ -1790,6 +1830,7 @@ static int virtio_blk_start_ioeventfd(VirtIODevice *vdev)
      * Try to change the AioContext so that block jobs and other operations can
      * co-locate their activity in the same AioContext. If it fails, nevermind.
      */
+    assert(nvqs > 0); /* enforced during ->realize() */
     r = blk_set_aio_context(s->conf.conf.blk, s->vq_aio_context[0],
                             &local_err);
     if (r < 0) {
@@ -1808,17 +1849,14 @@ static int virtio_blk_start_ioeventfd(VirtIODevice *vdev)
     s->ioeventfd_started = true;
     smp_wmb(); /* paired with aio_notify_accept() on the read side */
 
-    /* Get this show started by hooking up our callbacks */
-    for (i = 0; i < nvqs; i++) {
-        VirtQueue *vq = virtio_get_queue(vdev, i);
-        AioContext *ctx = s->vq_aio_context[i];
-
-        /* Kick right away to begin processing requests already in vring */
-        event_notifier_set(virtio_queue_get_host_notifier(vq));
-
-        if (!blk_in_drain(s->conf.conf.blk)) {
-            virtio_queue_aio_attach_host_notifier(vq, ctx);
-        }
+    /*
+     * Get this show started by hooking up our callbacks. If drained now,
+     * virtio_blk_drained_end() will do this later.
+     * Attaching the notifier also kicks the virtqueues, processing any requests
+     * they may already have.
+     */
+    if (!blk_in_drain(s->conf.conf.blk)) {
+        virtio_blk_ioeventfd_attach(s);
     }
 
     return 0;
@@ -1924,6 +1962,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VirtIOBlock *s = VIRTIO_BLK(dev);
     VirtIOBlkConf *conf = &s->conf;
+    BlockDriverState *bs;
     Error *err = NULL;
     unsigned i;
 
@@ -1969,7 +2008,7 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
         return;
     }
 
-    BlockDriverState *bs = blk_bs(conf->conf.blk);
+    bs = blk_bs(conf->conf.blk);
     if (bs->bl.zoned != BLK_Z_NONE) {
         virtio_add_feature(&s->host_features, VIRTIO_BLK_F_ZONED);
         if (bs->bl.zoned == BLK_Z_HM) {
@@ -1996,19 +2035,6 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
         return;
     }
 
-    if (conf->iothread_vq_mapping_list) {
-        if (conf->iothread) {
-            error_setg(errp, "iothread and iothread-vq-mapping properties "
-                       "cannot be set at the same time");
-            return;
-        }
-
-        if (!validate_iothread_vq_mapping_list(conf->iothread_vq_mapping_list,
-                                               conf->num_queues, errp)) {
-            return;
-        }
-    }
-
     s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
                                             s->host_features);
     virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);