@@ -27,6 +27,7 @@
 #include "hw/qdev-properties.h"
 #include "hw/scsi/scsi.h"
 #include "scsi/constants.h"
+#include "hw/virtio/iothread-vq-mapping.h"
 #include "hw/virtio/virtio-bus.h"
 #include "hw/virtio/virtio-access.h"
 #include "trace.h"
@@ -47,7 +48,7 @@ typedef struct VirtIOSCSIReq {
     /* Used for two-stage request submission and TMFs deferred to BH */
     QTAILQ_ENTRY(VirtIOSCSIReq) next;
 
-    /* Used for cancellation of request during TMFs */
+    /* Used for cancellation of request during TMFs. Atomic. */
     int remaining;
 
     SCSIRequest *sreq;
@@ -102,13 +103,18 @@ static void virtio_scsi_free_req(VirtIOSCSIReq *req)
     g_free(req);
 }
 
-static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
+static void virtio_scsi_complete_req(VirtIOSCSIReq *req, QemuMutex *vq_lock)
 {
     VirtIOSCSI *s = req->dev;
     VirtQueue *vq = req->vq;
     VirtIODevice *vdev = VIRTIO_DEVICE(s);
 
     qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
+
+    if (vq_lock) {
+        qemu_mutex_lock(vq_lock);
+    }
+
     virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
     if (s->dataplane_started && !s->dataplane_fenced) {
         virtio_notify_irqfd(vdev, vq);
@@ -116,6 +122,10 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
         virtio_notify(vdev, vq);
     }
 
+    if (vq_lock) {
+        qemu_mutex_unlock(vq_lock);
+    }
+
     if (req->sreq) {
         req->sreq->hba_private = NULL;
         scsi_req_unref(req->sreq);
@@ -123,34 +133,20 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
     virtio_scsi_free_req(req);
 }
 
-static void virtio_scsi_complete_req_bh(void *opaque)
+static void virtio_scsi_bad_req(VirtIOSCSIReq *req, QemuMutex *vq_lock)
 {
-    VirtIOSCSIReq *req = opaque;
+    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
 
-    virtio_scsi_complete_req(req);
-}
+    if (vq_lock) {
+        qemu_mutex_lock(vq_lock);
+    }
 
-/*
- * Called from virtio_scsi_do_one_tmf_bh() in main loop thread. The main loop
- * thread cannot touch the virtqueue since that could race with an IOThread.
- */
-static void virtio_scsi_complete_req_from_main_loop(VirtIOSCSIReq *req)
-{
-    VirtIOSCSI *s = req->dev;
+    virtqueue_detach_element(req->vq, &req->elem, 0);
 
-    if (!s->ctx || s->ctx == qemu_get_aio_context()) {
-        /* No need to schedule a BH when there is no IOThread */
-        virtio_scsi_complete_req(req);
-    } else {
-        /* Run request completion in the IOThread */
-        aio_wait_bh_oneshot(s->ctx, virtio_scsi_complete_req_bh, req);
+    if (vq_lock) {
+        qemu_mutex_unlock(vq_lock);
     }
-}
 
-static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
-{
-    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
-    virtqueue_detach_element(req->vq, &req->elem, 0);
     virtio_scsi_free_req(req);
 }
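
The QemuMutex *vq_lock parameter introduced above implements an optional-lock idiom: callers that already run in the virtqueue's AioContext pass NULL and skip locking, while cross-thread callers (the ctrl and event paths later in this patch) pass the mutex that serializes virtqueue access. A minimal standalone sketch of the idiom, not part of the patch; push_and_notify() is a hypothetical name, the virtqueue call is the real QEMU API:

    static void push_and_notify(VirtQueue *vq, VirtQueueElement *elem,
                                unsigned int len, QemuMutex *vq_lock)
    {
        if (vq_lock) {
            qemu_mutex_lock(vq_lock); /* caller is outside the vq's AioContext */
        }

        /* the virtqueue itself is the shared state that needs protection */
        virtqueue_push(vq, elem, len);

        if (vq_lock) {
            qemu_mutex_unlock(vq_lock);
        }
    }
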
@@ -235,12 +231,21 @@ static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
     return 0;
 }
 
-static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
+static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq, QemuMutex *vq_lock)
 {
     VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
     VirtIOSCSIReq *req;
 
+    if (vq_lock) {
+        qemu_mutex_lock(vq_lock);
+    }
+
     req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
+
+    if (vq_lock) {
+        qemu_mutex_unlock(vq_lock);
+    }
+
     if (!req) {
         return NULL;
     }
@@ -294,136 +299,157 @@ typedef struct {
     VirtIOSCSIReq *tmf_req;
 } VirtIOSCSICancelNotifier;
 
+static void virtio_scsi_tmf_dec_remaining(VirtIOSCSIReq *tmf)
+{
+    if (qatomic_fetch_dec(&tmf->remaining) == 1) {
+        trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(tmf->req.tmf.lun),
+                                   tmf->req.tmf.tag, tmf->resp.tmf.response);
+
+        virtio_scsi_complete_req(tmf, &tmf->dev->ctrl_lock);
+    }
+}
+
 static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
 {
     VirtIOSCSICancelNotifier *n = container_of(notifier,
                                                VirtIOSCSICancelNotifier,
                                                notifier);
 
-    if (--n->tmf_req->remaining == 0) {
-        VirtIOSCSIReq *req = n->tmf_req;
-
-        trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
-                                   req->req.tmf.tag, req->resp.tmf.response);
-        virtio_scsi_complete_req(req);
-    }
+    virtio_scsi_tmf_dec_remaining(n->tmf_req);
     g_free(n);
 }
 
-static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
+static void virtio_scsi_tmf_cancel_req(VirtIOSCSIReq *tmf, SCSIRequest *r)
 {
-    if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
-        assert(blk_get_aio_context(d->conf.blk) == s->ctx);
-    }
+    VirtIOSCSICancelNotifier *notifier;
+
+    assert(r->ctx == qemu_get_current_aio_context());
+
+    /* Decremented in virtio_scsi_cancel_notify() */
+    qatomic_inc(&tmf->remaining);
+
+    notifier = g_new(VirtIOSCSICancelNotifier, 1);
+    notifier->notifier.notify = virtio_scsi_cancel_notify;
+    notifier->tmf_req = tmf;
+    scsi_req_cancel_async(r, &notifier->notifier);
 }
 
-static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)
+/* Execute a TMF on the requests in the current AioContext */
+static void virtio_scsi_do_tmf_aio_context(void *opaque)
 {
-    VirtIOSCSI *s = req->dev;
-    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
-    BusChild *kid;
-    int target;
+    AioContext *ctx = qemu_get_current_aio_context();
+    VirtIOSCSIReq *tmf = opaque;
+    VirtIOSCSI *s = tmf->dev;
+    SCSIDevice *d = virtio_scsi_device_get(s, tmf->req.tmf.lun);
+    SCSIRequest *r;
+    bool match_tag;
 
-    switch (req->req.tmf.subtype) {
-    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
-        if (!d) {
-            req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
-            goto out;
-        }
-        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
-            req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
-            goto out;
-        }
-        qatomic_inc(&s->resetting);
-        device_cold_reset(&d->qdev);
-        qatomic_dec(&s->resetting);
+    if (!d) {
+        tmf->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
+        virtio_scsi_tmf_dec_remaining(tmf);
+        return;
+    }
+
+    /*
+     * This function could handle other subtypes that need to be processed in
+     * the request's AioContext in the future, but for now only request
+     * cancelation subtypes are performed here.
+     */
+    switch (tmf->req.tmf.subtype) {
+    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
+        match_tag = true;
         break;
+    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
+    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
+        match_tag = false;
+        break;
+    default:
+        g_assert_not_reached();
+    }
 
-    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
-        target = req->req.tmf.lun[1];
-        qatomic_inc(&s->resetting);
+    WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+        QTAILQ_FOREACH(r, &d->requests, next) {
+            VirtIOSCSIReq *cmd_req = r->hba_private;
+            assert(cmd_req); /* request has hba_private while enqueued */
 
-        rcu_read_lock();
-        QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
-            SCSIDevice *d1 = SCSI_DEVICE(kid->child);
-            if (d1->channel == 0 && d1->id == target) {
-                device_cold_reset(&d1->qdev);
+            if (r->ctx != ctx) {
+                continue;
+            }
+            if (match_tag && cmd_req->req.cmd.tag != tmf->req.tmf.tag) {
+                continue;
             }
+            virtio_scsi_tmf_cancel_req(tmf, r);
         }
-        rcu_read_unlock();
-
-        qatomic_dec(&s->resetting);
-        break;
-
-    default:
-        g_assert_not_reached();
     }
 
-out:
-    object_unref(OBJECT(d));
-    virtio_scsi_complete_req_from_main_loop(req);
+    /* Incremented by virtio_scsi_do_tmf() */
+    virtio_scsi_tmf_dec_remaining(tmf);
+
+    object_unref(d);
 }
 
-/* Some TMFs must be processed from the main loop thread */
-static void virtio_scsi_do_tmf_bh(void *opaque)
+static void dummy_bh(void *opaque)
 {
-    VirtIOSCSI *s = opaque;
-    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
-    VirtIOSCSIReq *req;
-    VirtIOSCSIReq *tmp;
+    /* Do nothing */
+}
 
+/*
+ * Wait for pending virtio_scsi_defer_tmf_to_aio_context() BHs.
+ */
+static void virtio_scsi_flush_defer_tmf_to_aio_context(VirtIOSCSI *s)
+{
     GLOBAL_STATE_CODE();
 
-    WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
-        QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
-            QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
-            QTAILQ_INSERT_TAIL(&reqs, req, next);
-        }
+    assert(!s->dataplane_started);
 
-        qemu_bh_delete(s->tmf_bh);
-        s->tmf_bh = NULL;
-    }
+    for (uint32_t i = 0; i < s->parent_obj.conf.num_queues; i++) {
+        AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i];
 
-    QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) {
-        QTAILQ_REMOVE(&reqs, req, next);
-        virtio_scsi_do_one_tmf_bh(req);
+        /* Our BH only runs after previously scheduled BHs */
+        aio_wait_bh_oneshot(ctx, dummy_bh, NULL);
     }
 }
 
-static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
+/*
+ * Run the TMF in a specific AioContext, handling only requests in that
+ * AioContext. This is necessary because requests can run in different
+ * AioContexts and it is only possible to cancel them from the AioContext where
+ * they are running.
+ */
+static void virtio_scsi_defer_tmf_to_aio_context(VirtIOSCSIReq *tmf,
+                                                 AioContext *ctx)
 {
-    VirtIOSCSIReq *req;
-    VirtIOSCSIReq *tmp;
+    /* Decremented in virtio_scsi_do_tmf_aio_context() */
+    qatomic_inc(&tmf->remaining);
 
-    GLOBAL_STATE_CODE();
-
-    /* Called after ioeventfd has been stopped, so tmf_bh_lock is not needed */
-    if (s->tmf_bh) {
-        qemu_bh_delete(s->tmf_bh);
-        s->tmf_bh = NULL;
-    }
-
-    QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
-        QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
-
-        /* SAM-6 6.3.2 Hard reset */
-        req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE;
-        virtio_scsi_complete_req(req);
-    }
+    /* See virtio_scsi_flush_defer_tmf_to_aio_context() cleanup during reset */
+    aio_bh_schedule_oneshot(ctx, virtio_scsi_do_tmf_aio_context, tmf);
 }
 
-static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
+/*
+ * Returns the AioContext for a given TMF's tag field or NULL. Note that the
+ * request identified by the tag may have completed by the time you can execute
+ * a BH in the AioContext, so don't assume the request still exists in your BH.
+ */
+static AioContext *find_aio_context_for_tmf_tag(SCSIDevice *d,
+                                                VirtIOSCSIReq *tmf)
 {
-    VirtIOSCSI *s = req->dev;
+    WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+        SCSIRequest *r;
+        SCSIRequest *next;
+
+        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
+            VirtIOSCSIReq *cmd_req = r->hba_private;
 
-    WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
-        QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);
+            /* hba_private is non-NULL while the request is enqueued */
+            assert(cmd_req);
 
-        if (!s->tmf_bh) {
-            s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
-            qemu_bh_schedule(s->tmf_bh);
+            if (cmd_req->req.cmd.tag == tmf->req.tmf.tag) {
+                return r->ctx;
+            }
         }
     }
+    return NULL;
 }
 
 /* Return 0 if the request is ready to be completed and return to guest;
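
The rewritten TMF machinery above treats the remaining field as an atomic reference count: virtio_scsi_do_tmf() holds one reference, and each scheduled BH and each in-flight cancellation holds another; whoever drops the last reference completes the TMF, regardless of which AioContext it runs in. A minimal sketch of that counting discipline, not part of the patch and with hypothetical tmf_get()/tmf_put() names:

    #include "qemu/atomic.h"

    typedef struct {
        int remaining; /* accessed only with qatomic_*() */
    } Tmf;

    /* Take a reference before deferring work that will call tmf_put() later. */
    static void tmf_get(Tmf *tmf)
    {
        qatomic_inc(&tmf->remaining);
    }

    /* Drop a reference; whoever drops the last one completes the TMF. */
    static void tmf_put(Tmf *tmf)
    {
        if (qatomic_fetch_dec(&tmf->remaining) == 1) {
            /* no BHs or cancellations left: complete the request here */
        }
    }
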
@@ -433,9 +459,9 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
 {
     SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
     SCSIRequest *r, *next;
+    AioContext *ctx;
     int ret = 0;
 
-    virtio_scsi_ctx_check(s, d);
     /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
     req->resp.tmf.response = VIRTIO_SCSI_S_OK;
@@ -450,7 +476,22 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
                               req->req.tmf.tag, req->req.tmf.subtype);
 
     switch (req->req.tmf.subtype) {
-    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
+    case VIRTIO_SCSI_T_TMF_ABORT_TASK: {
+        if (!d) {
+            goto fail;
+        }
+        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+            goto incorrect_lun;
+        }
+
+        ctx = find_aio_context_for_tmf_tag(d, req);
+        if (ctx) {
+            virtio_scsi_defer_tmf_to_aio_context(req, ctx);
+            ret = -EINPROGRESS;
+        }
+        break;
+    }
+
     case VIRTIO_SCSI_T_TMF_QUERY_TASK:
         if (!d) {
             goto fail;
@@ -458,44 +499,82 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
         if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
             goto incorrect_lun;
         }
-        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
-            VirtIOSCSIReq *cmd_req = r->hba_private;
-            if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
-                break;
+
+        WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+            QTAILQ_FOREACH(r, &d->requests, next) {
+                VirtIOSCSIReq *cmd_req = r->hba_private;
+                assert(cmd_req); /* request has hba_private while enqueued */
+
+                if (cmd_req->req.cmd.tag == req->req.tmf.tag) {
+                    /*
+                     * "If the specified command is present in the task set,
+                     * then return a service response set to FUNCTION
+                     * SUCCEEDED".
+                     */
+                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+                }
             }
         }
-        if (r) {
-            /*
-             * Assert that the request has not been completed yet, we
-             * check for it in the loop above.
-             */
-            assert(r->hba_private);
-            if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
-                /* "If the specified command is present in the task set, then
-                 * return a service response set to FUNCTION SUCCEEDED".
-                 */
-                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
-            } else {
-                VirtIOSCSICancelNotifier *notifier;
-
-                req->remaining = 1;
-                notifier = g_new(VirtIOSCSICancelNotifier, 1);
-                notifier->tmf_req = req;
-                notifier->notifier.notify = virtio_scsi_cancel_notify;
-                scsi_req_cancel_async(r, &notifier->notifier);
-                ret = -EINPROGRESS;
+        break;
+
+    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
+        if (!d) {
+            goto fail;
+        }
+        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+            goto incorrect_lun;
+        }
+        qatomic_inc(&s->resetting);
+        device_cold_reset(&d->qdev);
+        qatomic_dec(&s->resetting);
+        break;
+
+    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET: {
+        BusChild *kid;
+        int target = req->req.tmf.lun[1];
+        qatomic_inc(&s->resetting);
+
+        rcu_read_lock();
+        QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
+            SCSIDevice *d1 = SCSI_DEVICE(kid->child);
+            if (d1->channel == 0 && d1->id == target) {
+                device_cold_reset(&d1->qdev);
             }
         }
+        rcu_read_unlock();
+
+        qatomic_dec(&s->resetting);
         break;
+    }
 
-    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
-    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
-        virtio_scsi_defer_tmf_to_bh(req);
+    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
+    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET: {
+        g_autoptr(GHashTable) aio_contexts = g_hash_table_new(NULL, NULL);
+
+        if (!d) {
+            goto fail;
+        }
+        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
+            goto incorrect_lun;
+        }
+
+        qatomic_inc(&req->remaining);
+
+        for (uint32_t i = 0; i < s->parent_obj.conf.num_queues; i++) {
+            ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED + i];
+
+            if (!g_hash_table_add(aio_contexts, ctx)) {
+                continue; /* skip previously added AioContext */
+            }
+
+            virtio_scsi_defer_tmf_to_aio_context(req, ctx);
+        }
+
+        virtio_scsi_tmf_dec_remaining(req);
         ret = -EINPROGRESS;
         break;
+    }
 
-    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
-    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
     case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
         if (!d) {
             goto fail;
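
Because iothread-vq-mapping allows several virtqueues to share one IOThread, the ABORT_TASK_SET/CLEAR_TASK_SET fan-out above deduplicates AioContexts so that each one receives the TMF exactly once. g_hash_table_add() returns FALSE when the key is already present, so a GHashTable created with NULL hash and equal functions (pointer identity) works as a set. A standalone sketch of the idiom, not part of the patch; visit() and the contexts array are hypothetical:

    static void visit_each_context_once(AioContext *contexts[], size_t n,
                                        void (*visit)(AioContext *ctx))
    {
        g_autoptr(GHashTable) seen = g_hash_table_new(NULL, NULL);

        for (size_t i = 0; i < n; i++) {
            if (!g_hash_table_add(seen, contexts[i])) {
                continue; /* this AioContext was already visited */
            }
            visit(contexts[i]);
        }
    }
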
@@ -504,34 +583,19 @@ static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
             goto incorrect_lun;
         }
 
-        /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
-         * This way, if the bus starts calling back to the notifiers
-         * even before we finish the loop, virtio_scsi_cancel_notify
-         * will not complete the TMF too early.
-         */
-        req->remaining = 1;
-        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
-            if (r->hba_private) {
-                if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
-                    /* "If there is any command present in the task set, then
-                     * return a service response set to FUNCTION SUCCEEDED".
-                     */
-                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
-                    break;
-                } else {
-                    VirtIOSCSICancelNotifier *notifier;
-
-                    req->remaining++;
-                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
-                    notifier->notifier.notify = virtio_scsi_cancel_notify;
-                    notifier->tmf_req = req;
-                    scsi_req_cancel_async(r, &notifier->notifier);
-                }
+        WITH_QEMU_LOCK_GUARD(&d->requests_lock) {
+            QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
+                /* Request has hba_private while enqueued */
+                assert(r->hba_private);
+
+                /*
+                 * "If there is any command present in the task set, then
+                 * return a service response set to FUNCTION SUCCEEDED".
+                 */
+                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
+                break;
             }
         }
-        if (--req->remaining > 0) {
-            ret = -EINPROGRESS;
-        }
         break;
 
     case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
@@ -562,7 +626,7 @@ static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
 
     if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                    &type, sizeof(type)) < sizeof(type)) {
-        virtio_scsi_bad_req(req);
+        virtio_scsi_bad_req(req, &s->ctrl_lock);
         return;
     }
 
@@ -570,7 +634,7 @@
     if (type == VIRTIO_SCSI_T_TMF) {
         if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
                     sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
-            virtio_scsi_bad_req(req);
+            virtio_scsi_bad_req(req, &s->ctrl_lock);
             return;
         } else {
             r = virtio_scsi_do_tmf(s, req);
@@ -580,7 +644,7 @@
                type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
         if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
                     sizeof(VirtIOSCSICtrlANResp)) < 0) {
-            virtio_scsi_bad_req(req);
+            virtio_scsi_bad_req(req, &s->ctrl_lock);
             return;
         } else {
             req->req.an.event_requested =
@@ -600,7 +664,7 @@
             type == VIRTIO_SCSI_T_AN_SUBSCRIBE)
             trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
                                       req->resp.an.response);
-        virtio_scsi_complete_req(req);
+        virtio_scsi_complete_req(req, &s->ctrl_lock);
     } else {
         assert(r == -EINPROGRESS);
     }
@@ -610,7 +674,7 @@ static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
 {
     VirtIOSCSIReq *req;
 
-    while ((req = virtio_scsi_pop_req(s, vq))) {
+    while ((req = virtio_scsi_pop_req(s, vq, &s->ctrl_lock))) {
         virtio_scsi_handle_ctrl_req(s, req);
     }
 }
@@ -625,9 +689,12 @@ static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
  */
 static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s)
 {
-    if (!s->ctx || s->dataplane_started) {
+    if (s->dataplane_started) {
         return false;
     }
+    if (s->vq_aio_context[0] == qemu_get_aio_context()) {
+        return false; /* not using IOThreads */
+    }
 
     virtio_device_start_ioeventfd(&s->parent_obj.parent_obj);
     return !s->dataplane_fenced;
@@ -654,7 +721,7 @@
      * in virtio_scsi_command_complete.
      */
     req->resp_size = sizeof(VirtIOSCSICmdResp);
-    virtio_scsi_complete_req(req);
+    virtio_scsi_complete_req(req, NULL);
 }
 
 static void virtio_scsi_command_failed(SCSIRequest *r)
@@ -788,7 +855,7 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
             virtio_scsi_fail_cmd_req(req);
             return -ENOTSUP;
         } else {
-            virtio_scsi_bad_req(req);
+            virtio_scsi_bad_req(req, NULL);
             return -EINVAL;
         }
     }
@@ -801,7 +868,6 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
         virtio_scsi_complete_cmd_req(req);
         return -ENOENT;
     }
-    virtio_scsi_ctx_check(s, d);
     req->sreq = scsi_req_new(d, req->req.cmd.tag,
                              virtio_scsi_get_lun(req->req.cmd.lun),
                              req->req.cmd.cdb, vs->cdb_size, req);
@@ -843,7 +909,7 @@ static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
         virtio_queue_set_notification(vq, 0);
     }
 
-    while ((req = virtio_scsi_pop_req(s, vq))) {
+    while ((req = virtio_scsi_pop_req(s, vq, NULL))) {
         ret = virtio_scsi_handle_cmd_req_prepare(s, req);
         if (!ret) {
             QTAILQ_INSERT_TAIL(&reqs, req, next);
@@ -936,7 +1002,7 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
 
     assert(!s->dataplane_started);
 
-    virtio_scsi_reset_tmf_bh(s);
+    virtio_scsi_flush_defer_tmf_to_aio_context(s);
 
     qatomic_inc(&s->resetting);
     bus_cold_reset(BUS(&s->bus));
@@ -944,7 +1010,10 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
 
     vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
     vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
-    s->events_dropped = false;
+
+    WITH_QEMU_LOCK_GUARD(&s->event_lock) {
+        s->events_dropped = false;
+    }
 }
 
 typedef struct {
@@ -973,19 +1042,21 @@ static void virtio_scsi_push_event(VirtIOSCSI *s,
         return;
     }
 
-    req = virtio_scsi_pop_req(s, vs->event_vq);
-    if (!req) {
-        s->events_dropped = true;
-        return;
-    }
+    req = virtio_scsi_pop_req(s, vs->event_vq, &s->event_lock);
+    WITH_QEMU_LOCK_GUARD(&s->event_lock) {
+        if (!req) {
+            s->events_dropped = true;
+            return;
+        }
 
-    if (s->events_dropped) {
-        event |= VIRTIO_SCSI_T_EVENTS_MISSED;
-        s->events_dropped = false;
+        if (s->events_dropped) {
+            event |= VIRTIO_SCSI_T_EVENTS_MISSED;
+            s->events_dropped = false;
+        }
     }
 
     if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
-        virtio_scsi_bad_req(req);
+        virtio_scsi_bad_req(req, &s->event_lock);
         return;
     }
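
The early return taken while s->event_lock is held above is safe because WITH_QEMU_LOCK_GUARD() releases the lock on every exit from the guarded block, not only at the closing brace. A small standalone sketch of that property, not part of the patch, assuming qemu/lockable.h; take_token() is a hypothetical name:

    #include "qemu/lockable.h"

    static bool take_token(QemuMutex *lock, int *tokens)
    {
        WITH_QEMU_LOCK_GUARD(lock) {
            if (*tokens == 0) {
                return false; /* the lock is dropped automatically here too */
            }
            (*tokens)--;
        }
        return true;
    }
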
@@ -1005,12 +1076,18 @@ static void virtio_scsi_push_event(VirtIOSCSI *s,
     }
     trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);
 
-    virtio_scsi_complete_req(req);
+    virtio_scsi_complete_req(req, &s->event_lock);
 }
 
 static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
 {
-    if (s->events_dropped) {
+    bool events_dropped;
+
+    WITH_QEMU_LOCK_GUARD(&s->event_lock) {
+        events_dropped = s->events_dropped;
+    }
+
+    if (events_dropped) {
         VirtIOSCSIEventInfo info = {
             .event = VIRTIO_SCSI_T_NO_EVENT,
         };
|
|
{
|
|
{
|
|
VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
|
|
VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
|
|
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
|
|
VirtIOSCSI *s = VIRTIO_SCSI(vdev);
|
|
|
|
+ AioContext *ctx = s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED];
|
|
SCSIDevice *sd = SCSI_DEVICE(dev);
|
|
SCSIDevice *sd = SCSI_DEVICE(dev);
|
|
- int ret;
|
|
|
|
|
|
|
|
- if (s->ctx && !s->dataplane_fenced) {
|
|
|
|
- ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
|
|
|
|
- if (ret < 0) {
|
|
|
|
- return;
|
|
|
|
- }
|
|
|
|
|
|
+ if (ctx != qemu_get_aio_context() && !s->dataplane_fenced) {
|
|
|
|
+ /*
|
|
|
|
+ * Try to make the BlockBackend's AioContext match ours. Ignore failure
|
|
|
|
+ * because I/O will still work although block jobs and other users
|
|
|
|
+ * might be slower when multiple AioContexts use a BlockBackend.
|
|
|
|
+ */
|
|
|
|
+ blk_set_aio_context(sd->conf.blk, ctx, NULL);
|
|
}
|
|
}
|
|
|
|
|
|
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
|
|
if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
|
|
@@ -1103,7 +1182,7 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
|
|
|
|
|
|
qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
|
|
qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
|
|
|
|
|
|
- if (s->ctx) {
|
|
|
|
|
|
+ if (s->vq_aio_context[VIRTIO_SCSI_VQ_NUM_FIXED] != qemu_get_aio_context()) {
|
|
/* If other users keep the BlockBackend in the iothread, that's ok */
|
|
/* If other users keep the BlockBackend in the iothread, that's ok */
|
|
blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
|
|
blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
|
|
}
|
|
}
|
|
@@ -1137,7 +1216,7 @@ static void virtio_scsi_drained_begin(SCSIBus *bus)
|
|
|
|
|
|
for (uint32_t i = 0; i < total_queues; i++) {
|
|
for (uint32_t i = 0; i < total_queues; i++) {
|
|
VirtQueue *vq = virtio_get_queue(vdev, i);
|
|
VirtQueue *vq = virtio_get_queue(vdev, i);
|
|
- virtio_queue_aio_detach_host_notifier(vq, s->ctx);
|
|
|
|
|
|
+ virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
@@ -1163,10 +1242,12 @@ static void virtio_scsi_drained_end(SCSIBus *bus)
|
|
|
|
|
|
for (uint32_t i = 0; i < total_queues; i++) {
|
|
for (uint32_t i = 0; i < total_queues; i++) {
|
|
VirtQueue *vq = virtio_get_queue(vdev, i);
|
|
VirtQueue *vq = virtio_get_queue(vdev, i);
|
|
|
|
+ AioContext *ctx = s->vq_aio_context[i];
|
|
|
|
+
|
|
if (vq == vs->event_vq) {
|
|
if (vq == vs->event_vq) {
|
|
- virtio_queue_aio_attach_host_notifier_no_poll(vq, s->ctx);
|
|
|
|
|
|
+ virtio_queue_aio_attach_host_notifier_no_poll(vq, ctx);
|
|
} else {
|
|
} else {
|
|
- virtio_queue_aio_attach_host_notifier(vq, s->ctx);
|
|
|
|
|
|
+ virtio_queue_aio_attach_host_notifier(vq, ctx);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
@@ -1235,8 +1316,8 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
     VirtIOSCSI *s = VIRTIO_SCSI(dev);
     Error *err = NULL;
 
-    QTAILQ_INIT(&s->tmf_bh_list);
-    qemu_mutex_init(&s->tmf_bh_lock);
+    qemu_mutex_init(&s->ctrl_lock);
+    qemu_mutex_init(&s->event_lock);
 
     virtio_scsi_common_realize(dev,
                                virtio_scsi_handle_ctrl,
@@ -1271,15 +1352,16 @@ void virtio_scsi_common_unrealize(DeviceState *dev)
     virtio_cleanup(vdev);
 }
 
+/* main loop */
 static void virtio_scsi_device_unrealize(DeviceState *dev)
 {
     VirtIOSCSI *s = VIRTIO_SCSI(dev);
 
-    virtio_scsi_reset_tmf_bh(s);
-
+    virtio_scsi_dataplane_cleanup(s);
     qbus_set_hotplug_handler(BUS(&s->bus), NULL);
     virtio_scsi_common_unrealize(dev);
-    qemu_mutex_destroy(&s->tmf_bh_lock);
+    qemu_mutex_destroy(&s->event_lock);
+    qemu_mutex_destroy(&s->ctrl_lock);
 }
 
 static const Property virtio_scsi_properties[] = {
@@ -1299,6 +1381,8 @@ static const Property virtio_scsi_properties[] = {
                       VIRTIO_SCSI_F_CHANGE, true),
     DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread,
                      TYPE_IOTHREAD, IOThread *),
+    DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOSCSI,
+                                         parent_obj.conf.iothread_vq_mapping_list),
 };
 
 static const VMStateDescription vmstate_virtio_scsi = {
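
With the new property, the queue-to-IOThread assignment can be configured on the command line. The example below assumes the same JSON -device syntax as the existing virtio-blk iothread-vq-mapping parameter (queues are assigned round-robin across the listed IOThreads when no explicit "vqs" lists are given); the exact virtio-scsi spelling is an assumption, not taken from this patch:

    --object iothread,id=iothread0 \
    --object iothread,id=iothread1 \
    --device '{"driver":"virtio-scsi-pci","id":"scsi0",
               "iothread-vq-mapping":[{"iothread":"iothread0"},
                                      {"iothread":"iothread1"}]}'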