virtio-scsi.c
/*
 * Virtio SCSI HBA
 *
 * Copyright IBM, Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *   Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "standard-headers/linux/virtio_ids.h"
#include "hw/virtio/virtio-scsi.h"
#include "migration/qemu-file-types.h"
#include "qemu/defer-call.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "qemu/module.h"
#include "system/block-backend.h"
#include "system/dma.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "trace.h"

typedef struct VirtIOSCSIReq {
    /*
     * Note:
     * - fields up to resp_iov are initialized by virtio_scsi_init_req;
     * - fields starting at vring are zeroed by virtio_scsi_init_req.
     */
    VirtQueueElement elem;

    VirtIOSCSI *dev;
    VirtQueue *vq;
    QEMUSGList qsgl;
    QEMUIOVector resp_iov;

    /* Used for two-stage request submission and TMFs deferred to BH */
    QTAILQ_ENTRY(VirtIOSCSIReq) next;

    /* Used for cancellation of request during TMFs */
    int remaining;

    SCSIRequest *sreq;
    size_t resp_size;
    enum SCSIXferMode mode;
    union {
        VirtIOSCSICmdResp cmd;
        VirtIOSCSICtrlTMFResp tmf;
        VirtIOSCSICtrlANResp an;
        VirtIOSCSIEvent event;
    } resp;
    union {
        VirtIOSCSICmdReq cmd;
        VirtIOSCSICtrlTMFReq tmf;
        VirtIOSCSICtrlANReq an;
    } req;
} VirtIOSCSIReq;
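
/*
 * Decode the 14-bit LUN carried in bytes 2-3 of the 8-byte virtio-scsi LUN
 * field (flat addressing).
 */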
static inline int virtio_scsi_get_lun(uint8_t *lun)
{
    return ((lun[2] << 8) | lun[3]) & 0x3FFF;
}

static inline SCSIDevice *virtio_scsi_device_get(VirtIOSCSI *s, uint8_t *lun)
{
    if (lun[0] != 1) {
        return NULL;
    }
    if (lun[2] != 0 && !(lun[2] >= 0x40 && lun[2] < 0x80)) {
        return NULL;
    }
    return scsi_device_get(&s->bus, 0, lun[1], virtio_scsi_get_lun(lun));
}

static void virtio_scsi_init_req(VirtIOSCSI *s, VirtQueue *vq, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    const size_t zero_skip =
        offsetof(VirtIOSCSIReq, resp_iov) + sizeof(req->resp_iov);

    req->vq = vq;
    req->dev = s;
    qemu_sglist_init(&req->qsgl, DEVICE(s), 8, vdev->dma_as);
    qemu_iovec_init(&req->resp_iov, 1);
    memset((uint8_t *)req + zero_skip, 0, sizeof(*req) - zero_skip);
}

static void virtio_scsi_free_req(VirtIOSCSIReq *req)
{
    qemu_iovec_destroy(&req->resp_iov);
    qemu_sglist_destroy(&req->qsgl);
    g_free(req);
}

static void virtio_scsi_complete_req(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    VirtQueue *vq = req->vq;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    qemu_iovec_from_buf(&req->resp_iov, 0, &req->resp, req->resp_size);
    virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size);
    if (s->dataplane_started && !s->dataplane_fenced) {
        virtio_notify_irqfd(vdev, vq);
    } else {
        virtio_notify(vdev, vq);
    }

    if (req->sreq) {
        req->sreq->hba_private = NULL;
        scsi_req_unref(req->sreq);
    }
    virtio_scsi_free_req(req);
}

static void virtio_scsi_complete_req_bh(void *opaque)
{
    VirtIOSCSIReq *req = opaque;

    virtio_scsi_complete_req(req);
}

/*
 * Called from virtio_scsi_do_one_tmf_bh() in main loop thread. The main loop
 * thread cannot touch the virtqueue since that could race with an IOThread.
 */
static void virtio_scsi_complete_req_from_main_loop(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;

    if (!s->ctx || s->ctx == qemu_get_aio_context()) {
        /* No need to schedule a BH when there is no IOThread */
        virtio_scsi_complete_req(req);
    } else {
        /* Run request completion in the IOThread */
        aio_wait_bh_oneshot(s->ctx, virtio_scsi_complete_req_bh, req);
    }
}

static void virtio_scsi_bad_req(VirtIOSCSIReq *req)
{
    virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers");
    virtqueue_detach_element(req->vq, &req->elem, 0);
    virtio_scsi_free_req(req);
}
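
/*
 * Append the guest buffers described by iov/addr to the request's
 * scatter/gather list, skipping the first "skip" bytes (the virtio-scsi
 * header). Returns the number of bytes added.
 */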
static size_t qemu_sgl_concat(VirtIOSCSIReq *req, struct iovec *iov,
                              hwaddr *addr, int num, size_t skip)
{
    QEMUSGList *qsgl = &req->qsgl;
    size_t copied = 0;

    while (num) {
        if (skip >= iov->iov_len) {
            skip -= iov->iov_len;
        } else {
            qemu_sglist_add(qsgl, *addr + skip, iov->iov_len - skip);
            copied += iov->iov_len - skip;
            skip = 0;
        }
        iov++;
        addr++;
        num--;
    }

    assert(skip == 0);
    return copied;
}
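
/*
 * Copy the request header out of the out iovec, set up resp_iov to cover the
 * response header in the in iovec, and build the data-payload scatter/gather
 * list. Returns 0 on success, -EINVAL for short headers, or -ENOTSUP for
 * bidirectional transfers.
 */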
static int virtio_scsi_parse_req(VirtIOSCSIReq *req,
                                 unsigned req_size, unsigned resp_size)
{
    VirtIODevice *vdev = (VirtIODevice *) req->dev;
    size_t in_size, out_size;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &req->req, req_size) < req_size) {
        return -EINVAL;
    }

    if (qemu_iovec_concat_iov(&req->resp_iov,
                              req->elem.in_sg, req->elem.in_num, 0,
                              resp_size) < resp_size) {
        return -EINVAL;
    }

    req->resp_size = resp_size;

    /* Old BIOSes left some padding by mistake after the req_size/resp_size.
     * As a workaround, always consider the first buffer as the virtio-scsi
     * request/response, making the payload start at the second element
     * of the iovec.
     *
     * The actual length of the response header, stored in req->resp_size,
     * does not change.
     *
     * TODO: always disable this workaround for virtio 1.0 devices.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) {
        if (req->elem.out_num) {
            req_size = req->elem.out_sg[0].iov_len;
        }
        if (req->elem.in_num) {
            resp_size = req->elem.in_sg[0].iov_len;
        }
    }

    out_size = qemu_sgl_concat(req, req->elem.out_sg,
                               &req->elem.out_addr[0], req->elem.out_num,
                               req_size);
    in_size = qemu_sgl_concat(req, req->elem.in_sg,
                              &req->elem.in_addr[0], req->elem.in_num,
                              resp_size);

    if (out_size && in_size) {
        return -ENOTSUP;
    }

    if (out_size) {
        req->mode = SCSI_XFER_TO_DEV;
    } else if (in_size) {
        req->mode = SCSI_XFER_FROM_DEV;
    }

    return 0;
}

static VirtIOSCSIReq *virtio_scsi_pop_req(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSICommon *vs = (VirtIOSCSICommon *)s;
    VirtIOSCSIReq *req;

    req = virtqueue_pop(vq, sizeof(VirtIOSCSIReq) + vs->cdb_size);
    if (!req) {
        return NULL;
    }
    virtio_scsi_init_req(s, vq, req);
    return req;
}

static void virtio_scsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    VirtIOSCSIReq *req = sreq->hba_private;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(req->dev);
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    uint32_t n = virtio_get_queue_index(req->vq) - VIRTIO_SCSI_VQ_NUM_FIXED;

    assert(n < vs->conf.num_queues);
    qemu_put_be32s(f, &n);
    qemu_put_virtqueue_element(vdev, f, &req->elem);
}

static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOSCSIReq *req;
    uint32_t n;

    qemu_get_be32s(f, &n);
    assert(n < vs->conf.num_queues);
    req = qemu_get_virtqueue_element(vdev, f,
                                     sizeof(VirtIOSCSIReq) + vs->cdb_size);
    virtio_scsi_init_req(s, vs->cmd_vqs[n], req);

    if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                              sizeof(VirtIOSCSICmdResp) + vs->sense_size) < 0) {
        error_report("invalid SCSI request migration data");
        exit(1);
    }

    scsi_req_ref(sreq);
    req->sreq = sreq;
    if (req->sreq->cmd.mode != SCSI_XFER_NONE) {
        assert(req->sreq->cmd.mode == req->mode);
    }
    return req;
}
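
/*
 * Cancellation notifier for TMFs: each pending cancellation accounts for one
 * unit in tmf_req->remaining, and the TMF response is sent when the last
 * notifier fires.
 */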
typedef struct {
    Notifier notifier;
    VirtIOSCSIReq *tmf_req;
} VirtIOSCSICancelNotifier;

static void virtio_scsi_cancel_notify(Notifier *notifier, void *data)
{
    VirtIOSCSICancelNotifier *n = container_of(notifier,
                                               VirtIOSCSICancelNotifier,
                                               notifier);

    if (--n->tmf_req->remaining == 0) {
        VirtIOSCSIReq *req = n->tmf_req;

        trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                   req->req.tmf.tag, req->resp.tmf.response);
        virtio_scsi_complete_req(req);
    }
    g_free(n);
}

static inline void virtio_scsi_ctx_check(VirtIOSCSI *s, SCSIDevice *d)
{
    if (s->dataplane_started && d && blk_is_available(d->conf.blk)) {
        assert(blk_get_aio_context(d->conf.blk) == s->ctx);
    }
}

static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;
    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
    BusChild *kid;
    int target;

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
        if (!d) {
            req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
            goto out;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
            goto out;
        }
        qatomic_inc(&s->resetting);
        device_cold_reset(&d->qdev);
        qatomic_dec(&s->resetting);
        break;

    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        target = req->req.tmf.lun[1];
        qatomic_inc(&s->resetting);

        rcu_read_lock();
        QTAILQ_FOREACH_RCU(kid, &s->bus.qbus.children, sibling) {
            SCSIDevice *d1 = SCSI_DEVICE(kid->child);
            if (d1->channel == 0 && d1->id == target) {
                device_cold_reset(&d1->qdev);
            }
        }
        rcu_read_unlock();

        qatomic_dec(&s->resetting);
        break;

    default:
        g_assert_not_reached();
    }

out:
    object_unref(OBJECT(d));
    virtio_scsi_complete_req_from_main_loop(req);
}

/* Some TMFs must be processed from the main loop thread */
static void virtio_scsi_do_tmf_bh(void *opaque)
{
    VirtIOSCSI *s = opaque;
    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);
    VirtIOSCSIReq *req;
    VirtIOSCSIReq *tmp;

    GLOBAL_STATE_CODE();

    WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
        QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
            QTAILQ_REMOVE(&s->tmf_bh_list, req, next);
            QTAILQ_INSERT_TAIL(&reqs, req, next);
        }

        qemu_bh_delete(s->tmf_bh);
        s->tmf_bh = NULL;
    }

    QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) {
        QTAILQ_REMOVE(&reqs, req, next);
        virtio_scsi_do_one_tmf_bh(req);
    }
}

static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s)
{
    VirtIOSCSIReq *req;
    VirtIOSCSIReq *tmp;

    GLOBAL_STATE_CODE();

    /* Called after ioeventfd has been stopped, so tmf_bh_lock is not needed */
    if (s->tmf_bh) {
        qemu_bh_delete(s->tmf_bh);
        s->tmf_bh = NULL;
    }

    QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) {
        QTAILQ_REMOVE(&s->tmf_bh_list, req, next);

        /* SAM-6 6.3.2 Hard reset */
        req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE;
        virtio_scsi_complete_req(req);
    }
}

static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req)
{
    VirtIOSCSI *s = req->dev;

    WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) {
        QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next);

        if (!s->tmf_bh) {
            s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s);
            qemu_bh_schedule(s->tmf_bh);
        }
    }
}

/* Return 0 if the request is ready to be completed and return to guest;
 * -EINPROGRESS if the request is submitted and will be completed later, in the
 * case of async cancellation. */
static int virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIDevice *d = virtio_scsi_device_get(s, req->req.tmf.lun);
    SCSIRequest *r, *next;
    int ret = 0;

    virtio_scsi_ctx_check(s, d);
    /* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
    req->resp.tmf.response = VIRTIO_SCSI_S_OK;

    /*
     * req->req.tmf has the QEMU_PACKED attribute. Don't use virtio_tswap32s()
     * to avoid compiler errors.
     */
    req->req.tmf.subtype =
        virtio_tswap32(VIRTIO_DEVICE(s), req->req.tmf.subtype);

    trace_virtio_scsi_tmf_req(virtio_scsi_get_lun(req->req.tmf.lun),
                              req->req.tmf.tag, req->req.tmf.subtype);

    switch (req->req.tmf.subtype) {
    case VIRTIO_SCSI_T_TMF_ABORT_TASK:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            VirtIOSCSIReq *cmd_req = r->hba_private;
            if (cmd_req && cmd_req->req.cmd.tag == req->req.tmf.tag) {
                break;
            }
        }
        if (r) {
            /*
             * Assert that the request has not been completed yet, we
             * check for it in the loop above.
             */
            assert(r->hba_private);
            if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK) {
                /* "If the specified command is present in the task set, then
                 * return a service response set to FUNCTION SUCCEEDED".
                 */
                req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
            } else {
                VirtIOSCSICancelNotifier *notifier;

                req->remaining = 1;
                notifier = g_new(VirtIOSCSICancelNotifier, 1);
                notifier->tmf_req = req;
                notifier->notifier.notify = virtio_scsi_cancel_notify;
                scsi_req_cancel_async(r, &notifier->notifier);
                ret = -EINPROGRESS;
            }
        }
        break;

    case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
    case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
        virtio_scsi_defer_tmf_to_bh(req);
        ret = -EINPROGRESS;
        break;

    case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
    case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
    case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
        if (!d) {
            goto fail;
        }
        if (d->lun != virtio_scsi_get_lun(req->req.tmf.lun)) {
            goto incorrect_lun;
        }

        /* Add 1 to "remaining" until virtio_scsi_do_tmf returns.
         * This way, if the bus starts calling back to the notifiers
         * even before we finish the loop, virtio_scsi_cancel_notify
         * will not complete the TMF too early.
         */
        req->remaining = 1;
        QTAILQ_FOREACH_SAFE(r, &d->requests, next, next) {
            if (r->hba_private) {
                if (req->req.tmf.subtype == VIRTIO_SCSI_T_TMF_QUERY_TASK_SET) {
                    /* "If there is any command present in the task set, then
                     * return a service response set to FUNCTION SUCCEEDED".
                     */
                    req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
                    break;
                } else {
                    VirtIOSCSICancelNotifier *notifier;

                    req->remaining++;
                    notifier = g_new(VirtIOSCSICancelNotifier, 1);
                    notifier->notifier.notify = virtio_scsi_cancel_notify;
                    notifier->tmf_req = req;
                    scsi_req_cancel_async(r, &notifier->notifier);
                }
            }
        }
        if (--req->remaining > 0) {
            ret = -EINPROGRESS;
        }
        break;

    case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
    default:
        req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
        break;
    }

    object_unref(OBJECT(d));
    return ret;

incorrect_lun:
    req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
    object_unref(OBJECT(d));
    return ret;

fail:
    req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
    object_unref(OBJECT(d));
    return ret;
}

static void virtio_scsi_handle_ctrl_req(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIODevice *vdev = (VirtIODevice *)s;
    uint32_t type;
    int r = 0;

    if (iov_to_buf(req->elem.out_sg, req->elem.out_num, 0,
                   &type, sizeof(type)) < sizeof(type)) {
        virtio_scsi_bad_req(req);
        return;
    }

    virtio_tswap32s(vdev, &type);
    if (type == VIRTIO_SCSI_T_TMF) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlTMFReq),
                                  sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            r = virtio_scsi_do_tmf(s, req);
        }

    } else if (type == VIRTIO_SCSI_T_AN_QUERY ||
               type == VIRTIO_SCSI_T_AN_SUBSCRIBE) {
        if (virtio_scsi_parse_req(req, sizeof(VirtIOSCSICtrlANReq),
                                  sizeof(VirtIOSCSICtrlANResp)) < 0) {
            virtio_scsi_bad_req(req);
            return;
        } else {
            req->req.an.event_requested =
                virtio_tswap32(VIRTIO_DEVICE(s), req->req.an.event_requested);
            trace_virtio_scsi_an_req(virtio_scsi_get_lun(req->req.an.lun),
                                     req->req.an.event_requested);
            req->resp.an.event_actual = 0;
            req->resp.an.response = VIRTIO_SCSI_S_OK;
        }
    }
    if (r == 0) {
        if (type == VIRTIO_SCSI_T_TMF)
            trace_virtio_scsi_tmf_resp(virtio_scsi_get_lun(req->req.tmf.lun),
                                       req->req.tmf.tag,
                                       req->resp.tmf.response);
        else if (type == VIRTIO_SCSI_T_AN_QUERY ||
                 type == VIRTIO_SCSI_T_AN_SUBSCRIBE)
            trace_virtio_scsi_an_resp(virtio_scsi_get_lun(req->req.an.lun),
                                      req->resp.an.response);
        virtio_scsi_complete_req(req);
    } else {
        assert(r == -EINPROGRESS);
    }
}

static void virtio_scsi_handle_ctrl_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req;

    while ((req = virtio_scsi_pop_req(s, vq))) {
        virtio_scsi_handle_ctrl_req(s, req);
    }
}

/*
 * If dataplane is configured but not yet started, do so now and return true on
 * success.
 *
 * Dataplane is started by the core virtio code but virtqueue handler functions
 * can also be invoked when a guest kicks before DRIVER_OK, so this helper
 * function helps us deal with manually starting ioeventfd in that case.
 */
static bool virtio_scsi_defer_to_dataplane(VirtIOSCSI *s)
{
    if (!s->ctx || s->dataplane_started) {
        return false;
    }

    virtio_device_start_ioeventfd(&s->parent_obj.parent_obj);
    return !s->dataplane_fenced;
}

static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_handle_ctrl_vq(s, vq);
}

static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req)
{
    trace_virtio_scsi_cmd_resp(virtio_scsi_get_lun(req->req.cmd.lun),
                               req->req.cmd.tag,
                               req->resp.cmd.response,
                               req->resp.cmd.status);

    /* Sense data is not in req->resp and is copied separately
     * in virtio_scsi_command_complete.
     */
    req->resp_size = sizeof(VirtIOSCSICmdResp);
    virtio_scsi_complete_req(req);
}

static void virtio_scsi_command_failed(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.status = GOOD;
    switch (r->host_status) {
    case SCSI_HOST_NO_LUN:
        req->resp.cmd.response = VIRTIO_SCSI_S_INCORRECT_LUN;
        break;
    case SCSI_HOST_BUSY:
        req->resp.cmd.response = VIRTIO_SCSI_S_BUSY;
        break;
    case SCSI_HOST_TIME_OUT:
    case SCSI_HOST_ABORTED:
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
        break;
    case SCSI_HOST_BAD_RESPONSE:
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        break;
    case SCSI_HOST_RESET:
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
        break;
    case SCSI_HOST_TRANSPORT_DISRUPTED:
        req->resp.cmd.response = VIRTIO_SCSI_S_TRANSPORT_FAILURE;
        break;
    case SCSI_HOST_TARGET_FAILURE:
        req->resp.cmd.response = VIRTIO_SCSI_S_TARGET_FAILURE;
        break;
    case SCSI_HOST_RESERVATION_ERROR:
        req->resp.cmd.response = VIRTIO_SCSI_S_NEXUS_FAILURE;
        break;
    case SCSI_HOST_ALLOCATION_FAILURE:
    case SCSI_HOST_MEDIUM_ERROR:
    case SCSI_HOST_ERROR:
    default:
        req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
        break;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_command_complete(SCSIRequest *r, size_t resid)
{
    VirtIOSCSIReq *req = r->hba_private;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];
    uint32_t sense_len;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);

    if (r->io_canceled) {
        return;
    }

    req->resp.cmd.response = VIRTIO_SCSI_S_OK;
    req->resp.cmd.status = r->status;
    if (req->resp.cmd.status == GOOD) {
        req->resp.cmd.resid = virtio_tswap32(vdev, resid);
    } else {
        req->resp.cmd.resid = 0;
        sense_len = scsi_req_get_sense(r, sense, sizeof(sense));
        sense_len = MIN(sense_len, req->resp_iov.size - sizeof(req->resp.cmd));
        qemu_iovec_from_buf(&req->resp_iov, sizeof(req->resp.cmd),
                            sense, sense_len);
        req->resp.cmd.sense_len = virtio_tswap32(vdev, sense_len);
    }
    virtio_scsi_complete_cmd_req(req);
}

static int virtio_scsi_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                 uint8_t *buf, size_t buf_len,
                                 void *hba_private)
{
    VirtIOSCSIReq *req = hba_private;

    if (cmd->len == 0) {
        cmd->len = MIN(VIRTIO_SCSI_CDB_DEFAULT_SIZE, SCSI_CMD_BUF_SIZE);
        memcpy(cmd->buf, buf, cmd->len);
    }

    /* Extract the direction and mode directly from the request, for
     * host device passthrough.
     */
    cmd->xfer = req->qsgl.size;
    cmd->mode = req->mode;
    return 0;
}

static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    return &req->qsgl;
}

static void virtio_scsi_request_cancelled(SCSIRequest *r)
{
    VirtIOSCSIReq *req = r->hba_private;

    if (!req) {
        return;
    }
    if (qatomic_read(&req->dev->resetting)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_RESET;
    } else {
        req->resp.cmd.response = VIRTIO_SCSI_S_ABORTED;
    }
    virtio_scsi_complete_cmd_req(req);
}

static void virtio_scsi_fail_cmd_req(VirtIOSCSIReq *req)
{
    req->resp.cmd.response = VIRTIO_SCSI_S_FAILURE;
    virtio_scsi_complete_cmd_req(req);
}
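
/*
 * First stage of two-stage submission: parse the request and create the
 * SCSIRequest, but do not submit it yet, so that a whole batch popped from
 * the virtqueue can be submitted together in virtio_scsi_handle_cmd_vq().
 */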
static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    SCSIDevice *d;
    int rc;

    rc = virtio_scsi_parse_req(req, sizeof(VirtIOSCSICmdReq) + vs->cdb_size,
                               sizeof(VirtIOSCSICmdResp) + vs->sense_size);
    if (rc < 0) {
        if (rc == -ENOTSUP) {
            virtio_scsi_fail_cmd_req(req);
            return -ENOTSUP;
        } else {
            virtio_scsi_bad_req(req);
            return -EINVAL;
        }
    }
    trace_virtio_scsi_cmd_req(virtio_scsi_get_lun(req->req.cmd.lun),
                              req->req.cmd.tag, req->req.cmd.cdb[0]);

    d = virtio_scsi_device_get(s, req->req.cmd.lun);
    if (!d) {
        req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        virtio_scsi_complete_cmd_req(req);
        return -ENOENT;
    }
    virtio_scsi_ctx_check(s, d);
    req->sreq = scsi_req_new(d, req->req.cmd.tag,
                             virtio_scsi_get_lun(req->req.cmd.lun),
                             req->req.cmd.cdb, vs->cdb_size, req);

    if (req->sreq->cmd.mode != SCSI_XFER_NONE
        && (req->sreq->cmd.mode != req->mode ||
            req->sreq->cmd.xfer > req->qsgl.size)) {
        req->resp.cmd.response = VIRTIO_SCSI_S_OVERRUN;
        virtio_scsi_complete_cmd_req(req);
        object_unref(OBJECT(d));
        return -ENOBUFS;
    }
    scsi_req_ref(req->sreq);
    defer_call_begin();
    object_unref(OBJECT(d));
    return 0;
}

static void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
    SCSIRequest *sreq = req->sreq;

    if (scsi_req_enqueue(sreq)) {
        scsi_req_continue(sreq);
    }
    defer_call_end();
    scsi_req_unref(sreq);
}

static void virtio_scsi_handle_cmd_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    VirtIOSCSIReq *req, *next;
    int ret = 0;
    bool suppress_notifications = virtio_queue_get_notification(vq);

    QTAILQ_HEAD(, VirtIOSCSIReq) reqs = QTAILQ_HEAD_INITIALIZER(reqs);

    do {
        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 0);
        }

        while ((req = virtio_scsi_pop_req(s, vq))) {
            ret = virtio_scsi_handle_cmd_req_prepare(s, req);
            if (!ret) {
                QTAILQ_INSERT_TAIL(&reqs, req, next);
            } else if (ret == -EINVAL) {
                /* The device is broken and shouldn't process any request */
                while (!QTAILQ_EMPTY(&reqs)) {
                    req = QTAILQ_FIRST(&reqs);
                    QTAILQ_REMOVE(&reqs, req, next);
                    defer_call_end();
                    scsi_req_unref(req->sreq);
                    virtqueue_detach_element(req->vq, &req->elem, 0);
                    virtio_scsi_free_req(req);
                }
            }
        }

        if (suppress_notifications) {
            virtio_queue_set_notification(vq, 1);
        }
    } while (ret != -EINVAL && !virtio_queue_empty(vq));

    QTAILQ_FOREACH_SAFE(req, &reqs, next, next) {
        virtio_scsi_handle_cmd_req_submit(s, req);
    }
}

static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq)
{
    /* use non-QOM casts in the data path */
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_handle_cmd_vq(s, vq);
}

static void virtio_scsi_get_config(VirtIODevice *vdev,
                                   uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(vdev);

    virtio_stl_p(vdev, &scsiconf->num_queues, s->conf.num_queues);
    virtio_stl_p(vdev, &scsiconf->seg_max,
                 s->conf.seg_max_adjust ? s->conf.virtqueue_size - 2 : 128 - 2);
    virtio_stl_p(vdev, &scsiconf->max_sectors, s->conf.max_sectors);
    virtio_stl_p(vdev, &scsiconf->cmd_per_lun, s->conf.cmd_per_lun);
    virtio_stl_p(vdev, &scsiconf->event_info_size, sizeof(VirtIOSCSIEvent));
    virtio_stl_p(vdev, &scsiconf->sense_size, s->sense_size);
    virtio_stl_p(vdev, &scsiconf->cdb_size, s->cdb_size);
    virtio_stw_p(vdev, &scsiconf->max_channel, VIRTIO_SCSI_MAX_CHANNEL);
    virtio_stw_p(vdev, &scsiconf->max_target, VIRTIO_SCSI_MAX_TARGET);
    virtio_stl_p(vdev, &scsiconf->max_lun, VIRTIO_SCSI_MAX_LUN);
}

static void virtio_scsi_set_config(VirtIODevice *vdev,
                                   const uint8_t *config)
{
    VirtIOSCSIConfig *scsiconf = (VirtIOSCSIConfig *)config;
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    if ((uint32_t) virtio_ldl_p(vdev, &scsiconf->sense_size) >= 65536 ||
        (uint32_t) virtio_ldl_p(vdev, &scsiconf->cdb_size) >= 256) {
        virtio_error(vdev,
                     "bad data written to virtio-scsi configuration space");
        return;
    }

    vs->sense_size = virtio_ldl_p(vdev, &scsiconf->sense_size);
    vs->cdb_size = virtio_ldl_p(vdev, &scsiconf->cdb_size);
}

static uint64_t virtio_scsi_get_features(VirtIODevice *vdev,
                                         uint64_t requested_features,
                                         Error **errp)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    /* Firstly sync all virtio-scsi possible supported features */
    requested_features |= s->host_features;
    return requested_features;
}

static void virtio_scsi_reset(VirtIODevice *vdev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev);

    assert(!s->dataplane_started);

    virtio_scsi_reset_tmf_bh(s);

    qatomic_inc(&s->resetting);
    bus_cold_reset(BUS(&s->bus));
    qatomic_dec(&s->resetting);

    vs->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    vs->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;
    s->events_dropped = false;
}

typedef struct {
    uint32_t event;
    uint32_t reason;
    union {
        /* Used by messages specific to a device */
        struct {
            uint32_t id;
            uint32_t lun;
        } address;
    };
} VirtIOSCSIEventInfo;
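
/*
 * Queue an event on the event virtqueue. If no buffer is available, record
 * that events were dropped so VIRTIO_SCSI_T_EVENTS_MISSED can be reported
 * with the next event.
 */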
static void virtio_scsi_push_event(VirtIOSCSI *s,
                                   const VirtIOSCSIEventInfo *info)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIOSCSIReq *req;
    VirtIOSCSIEvent *evt;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint32_t event = info->event;
    uint32_t reason = info->reason;

    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    req = virtio_scsi_pop_req(s, vs->event_vq);
    if (!req) {
        s->events_dropped = true;
        return;
    }

    if (s->events_dropped) {
        event |= VIRTIO_SCSI_T_EVENTS_MISSED;
        s->events_dropped = false;
    }

    if (virtio_scsi_parse_req(req, 0, sizeof(VirtIOSCSIEvent))) {
        virtio_scsi_bad_req(req);
        return;
    }

    evt = &req->resp.event;
    memset(evt, 0, sizeof(VirtIOSCSIEvent));
    evt->event = virtio_tswap32(vdev, event);
    evt->reason = virtio_tswap32(vdev, reason);
    if (event != VIRTIO_SCSI_T_EVENTS_MISSED) {
        evt->lun[0] = 1;
        evt->lun[1] = info->address.id;

        /* Linux wants us to keep the same encoding we use for REPORT LUNS. */
        if (info->address.lun >= 256) {
            evt->lun[2] = (info->address.lun >> 8) | 0x40;
        }
        evt->lun[3] = info->address.lun & 0xFF;
    }
    trace_virtio_scsi_event(virtio_scsi_get_lun(evt->lun), event, reason);

    virtio_scsi_complete_req(req);
}

static void virtio_scsi_handle_event_vq(VirtIOSCSI *s, VirtQueue *vq)
{
    if (s->events_dropped) {
        VirtIOSCSIEventInfo info = {
            .event = VIRTIO_SCSI_T_NO_EVENT,
        };
        virtio_scsi_push_event(s, &info);
    }
}

static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);

    if (virtio_scsi_defer_to_dataplane(s)) {
        return;
    }

    virtio_scsi_handle_event_vq(s, vq);
}

static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_CHANGE) &&
        dev->type != TYPE_ROM) {
        VirtIOSCSIEventInfo info = {
            .event   = VIRTIO_SCSI_T_PARAM_CHANGE,
            .reason  = sense.asc | (sense.ascq << 8),
            .address = {
                .id  = dev->id,
                .lun = dev->lun,
            },
        };

        virtio_scsi_push_event(s, &info);
    }
}

static void virtio_scsi_pre_hotplug(HotplugHandler *hotplug_dev,
                                    DeviceState *dev, Error **errp)
{
    SCSIDevice *sd = SCSI_DEVICE(dev);

    sd->hba_supports_iothread = true;
}

static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    int ret;

    if (s->ctx && !s->dataplane_fenced) {
        ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
        if (ret < 0) {
            return;
        }
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        VirtIOSCSIEventInfo info = {
            .event   = VIRTIO_SCSI_T_TRANSPORT_RESET,
            .reason  = VIRTIO_SCSI_EVT_RESET_RESCAN,
            .address = {
                .id  = sd->id,
                .lun = sd->lun,
            },
        };

        virtio_scsi_push_event(s, &info);
        scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
    }
}

static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                  Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
    VirtIOSCSI *s = VIRTIO_SCSI(vdev);
    SCSIDevice *sd = SCSI_DEVICE(dev);
    VirtIOSCSIEventInfo info = {
        .event   = VIRTIO_SCSI_T_TRANSPORT_RESET,
        .reason  = VIRTIO_SCSI_EVT_RESET_REMOVED,
        .address = {
            .id  = sd->id,
            .lun = sd->lun,
        },
    };

    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);

    if (s->ctx) {
        /* If other users keep the BlockBackend in the iothread, that's ok */
        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
        virtio_scsi_push_event(s, &info);
        scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED));
    }
}

/* Suspend virtqueue ioeventfd processing during drain */
static void virtio_scsi_drained_begin(SCSIBus *bus)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED +
                            s->parent_obj.conf.num_queues;

    /*
     * Drain is called when stopping dataplane but the host notifier has
     * already been detached. Detaching multiple times is a no-op if nothing
     * else is monitoring the same file descriptor, but avoid it just in case.
     *
     * Also, don't detach if dataplane has not even been started yet because
     * the host notifier isn't attached.
     */
    if (s->dataplane_stopping || !s->dataplane_started) {
        return;
    }

    for (uint32_t i = 0; i < total_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        virtio_queue_aio_detach_host_notifier(vq, s->ctx);
    }
}

/* Resume virtqueue ioeventfd processing after drain */
static void virtio_scsi_drained_end(SCSIBus *bus)
{
    VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s);
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    uint32_t total_queues = VIRTIO_SCSI_VQ_NUM_FIXED +
                            s->parent_obj.conf.num_queues;

    /*
     * Drain is called when stopping dataplane. Keep the host notifier detached
     * so it's not left dangling after dataplane is stopped.
     *
     * Also, don't attach if dataplane has not even been started yet. We're not
     * ready.
     */
    if (s->dataplane_stopping || !s->dataplane_started) {
        return;
    }

    for (uint32_t i = 0; i < total_queues; i++) {
        VirtQueue *vq = virtio_get_queue(vdev, i);
        if (vq == vs->event_vq) {
            virtio_queue_aio_attach_host_notifier_no_poll(vq, s->ctx);
        } else {
            virtio_queue_aio_attach_host_notifier(vq, s->ctx);
        }
    }
}

static struct SCSIBusInfo virtio_scsi_scsi_info = {
    .tcq = true,
    .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
    .max_target = VIRTIO_SCSI_MAX_TARGET,
    .max_lun = VIRTIO_SCSI_MAX_LUN,

    .complete = virtio_scsi_command_complete,
    .fail = virtio_scsi_command_failed,
    .cancel = virtio_scsi_request_cancelled,
    .change = virtio_scsi_change,
    .parse_cdb = virtio_scsi_parse_cdb,
    .get_sg_list = virtio_scsi_get_sg_list,
    .save_request = virtio_scsi_save_request,
    .load_request = virtio_scsi_load_request,
    .drained_begin = virtio_scsi_drained_begin,
    .drained_end = virtio_scsi_drained_end,
};

void virtio_scsi_common_realize(DeviceState *dev,
                                VirtIOHandleOutput ctrl,
                                VirtIOHandleOutput evt,
                                VirtIOHandleOutput cmd,
                                Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *s = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_init(vdev, VIRTIO_ID_SCSI, sizeof(VirtIOSCSIConfig));

    if (s->conf.num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) {
        s->conf.num_queues = 1;
    }
    if (s->conf.num_queues == 0 ||
        s->conf.num_queues > VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                         "must be a positive integer less than %d.",
                   s->conf.num_queues,
                   VIRTIO_QUEUE_MAX - VIRTIO_SCSI_VQ_NUM_FIXED);
        virtio_cleanup(vdev);
        return;
    }
    if (s->conf.virtqueue_size <= 2) {
        error_setg(errp, "invalid virtqueue_size property (= %" PRIu32 "), "
                   "must be > 2", s->conf.virtqueue_size);
        return;
    }
    s->cmd_vqs = g_new0(VirtQueue *, s->conf.num_queues);
    s->sense_size = VIRTIO_SCSI_SENSE_DEFAULT_SIZE;
    s->cdb_size = VIRTIO_SCSI_CDB_DEFAULT_SIZE;

    s->ctrl_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, ctrl);
    s->event_vq = virtio_add_queue(vdev, s->conf.virtqueue_size, evt);
    for (i = 0; i < s->conf.num_queues; i++) {
        s->cmd_vqs[i] = virtio_add_queue(vdev, s->conf.virtqueue_size, cmd);
    }
}

static void virtio_scsi_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSI *s = VIRTIO_SCSI(dev);
    Error *err = NULL;

    QTAILQ_INIT(&s->tmf_bh_list);
    qemu_mutex_init(&s->tmf_bh_lock);

    virtio_scsi_common_realize(dev,
                               virtio_scsi_handle_ctrl,
                               virtio_scsi_handle_event,
                               virtio_scsi_handle_cmd,
                               &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    scsi_bus_init_named(&s->bus, sizeof(s->bus), dev,
                        &virtio_scsi_scsi_info, vdev->bus_name);
    /* override default SCSI bus hotplug-handler, with virtio-scsi's one */
    qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev));

    virtio_scsi_dataplane_setup(s, errp);
}

void virtio_scsi_common_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
    int i;

    virtio_delete_queue(vs->ctrl_vq);
    virtio_delete_queue(vs->event_vq);
    for (i = 0; i < vs->conf.num_queues; i++) {
        virtio_delete_queue(vs->cmd_vqs[i]);
    }
    g_free(vs->cmd_vqs);
    virtio_cleanup(vdev);
}

static void virtio_scsi_device_unrealize(DeviceState *dev)
{
    VirtIOSCSI *s = VIRTIO_SCSI(dev);

    virtio_scsi_reset_tmf_bh(s);

    qbus_set_hotplug_handler(BUS(&s->bus), NULL);
    virtio_scsi_common_unrealize(dev);
    qemu_mutex_destroy(&s->tmf_bh_lock);
}

static const Property virtio_scsi_properties[] = {
    DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues,
                       VIRTIO_SCSI_AUTO_NUM_QUEUES),
    DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI,
                       parent_obj.conf.virtqueue_size, 256),
    DEFINE_PROP_BOOL("seg_max_adjust", VirtIOSCSI,
                     parent_obj.conf.seg_max_adjust, true),
    DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors,
                       0xFFFF),
    DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSI, parent_obj.conf.cmd_per_lun,
                       128),
    DEFINE_PROP_BIT("hotplug", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_HOTPLUG, true),
    DEFINE_PROP_BIT("param_change", VirtIOSCSI, host_features,
                    VIRTIO_SCSI_F_CHANGE, true),
    DEFINE_PROP_LINK("iothread", VirtIOSCSI, parent_obj.conf.iothread,
                     TYPE_IOTHREAD, IOThread *),
};

static const VMStateDescription vmstate_virtio_scsi = {
    .name = "virtio-scsi",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static void virtio_scsi_common_class_init(ObjectClass *klass, void *data)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    vdc->get_config = virtio_scsi_get_config;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static void virtio_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    device_class_set_props(dc, virtio_scsi_properties);
    dc->vmsd = &vmstate_virtio_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_scsi_device_realize;
    vdc->unrealize = virtio_scsi_device_unrealize;
    vdc->set_config = virtio_scsi_set_config;
    vdc->get_features = virtio_scsi_get_features;
    vdc->reset = virtio_scsi_reset;
    vdc->start_ioeventfd = virtio_scsi_dataplane_start;
    vdc->stop_ioeventfd = virtio_scsi_dataplane_stop;
    hc->pre_plug = virtio_scsi_pre_hotplug;
    hc->plug = virtio_scsi_hotplug;
    hc->unplug = virtio_scsi_hotunplug;
}

static const TypeInfo virtio_scsi_common_info = {
    .name = TYPE_VIRTIO_SCSI_COMMON,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOSCSICommon),
    .abstract = true,
    .class_init = virtio_scsi_common_class_init,
};

static const TypeInfo virtio_scsi_info = {
    .name = TYPE_VIRTIO_SCSI,
    .parent = TYPE_VIRTIO_SCSI_COMMON,
    .instance_size = sizeof(VirtIOSCSI),
    .class_init = virtio_scsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_scsi_common_info);
    type_register_static(&virtio_scsi_info);
}

type_init(virtio_register_types)