2
0

vmw_pvscsi.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321
  1. /*
  2. * QEMU VMWARE PVSCSI paravirtual SCSI bus
  3. *
  4. * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
  5. *
  6. * Developed by Daynix Computing LTD (http://www.daynix.com)
  7. *
  8. * Based on implementation by Paolo Bonzini
  9. * http://lists.gnu.org/archive/html/qemu-devel/2011-08/msg00729.html
  10. *
  11. * Authors:
  12. * Paolo Bonzini <pbonzini@redhat.com>
  13. * Dmitry Fleytman <dmitry@daynix.com>
  14. * Yan Vugenfirer <yan@daynix.com>
  15. *
  16. * This work is licensed under the terms of the GNU GPL, version 2.
  17. * See the COPYING file in the top-level directory.
  18. *
  19. * NOTE about MSI-X:
  20. * MSI-X support has been removed for the moment because it leads Windows OS
  21. * to crash on startup. The crash happens because Windows driver requires
  22. * MSI-X shared memory to be part of the same BAR used for rings state
 * registers, etc. This is not supported by QEMU infrastructure so a
 * separate BAR is created for MSI-X purposes. Windows driver fails to
 * deal with 2 BARs.
  25. *
  26. */
  27. #include "qemu/osdep.h"
  28. #include "qapi/error.h"
  29. #include "qemu/main-loop.h"
  30. #include "qemu/module.h"
  31. #include "hw/scsi/scsi.h"
  32. #include "migration/vmstate.h"
  33. #include "scsi/constants.h"
  34. #include "hw/pci/msi.h"
  35. #include "hw/qdev-properties.h"
  36. #include "vmw_pvscsi.h"
  37. #include "trace.h"
#define PVSCSI_USE_64BIT (true)
#define PVSCSI_PER_VECTOR_MASK (false)

#define PVSCSI_MAX_DEVS (64)
#define PVSCSI_MSIX_NUM_VECTORS (1)

/* Upper bound on guest SG chain length we are willing to walk */
#define PVSCSI_MAX_SG_ELEM 2048

/* SETUP_RINGS carries the largest command payload */
#define PVSCSI_MAX_CMD_DATA_WORDS \
    (sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t))

/*
 * Accessors for the PVSCSIRingsState page shared with the guest: each
 * access is a little-endian 32-bit PCI DMA read/write at rs_pa + field
 * offset.  'm' must be the 'rings' member of a PVSCSIState.
 */
#define RS_GET_FIELD(m, field) \
    (ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
                    (m)->rs_pa + offsetof(struct PVSCSIRingsState, field)))
#define RS_SET_FIELD(m, field, val) \
    (stl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
                    (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val))

typedef struct PVSCSIClass {
    PCIDeviceClass parent_class;
    DeviceRealize parent_dc_realize;
} PVSCSIClass;

#define TYPE_PVSCSI "pvscsi"
#define PVSCSI(obj) OBJECT_CHECK(PVSCSIState, (obj), TYPE_PVSCSI)

#define PVSCSI_DEVICE_CLASS(klass) \
    OBJECT_CLASS_CHECK(PVSCSIClass, (klass), TYPE_PVSCSI)
#define PVSCSI_DEVICE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(PVSCSIClass, (obj), TYPE_PVSCSI)

/* Compatibility flags for migration */
#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT 0
#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION \
    (1 << PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT)
#define PVSCSI_COMPAT_DISABLE_PCIE_BIT 1
#define PVSCSI_COMPAT_DISABLE_PCIE \
    (1 << PVSCSI_COMPAT_DISABLE_PCIE_BIT)

#define PVSCSI_USE_OLD_PCI_CONFIGURATION(s) \
    ((s)->compat_flags & PVSCSI_COMPAT_OLD_PCI_CONFIGURATION)
/* MSI capability offset differs between legacy and current PCI layouts */
#define PVSCSI_MSI_OFFSET(s) \
    (PVSCSI_USE_OLD_PCI_CONFIGURATION(s) ? 0x50 : 0x7c)
#define PVSCSI_EXP_EP_OFFSET (0x40)
/*
 * Host-side view of the three guest rings (request, completion, message).
 * Indices increase monotonically and are wrapped with the power-of-two
 * *_len_mask values when converted to ring slots.
 */
typedef struct PVSCSIRingInfo {
    uint64_t rs_pa;             /* Guest PA of shared PVSCSIRingsState page */
    uint32_t txr_len_mask;      /* Request-ring index wrap mask (2^n - 1) */
    uint32_t rxr_len_mask;      /* Completion-ring index wrap mask */
    uint32_t msg_len_mask;      /* Message-ring index wrap mask */
    uint64_t req_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
    uint64_t cmp_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
    uint64_t msg_ring_pages_pa[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
    uint64_t consumed_ptr;      /* Device-side request-ring consumer index */
    uint64_t filled_cmp_ptr;    /* Device-side completion-ring producer index */
    uint64_t filled_msg_ptr;    /* Device-side message-ring producer index */
} PVSCSIRingInfo;
/* Cursor for walking a guest scatter-gather element chain */
typedef struct PVSCSISGState {
    hwaddr elemAddr;    /* Guest PA of the next PVSCSISGElement to read */
    hwaddr dataAddr;    /* Guest PA of the current element's data */
    uint32_t resid;     /* Bytes remaining in the current element */
} PVSCSISGState;
typedef QTAILQ_HEAD(, PVSCSIRequest) PVSCSIRequestList;

/* Device state for one PVSCSI PCI adapter instance */
typedef struct {
    PCIDevice parent_obj;
    MemoryRegion io_space;
    SCSIBus bus;
    QEMUBH *completion_worker;          /* BH that drains completion_queue */
    PVSCSIRequestList pending_queue;    /* Requests in flight in SCSI layer */
    PVSCSIRequestList completion_queue; /* Finished, awaiting ring delivery */

    uint64_t reg_interrupt_status;      /* Interrupt status register value */
    uint64_t reg_interrupt_enabled;     /* Interrupt mask register value */
    uint64_t reg_command_status;        /* Command status register value */

    /* Command data adoption mechanism */
    uint64_t curr_cmd;                  /* Last command arrived */
    uint32_t curr_cmd_data_cntr;        /* Amount of data for last command */

    /* Collector for current command data */
    uint32_t curr_cmd_data[PVSCSI_MAX_CMD_DATA_WORDS];

    uint8_t rings_info_valid;           /* Whether data rings initialized */
    uint8_t msg_ring_info_valid;        /* Whether message ring initialized */
    uint8_t use_msg;                    /* Whether to use message ring */
    uint8_t msi_used;                   /* For migration compatibility */
    PVSCSIRingInfo rings;               /* Data transfer rings manager */
    uint32_t resetting;                 /* Reset in progress */
    uint32_t compat_flags;              /* PVSCSI_COMPAT_* migration flags */
} PVSCSIState;
/* Per-request bookkeeping, linked from SCSIRequest.hba_private */
typedef struct PVSCSIRequest {
    SCSIRequest *sreq;              /* SCSI-layer request (NULL once unref'd) */
    PVSCSIState *dev;
    uint8_t sense_key;              /* Cached sense key, for tracing */
    uint8_t completed;              /* Set once moved to completion queue */
    int lun;                        /* Resolved single-byte LUN */
    QEMUSGList sgl;                 /* Host SG list built from guest chain */
    PVSCSISGState sg;               /* Guest SG-chain walk cursor */
    struct PVSCSIRingReqDesc req;   /* Copy of the guest request descriptor */
    struct PVSCSIRingCmpDesc cmp;   /* Completion descriptor being built */
    QTAILQ_ENTRY(PVSCSIRequest) next;
} PVSCSIRequest;
  126. /* Integer binary logarithm */
  127. static int
  128. pvscsi_log2(uint32_t input)
  129. {
  130. int log = 0;
  131. assert(input > 0);
  132. while (input >> ++log) {
  133. }
  134. return log;
  135. }
/*
 * Capture the guest-supplied ring layout (PVSCSI_CMD_SETUP_RINGS payload)
 * and reset both rings' indices in the shared state page.  The caller has
 * already validated the page counts.
 */
static void
pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
{
    int i;
    uint32_t txr_len_log2, rxr_len_log2;
    uint32_t req_ring_size, cmp_ring_size;

    /* Guest physical address of the shared PVSCSIRingsState page */
    m->rs_pa = ri->ringsStatePPN << VMW_PAGE_SHIFT;

    req_ring_size = ri->reqRingNumPages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
    cmp_ring_size = ri->cmpRingNumPages * PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;

    /*
     * pvscsi_log2(n - 1) is the bit length of n - 1, giving a mask that
     * covers at least n - 1; used for index wrap-around below.
     */
    txr_len_log2 = pvscsi_log2(req_ring_size - 1);
    rxr_len_log2 = pvscsi_log2(cmp_ring_size - 1);

    m->txr_len_mask = MASK(txr_len_log2);
    m->rxr_len_mask = MASK(rxr_len_log2);

    m->consumed_ptr = 0;
    m->filled_cmp_ptr = 0;

    /* Record the guest physical address of every ring page */
    for (i = 0; i < ri->reqRingNumPages; i++) {
        m->req_ring_pages_pa[i] = ri->reqRingPPNs[i] << VMW_PAGE_SHIFT;
    }

    for (i = 0; i < ri->cmpRingNumPages; i++) {
        m->cmp_ring_pages_pa[i] = ri->cmpRingPPNs[i] << VMW_PAGE_SHIFT;
    }

    /* Publish pristine producer/consumer indices to the guest */
    RS_SET_FIELD(m, reqProdIdx, 0);
    RS_SET_FIELD(m, reqConsIdx, 0);
    RS_SET_FIELD(m, reqNumEntriesLog2, txr_len_log2);

    RS_SET_FIELD(m, cmpProdIdx, 0);
    RS_SET_FIELD(m, cmpConsIdx, 0);
    RS_SET_FIELD(m, cmpNumEntriesLog2, rxr_len_log2);

    trace_pvscsi_ring_init_data(txr_len_log2, rxr_len_log2);

    /* Flush ring state page changes */
    smp_wmb();
}
/*
 * Validate and capture the message-ring layout (PVSCSI_CMD_SETUP_MSG_RING
 * payload).  Returns 0 on success, -1 if the page count is out of range.
 */
static int
pvscsi_ring_init_msg(PVSCSIRingInfo *m, PVSCSICmdDescSetupMsgRing *ri)
{
    int i;
    uint32_t len_log2;
    uint32_t ring_size;

    /* Reject a zero-sized or oversized ring supplied by the guest */
    if (!ri->numPages || ri->numPages > PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES) {
        return -1;
    }
    ring_size = ri->numPages * PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    len_log2 = pvscsi_log2(ring_size - 1);

    m->msg_len_mask = MASK(len_log2);

    m->filled_msg_ptr = 0;

    for (i = 0; i < ri->numPages; i++) {
        m->msg_ring_pages_pa[i] = ri->ringPPNs[i] << VMW_PAGE_SHIFT;
    }

    /* Reset message-ring indices in the shared state page */
    RS_SET_FIELD(m, msgProdIdx, 0);
    RS_SET_FIELD(m, msgConsIdx, 0);
    RS_SET_FIELD(m, msgNumEntriesLog2, len_log2);

    trace_pvscsi_ring_init_msg(len_log2);

    /* Flush ring state page changes */
    smp_wmb();

    return 0;
}
  191. static void
  192. pvscsi_ring_cleanup(PVSCSIRingInfo *mgr)
  193. {
  194. mgr->rs_pa = 0;
  195. mgr->txr_len_mask = 0;
  196. mgr->rxr_len_mask = 0;
  197. mgr->msg_len_mask = 0;
  198. mgr->consumed_ptr = 0;
  199. mgr->filled_cmp_ptr = 0;
  200. mgr->filled_msg_ptr = 0;
  201. memset(mgr->req_ring_pages_pa, 0, sizeof(mgr->req_ring_pages_pa));
  202. memset(mgr->cmp_ring_pages_pa, 0, sizeof(mgr->cmp_ring_pages_pa));
  203. memset(mgr->msg_ring_pages_pa, 0, sizeof(mgr->msg_ring_pages_pa));
  204. }
/*
 * Return the guest physical address of the next request descriptor to
 * consume, or 0 when the ring is empty or the guest-written producer
 * index is implausibly far ahead of our consumer index.
 */
static hwaddr
pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr)
{
    uint32_t ready_ptr = RS_GET_FIELD(mgr, reqProdIdx);
    uint32_t ring_size = PVSCSI_MAX_NUM_PAGES_REQ_RING
                         * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;

    /*
     * The distance check bounds a (possibly malicious) producer index by
     * the largest ring size any configuration could have.
     */
    if (ready_ptr != mgr->consumed_ptr
        && ready_ptr - mgr->consumed_ptr < ring_size) {
        uint32_t next_ready_ptr =
            mgr->consumed_ptr++ & mgr->txr_len_mask;
        uint32_t next_ready_page =
            next_ready_ptr / PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
        uint32_t inpage_idx =
            next_ready_ptr % PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;

        return mgr->req_ring_pages_pa[next_ready_page] +
               inpage_idx * sizeof(PVSCSIRingReqDesc);
    } else {
        return 0;
    }
}
/* Publish our consumer index so the guest may reuse consumed slots */
static void
pvscsi_ring_flush_req(PVSCSIRingInfo *mgr)
{
    RS_SET_FIELD(mgr, reqConsIdx, mgr->consumed_ptr);
}
/*
 * Claim the next completion-ring slot and return its guest physical
 * address.  Deliberately performs no overflow check (see below).
 */
static hwaddr
pvscsi_ring_pop_cmp_descr(PVSCSIRingInfo *mgr)
{
    /*
     * According to Linux driver code it explicitly verifies that number
     * of requests being processed by device is less than the size of
     * completion queue, so device may omit completion queue overflow
     * conditions check. We assume that this is true for other (Windows)
     * drivers as well.
     */
    uint32_t free_cmp_ptr =
        mgr->filled_cmp_ptr++ & mgr->rxr_len_mask;
    uint32_t free_cmp_page =
        free_cmp_ptr / PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
    uint32_t inpage_idx =
        free_cmp_ptr % PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
    return mgr->cmp_ring_pages_pa[free_cmp_page] +
           inpage_idx * sizeof(PVSCSIRingCmpDesc);
}
/*
 * Claim the next message-ring slot and return its guest physical
 * address.  Callers must first check pvscsi_ring_msg_has_room().
 */
static hwaddr
pvscsi_ring_pop_msg_descr(PVSCSIRingInfo *mgr)
{
    uint32_t free_msg_ptr =
        mgr->filled_msg_ptr++ & mgr->msg_len_mask;
    uint32_t free_msg_page =
        free_msg_ptr / PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    uint32_t inpage_idx =
        free_msg_ptr % PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    return mgr->msg_ring_pages_pa[free_msg_page] +
           inpage_idx * sizeof(PVSCSIRingMsgDesc);
}
/* Publish the completion producer index after descriptor writes landed */
static void
pvscsi_ring_flush_cmp(PVSCSIRingInfo *mgr)
{
    /* Flush descriptor changes */
    smp_wmb();

    trace_pvscsi_ring_flush_cmp(mgr->filled_cmp_ptr);

    RS_SET_FIELD(mgr, cmpProdIdx, mgr->filled_cmp_ptr);
}
/*
 * True when at least one free message slot exists.  Producer/consumer
 * indices wrap modulo 2^32; their unsigned difference is the fill level.
 */
static bool
pvscsi_ring_msg_has_room(PVSCSIRingInfo *mgr)
{
    uint32_t prodIdx = RS_GET_FIELD(mgr, msgProdIdx);
    uint32_t consIdx = RS_GET_FIELD(mgr, msgConsIdx);

    return (prodIdx - consIdx) < (mgr->msg_len_mask + 1);
}
/* Publish the message producer index after descriptor writes landed */
static void
pvscsi_ring_flush_msg(PVSCSIRingInfo *mgr)
{
    /* Flush descriptor changes */
    smp_wmb();

    trace_pvscsi_ring_flush_msg(mgr->filled_msg_ptr);

    RS_SET_FIELD(mgr, msgProdIdx, mgr->filled_msg_ptr);
}
/* Return registers, command FSM, rings and queues to power-on defaults */
static void
pvscsi_reset_state(PVSCSIState *s)
{
    s->curr_cmd = PVSCSI_CMD_FIRST;
    s->curr_cmd_data_cntr = 0;
    s->reg_command_status = PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
    s->reg_interrupt_status = 0;
    pvscsi_ring_cleanup(&s->rings);
    s->rings_info_valid = FALSE;
    s->msg_ring_info_valid = FALSE;
    /* Both queues must already be empty; re-init drops stale head state */
    QTAILQ_INIT(&s->pending_queue);
    QTAILQ_INIT(&s->completion_queue);
}
/*
 * Propagate interrupt state to the guest: with MSI enabled a message is
 * delivered only when (enabled & status) is non-zero; with legacy INTx
 * the line level simply tracks that conjunction.
 */
static void
pvscsi_update_irq_status(PVSCSIState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    bool should_raise = s->reg_interrupt_enabled & s->reg_interrupt_status;

    trace_pvscsi_update_irq_level(should_raise, s->reg_interrupt_enabled,
                                  s->reg_interrupt_status);

    if (msi_enabled(d)) {
        if (should_raise) {
            trace_pvscsi_update_irq_msi();
            msi_notify(d, PVSCSI_VECTOR_COMPLETION);
        }
        return;
    }

    pci_set_irq(d, !!should_raise);
}
/* Latch the completion interrupt cause and re-evaluate the IRQ line */
static void
pvscsi_raise_completion_interrupt(PVSCSIState *s)
{
    s->reg_interrupt_status |= PVSCSI_INTR_CMPL_0;

    /* Memory barrier to flush interrupt status register changes*/
    smp_wmb();

    pvscsi_update_irq_status(s);
}
/* Latch the message interrupt cause and re-evaluate the IRQ line */
static void
pvscsi_raise_message_interrupt(PVSCSIState *s)
{
    s->reg_interrupt_status |= PVSCSI_INTR_MSG_0;

    /* Memory barrier to flush interrupt status register changes*/
    smp_wmb();

    pvscsi_update_irq_status(s);
}
/* Copy a completion descriptor into the next completion-ring slot */
static void
pvscsi_cmp_ring_put(PVSCSIState *s, struct PVSCSIRingCmpDesc *cmp_desc)
{
    hwaddr cmp_descr_pa;

    cmp_descr_pa = pvscsi_ring_pop_cmp_descr(&s->rings);
    trace_pvscsi_cmp_ring_put(cmp_descr_pa);
    cpu_physical_memory_write(cmp_descr_pa, (void *)cmp_desc,
                              sizeof(*cmp_desc));
}
/* Copy an event-message descriptor into the next message-ring slot */
static void
pvscsi_msg_ring_put(PVSCSIState *s, struct PVSCSIRingMsgDesc *msg_desc)
{
    hwaddr msg_descr_pa;

    msg_descr_pa = pvscsi_ring_pop_msg_descr(&s->rings);
    trace_pvscsi_msg_ring_put(msg_descr_pa);
    cpu_physical_memory_write(msg_descr_pa, (void *)msg_desc,
                              sizeof(*msg_desc));
}
  347. static void
  348. pvscsi_process_completion_queue(void *opaque)
  349. {
  350. PVSCSIState *s = opaque;
  351. PVSCSIRequest *pvscsi_req;
  352. bool has_completed = false;
  353. while (!QTAILQ_EMPTY(&s->completion_queue)) {
  354. pvscsi_req = QTAILQ_FIRST(&s->completion_queue);
  355. QTAILQ_REMOVE(&s->completion_queue, pvscsi_req, next);
  356. pvscsi_cmp_ring_put(s, &pvscsi_req->cmp);
  357. g_free(pvscsi_req);
  358. has_completed = true;
  359. }
  360. if (has_completed) {
  361. pvscsi_ring_flush_cmp(&s->rings);
  362. pvscsi_raise_completion_interrupt(s);
  363. }
  364. }
/*
 * Full adapter reset: cancel all in-flight requests (the resetting flag
 * makes them complete with BTSTAT_BUSRESET), flush the resulting
 * completions to the guest, then restore power-on state.
 */
static void
pvscsi_reset_adapter(PVSCSIState *s)
{
    s->resetting++;
    qbus_reset_all(BUS(&s->bus));
    s->resetting--;
    pvscsi_process_completion_queue(s);
    /* Bus reset must have cancelled every pending request */
    assert(QTAILQ_EMPTY(&s->pending_queue));
    pvscsi_reset_state(s);
}
  375. static void
  376. pvscsi_schedule_completion_processing(PVSCSIState *s)
  377. {
  378. /* Try putting more complete requests on the ring. */
  379. if (!QTAILQ_EMPTY(&s->completion_queue)) {
  380. qemu_bh_schedule(s->completion_worker);
  381. }
  382. }
/*
 * Move a finished request from the pending queue to the completion
 * queue and schedule the bottom half that writes it to the guest ring.
 * Must not be called twice for the same request.
 */
static void
pvscsi_complete_request(PVSCSIState *s, PVSCSIRequest *r)
{
    assert(!r->completed);

    trace_pvscsi_complete_request(r->cmp.context, r->cmp.dataLen,
                                  r->sense_key);
    if (r->sreq != NULL) {
        /* Drop our reference to the SCSI-layer request */
        scsi_req_unref(r->sreq);
        r->sreq = NULL;
    }
    r->completed = 1;
    QTAILQ_REMOVE(&s->pending_queue, r, next);
    QTAILQ_INSERT_TAIL(&s->completion_queue, r, next);
    pvscsi_schedule_completion_processing(s);
}
  398. static QEMUSGList *pvscsi_get_sg_list(SCSIRequest *r)
  399. {
  400. PVSCSIRequest *req = r->hba_private;
  401. trace_pvscsi_get_sg_list(req->sgl.nsg, req->sgl.size);
  402. return &req->sgl;
  403. }
/*
 * Read the guest SG element at sg->elemAddr and advance the cursor;
 * afterwards sg->dataAddr/sg->resid describe that element's data chunk.
 */
static void
pvscsi_get_next_sg_elem(PVSCSISGState *sg)
{
    struct PVSCSISGElement elem;

    cpu_physical_memory_read(sg->elemAddr, (void *)&elem, sizeof(elem));
    if ((elem.flags & ~PVSCSI_KNOWN_FLAGS) != 0) {
        /*
         * There is PVSCSI_SGE_FLAG_CHAIN_ELEMENT flag described in
         * header file but its value is unknown. This flag requires
         * additional processing, so we put warning here to catch it
         * some day and make proper implementation
         */
        trace_pvscsi_get_next_sg_elem(elem.flags);
    }

    sg->elemAddr += sizeof(elem);
    sg->dataAddr = elem.addr;
    sg->resid = elem.length;
}
/*
 * Copy sense data into the guest buffer, truncated to the size the
 * request descriptor allows, and cache the sense key for tracing.
 */
static void
pvscsi_write_sense(PVSCSIRequest *r, uint8_t *sense, int len)
{
    r->cmp.senseLen = MIN(r->req.senseLen, len);
    /* NOTE(review): byte 1 vs byte 2 selection based on bit 1 of the
     * response code -- confirm against SPC fixed/descriptor sense formats */
    r->sense_key = sense[(sense[0] & 2) ? 1 : 2];
    cpu_physical_memory_write(r->req.senseAddr, sense, r->cmp.senseLen);
}
/*
 * SCSIBusInfo.complete callback: record status/residual in the request's
 * completion descriptor, fetch sense data on CHECK CONDITION, release
 * the SG list and queue the request for completion-ring delivery.
 */
static void
pvscsi_command_complete(SCSIRequest *req, uint32_t status, size_t resid)
{
    PVSCSIRequest *pvscsi_req = req->hba_private;
    PVSCSIState *s;

    if (!pvscsi_req) {
        trace_pvscsi_command_complete_not_found(req->tag);
        return;
    }
    s = pvscsi_req->dev;

    if (resid) {
        /* Short transfer. */
        trace_pvscsi_command_complete_data_run();
        pvscsi_req->cmp.hostStatus = BTSTAT_DATARUN;
    }

    pvscsi_req->cmp.scsiStatus = status;
    if (pvscsi_req->cmp.scsiStatus == CHECK_CONDITION) {
        uint8_t sense[SCSI_SENSE_BUF_SIZE];
        int sense_len =
            scsi_req_get_sense(pvscsi_req->sreq, sense, sizeof(sense));

        trace_pvscsi_command_complete_sense_len(sense_len);
        pvscsi_write_sense(pvscsi_req, sense, sense_len);
    }
    qemu_sglist_destroy(&pvscsi_req->sgl);
    pvscsi_complete_request(s, pvscsi_req);
}
  455. static void
  456. pvscsi_send_msg(PVSCSIState *s, SCSIDevice *dev, uint32_t msg_type)
  457. {
  458. if (s->msg_ring_info_valid && pvscsi_ring_msg_has_room(&s->rings)) {
  459. PVSCSIMsgDescDevStatusChanged msg = {0};
  460. msg.type = msg_type;
  461. msg.bus = dev->channel;
  462. msg.target = dev->id;
  463. msg.lun[1] = dev->lun;
  464. pvscsi_msg_ring_put(s, (PVSCSIRingMsgDesc *)&msg);
  465. pvscsi_ring_flush_msg(&s->rings);
  466. pvscsi_raise_message_interrupt(s);
  467. }
  468. }
  469. static void
  470. pvscsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp)
  471. {
  472. PVSCSIState *s = PVSCSI(hotplug_dev);
  473. pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_ADDED);
  474. }
  475. static void
  476. pvscsi_hot_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp)
  477. {
  478. PVSCSIState *s = PVSCSI(hotplug_dev);
  479. pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_REMOVED);
  480. qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
  481. }
  482. static void
  483. pvscsi_request_cancelled(SCSIRequest *req)
  484. {
  485. PVSCSIRequest *pvscsi_req = req->hba_private;
  486. PVSCSIState *s = pvscsi_req->dev;
  487. if (pvscsi_req->completed) {
  488. return;
  489. }
  490. if (pvscsi_req->dev->resetting) {
  491. pvscsi_req->cmp.hostStatus = BTSTAT_BUSRESET;
  492. } else {
  493. pvscsi_req->cmp.hostStatus = BTSTAT_ABORTQUEUE;
  494. }
  495. pvscsi_complete_request(s, pvscsi_req);
  496. }
  497. static SCSIDevice*
  498. pvscsi_device_find(PVSCSIState *s, int channel, int target,
  499. uint8_t *requested_lun, uint8_t *target_lun)
  500. {
  501. if (requested_lun[0] || requested_lun[2] || requested_lun[3] ||
  502. requested_lun[4] || requested_lun[5] || requested_lun[6] ||
  503. requested_lun[7] || (target > PVSCSI_MAX_DEVS)) {
  504. return NULL;
  505. } else {
  506. *target_lun = requested_lun[1];
  507. return scsi_device_find(&s->bus, channel, target, *target_lun);
  508. }
  509. }
/*
 * Allocate a host-side request object for a guest request descriptor
 * and link it on the pending queue.  *d receives the target device
 * (NULL if the descriptor addresses an unknown device or LUN).
 */
static PVSCSIRequest *
pvscsi_queue_pending_descriptor(PVSCSIState *s, SCSIDevice **d,
                                struct PVSCSIRingReqDesc *descr)
{
    PVSCSIRequest *pvscsi_req;
    uint8_t lun;

    pvscsi_req = g_malloc0(sizeof(*pvscsi_req));
    pvscsi_req->dev = s;
    pvscsi_req->req = *descr;       /* keep a copy of the guest descriptor */
    pvscsi_req->cmp.context = pvscsi_req->req.context;
    QTAILQ_INSERT_TAIL(&s->pending_queue, pvscsi_req, next);

    *d = pvscsi_device_find(s, descr->bus, descr->target, descr->lun, &lun);
    if (*d) {
        pvscsi_req->lun = lun;
    }

    return pvscsi_req;
}
/*
 * Walk the guest scatter-gather chain and build the host QEMUSGList,
 * stopping after req.dataLen bytes or PVSCSI_MAX_SG_ELEM elements (the
 * cap guards against guest-controlled unbounded or looping chains).
 */
static void
pvscsi_convert_sglist(PVSCSIRequest *r)
{
    uint32_t chunk_size, elmcnt = 0;
    uint64_t data_length = r->req.dataLen;
    PVSCSISGState sg = r->sg;

    while (data_length && elmcnt < PVSCSI_MAX_SG_ELEM) {
        while (!sg.resid && elmcnt++ < PVSCSI_MAX_SG_ELEM) {
            pvscsi_get_next_sg_elem(&sg);
            /* NOTE(review): traces r->sg (the unmodified copy) rather than
             * the element just fetched into the local 'sg' -- verify intent */
            trace_pvscsi_convert_sglist(r->req.context, r->sg.dataAddr,
                                        r->sg.resid);
        }
        /* Never map more than the request's remaining data length */
        chunk_size = MIN(data_length, sg.resid);
        if (chunk_size) {
            qemu_sglist_add(&r->sgl, sg.dataAddr, chunk_size);
        }

        sg.dataAddr += chunk_size;
        data_length -= chunk_size;
        sg.resid -= chunk_size;
    }
}
  548. static void
  549. pvscsi_build_sglist(PVSCSIState *s, PVSCSIRequest *r)
  550. {
  551. PCIDevice *d = PCI_DEVICE(s);
  552. pci_dma_sglist_init(&r->sgl, d, 1);
  553. if (r->req.flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
  554. pvscsi_convert_sglist(r);
  555. } else {
  556. qemu_sglist_add(&r->sgl, r->req.dataAddr, r->req.dataLen);
  557. }
  558. }
/*
 * Process one guest request descriptor: locate the target device,
 * validate the declared transfer direction against the CDB, build the
 * host SG list and enqueue the SCSI request.
 */
static void
pvscsi_process_request_descriptor(PVSCSIState *s,
                                  struct PVSCSIRingReqDesc *descr)
{
    SCSIDevice *d;
    PVSCSIRequest *r = pvscsi_queue_pending_descriptor(s, &d, descr);
    int64_t n;

    trace_pvscsi_process_req_descr(descr->cdb[0], descr->context);

    if (!d) {
        /* No such target/LUN: complete with selection timeout */
        r->cmp.hostStatus = BTSTAT_SELTIMEO;
        trace_pvscsi_process_req_descr_unknown_device();
        pvscsi_complete_request(s, r);
        return;
    }

    if (descr->flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
        /* dataAddr points at the first SG element, not at flat data */
        r->sg.elemAddr = descr->dataAddr;
    }

    r->sreq = scsi_req_new(d, descr->context, r->lun, descr->cdb, r);
    if (r->sreq->cmd.mode == SCSI_XFER_FROM_DEV &&
        (descr->flags & PVSCSI_FLAG_CMD_DIR_TODEVICE)) {
        /* Guest claims a to-device transfer but the CDB reads */
        r->cmp.hostStatus = BTSTAT_BADMSG;
        trace_pvscsi_process_req_descr_invalid_dir();
        scsi_req_cancel(r->sreq);
        return;
    }
    if (r->sreq->cmd.mode == SCSI_XFER_TO_DEV &&
        (descr->flags & PVSCSI_FLAG_CMD_DIR_TOHOST)) {
        /* Guest claims a to-host transfer but the CDB writes */
        r->cmp.hostStatus = BTSTAT_BADMSG;
        trace_pvscsi_process_req_descr_invalid_dir();
        scsi_req_cancel(r->sreq);
        return;
    }

    pvscsi_build_sglist(s, r);
    n = scsi_req_enqueue(r->sreq);
    if (n) {
        scsi_req_continue(r->sreq);
    }
}
/*
 * Drain the request ring: read every produced descriptor from guest
 * memory, dispatch it, then publish the updated consumer index.
 */
static void
pvscsi_process_io(PVSCSIState *s)
{
    PVSCSIRingReqDesc descr;
    hwaddr next_descr_pa;

    assert(s->rings_info_valid);
    while ((next_descr_pa = pvscsi_ring_pop_req_descr(&s->rings)) != 0) {
        /* Only read after production index verification */
        smp_rmb();

        trace_pvscsi_process_io(next_descr_pa);
        cpu_physical_memory_read(next_descr_pa, &descr, sizeof(descr));
        pvscsi_process_request_descriptor(s, &descr);
    }

    pvscsi_ring_flush_req(&s->rings);
}
  612. static void
  613. pvscsi_dbg_dump_tx_rings_config(PVSCSICmdDescSetupRings *rc)
  614. {
  615. int i;
  616. trace_pvscsi_tx_rings_ppn("Rings State", rc->ringsStatePPN);
  617. trace_pvscsi_tx_rings_num_pages("Request Ring", rc->reqRingNumPages);
  618. for (i = 0; i < rc->reqRingNumPages; i++) {
  619. trace_pvscsi_tx_rings_ppn("Request Ring", rc->reqRingPPNs[i]);
  620. }
  621. trace_pvscsi_tx_rings_num_pages("Confirm Ring", rc->cmpRingNumPages);
  622. for (i = 0; i < rc->cmpRingNumPages; i++) {
  623. trace_pvscsi_tx_rings_ppn("Confirm Ring", rc->cmpRingPPNs[i]);
  624. }
  625. }
/*
 * The three commands below are not implemented; each handler only
 * traces the request and reports failure to the guest.
 */
static uint64_t
pvscsi_on_cmd_config(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_CONFIG");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}

static uint64_t
pvscsi_on_cmd_unplug(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_DEVICE_UNPLUG");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}

static uint64_t
pvscsi_on_issue_scsi(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_ISSUE_SCSI");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
/*
 * PVSCSI_CMD_SETUP_RINGS: both page counts must be between 1 and
 * PVSCSI_SETUP_RINGS_MAX_NUM_PAGES before the guest-controlled layout
 * is trusted and the rings are initialized.
 */
static uint64_t
pvscsi_on_cmd_setup_rings(PVSCSIState *s)
{
    PVSCSICmdDescSetupRings *rc =
        (PVSCSICmdDescSetupRings *) s->curr_cmd_data;

    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_RINGS");

    if (!rc->reqRingNumPages
        || rc->reqRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES
        || !rc->cmpRingNumPages
        || rc->cmpRingNumPages > PVSCSI_SETUP_RINGS_MAX_NUM_PAGES) {
        return PVSCSI_COMMAND_PROCESSING_FAILED;
    }

    pvscsi_dbg_dump_tx_rings_config(rc);
    pvscsi_ring_init_data(&s->rings, rc);

    s->rings_info_valid = TRUE;
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
/*
 * PVSCSI_CMD_ABORT_CMD: cancel the pending request whose context
 * matches, completing it with BTSTAT_ABORTQUEUE.  Relies on the FOREACH
 * macro leaving the cursor NULL when no match is found.
 */
static uint64_t
pvscsi_on_cmd_abort(PVSCSIState *s)
{
    PVSCSICmdDescAbortCmd *cmd = (PVSCSICmdDescAbortCmd *) s->curr_cmd_data;
    PVSCSIRequest *r, *next;

    trace_pvscsi_on_cmd_abort(cmd->context, cmd->target);

    QTAILQ_FOREACH_SAFE(r, &s->pending_queue, next, next) {
        if (r->req.context == cmd->context) {
            break;
        }
    }
    if (r) {
        assert(!r->completed);
        r->cmp.hostStatus = BTSTAT_ABORTQUEUE;
        scsi_req_cancel(r->sreq);
    }

    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
/* Fallback handler for command ids the device does not recognize */
static uint64_t
pvscsi_on_cmd_unknown(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_unknown_data(s->curr_cmd_data[0]);
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
/*
 * PVSCSI_CMD_RESET_DEVICE: reset a single target/LUN.  The resetting
 * counter makes requests cancelled during the reset report
 * BTSTAT_BUSRESET rather than BTSTAT_ABORTQUEUE.
 */
static uint64_t
pvscsi_on_cmd_reset_device(PVSCSIState *s)
{
    uint8_t target_lun = 0;
    struct PVSCSICmdDescResetDevice *cmd =
        (struct PVSCSICmdDescResetDevice *) s->curr_cmd_data;
    SCSIDevice *sdev;

    sdev = pvscsi_device_find(s, 0, cmd->target, cmd->lun, &target_lun);

    trace_pvscsi_on_cmd_reset_dev(cmd->target, (int) target_lun, sdev);

    if (sdev != NULL) {
        s->resetting++;
        device_reset(&sdev->qdev);
        s->resetting--;
        return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
    }

    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
/* PVSCSI_CMD_RESET_BUS: reset every device on the bus */
static uint64_t
pvscsi_on_cmd_reset_bus(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_RESET_BUS");

    s->resetting++;
    qbus_reset_all(BUS(&s->bus));
    s->resetting--;

    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
/*
 * PVSCSI_CMD_SETUP_MSG_RING: set up the event-message ring.  Fails when
 * message support is disabled or the page count is invalid; quietly
 * skipped (but not failed) while the data rings are not yet set up.
 * NOTE(review): the success return is the payload size in 32-bit words
 * rather than PVSCSI_COMMAND_PROCESSING_SUCCEEDED -- confirm against the
 * device specification before changing.
 */
static uint64_t
pvscsi_on_cmd_setup_msg_ring(PVSCSIState *s)
{
    PVSCSICmdDescSetupMsgRing *rc =
        (PVSCSICmdDescSetupMsgRing *) s->curr_cmd_data;

    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_MSG_RING");

    if (!s->use_msg) {
        return PVSCSI_COMMAND_PROCESSING_FAILED;
    }

    if (s->rings_info_valid) {
        if (pvscsi_ring_init_msg(&s->rings, rc) < 0) {
            return PVSCSI_COMMAND_PROCESSING_FAILED;
        }
        s->msg_ring_info_valid = TRUE;
    }
    return sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(uint32_t);
}
/* PVSCSI_CMD_ADAPTER_RESET: full reset back to power-on state */
static uint64_t
pvscsi_on_cmd_adapter_reset(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_ADAPTER_RESET");

    pvscsi_reset_adapter(s);
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
/*
 * Dispatch table indexed by PVSCSI command id.  data_size is how many
 * payload bytes must arrive through the COMMAND_DATA register before
 * handler_fn runs; handlers read the payload from s->curr_cmd_data.
 */
static const struct {
    int data_size;
    uint64_t (*handler_fn)(PVSCSIState *s);
} pvscsi_commands[] = {
    [PVSCSI_CMD_FIRST] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_unknown,
    },

    /* Not implemented, data size defined based on what arrives on windows */
    [PVSCSI_CMD_CONFIG] = {
        .data_size = 6 * sizeof(uint32_t),
        .handler_fn = pvscsi_on_cmd_config,
    },

    /* Command not implemented, data size is unknown */
    [PVSCSI_CMD_ISSUE_SCSI] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_issue_scsi,
    },

    /* Command not implemented, data size is unknown */
    [PVSCSI_CMD_DEVICE_UNPLUG] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_unplug,
    },

    [PVSCSI_CMD_SETUP_RINGS] = {
        .data_size = sizeof(PVSCSICmdDescSetupRings),
        .handler_fn = pvscsi_on_cmd_setup_rings,
    },

    [PVSCSI_CMD_RESET_DEVICE] = {
        .data_size = sizeof(struct PVSCSICmdDescResetDevice),
        .handler_fn = pvscsi_on_cmd_reset_device,
    },

    [PVSCSI_CMD_RESET_BUS] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_reset_bus,
    },

    [PVSCSI_CMD_SETUP_MSG_RING] = {
        .data_size = sizeof(PVSCSICmdDescSetupMsgRing),
        .handler_fn = pvscsi_on_cmd_setup_msg_ring,
    },

    [PVSCSI_CMD_ADAPTER_RESET] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_adapter_reset,
    },

    [PVSCSI_CMD_ABORT_CMD] = {
        .data_size = sizeof(struct PVSCSICmdDescAbortCmd),
        .handler_fn = pvscsi_on_cmd_abort,
    },
};
  783. static void
  784. pvscsi_do_command_processing(PVSCSIState *s)
  785. {
  786. size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t);
  787. assert(s->curr_cmd < PVSCSI_CMD_LAST);
  788. if (bytes_arrived >= pvscsi_commands[s->curr_cmd].data_size) {
  789. s->reg_command_status = pvscsi_commands[s->curr_cmd].handler_fn(s);
  790. s->curr_cmd = PVSCSI_CMD_FIRST;
  791. s->curr_cmd_data_cntr = 0;
  792. }
  793. }
  794. static void
  795. pvscsi_on_command_data(PVSCSIState *s, uint32_t value)
  796. {
  797. size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t);
  798. assert(bytes_arrived < sizeof(s->curr_cmd_data));
  799. s->curr_cmd_data[s->curr_cmd_data_cntr++] = value;
  800. pvscsi_do_command_processing(s);
  801. }
  802. static void
  803. pvscsi_on_command(PVSCSIState *s, uint64_t cmd_id)
  804. {
  805. if ((cmd_id > PVSCSI_CMD_FIRST) && (cmd_id < PVSCSI_CMD_LAST)) {
  806. s->curr_cmd = cmd_id;
  807. } else {
  808. s->curr_cmd = PVSCSI_CMD_FIRST;
  809. trace_pvscsi_on_cmd_unknown(cmd_id);
  810. }
  811. s->curr_cmd_data_cntr = 0;
  812. s->reg_command_status = PVSCSI_COMMAND_NOT_ENOUGH_DATA;
  813. pvscsi_do_command_processing(s);
  814. }
/*
 * MMIO write handler for the device's BAR 0 register space.
 * Dispatches on register offset; unknown offsets are traced and ignored.
 */
static void
pvscsi_io_write(void *opaque, hwaddr addr,
                uint64_t val, unsigned size)
{
    PVSCSIState *s = opaque;

    switch (addr) {
    case PVSCSI_REG_OFFSET_COMMAND:
        /* New command id; data words (if any) follow via COMMAND_DATA. */
        pvscsi_on_command(s, val);
        break;

    case PVSCSI_REG_OFFSET_COMMAND_DATA:
        /* One 32-bit word of data for the command in progress. */
        pvscsi_on_command_data(s, (uint32_t) val);
        break;

    case PVSCSI_REG_OFFSET_INTR_STATUS:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_STATUS", val);
        /* Write-one-to-clear interrupt status bits. */
        s->reg_interrupt_status &= ~val;
        pvscsi_update_irq_status(s);
        pvscsi_schedule_completion_processing(s);
        break;

    case PVSCSI_REG_OFFSET_INTR_MASK:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_MASK", val);
        s->reg_interrupt_enabled = val;
        /* Re-evaluate the IRQ line under the new mask. */
        pvscsi_update_irq_status(s);
        break;

    case PVSCSI_REG_OFFSET_KICK_NON_RW_IO:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_NON_RW_IO", val);
        /* Doorbell: scan the request ring. */
        pvscsi_process_io(s);
        break;

    case PVSCSI_REG_OFFSET_KICK_RW_IO:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_RW_IO", val);
        /* Doorbell: same processing path as the non-RW kick. */
        pvscsi_process_io(s);
        break;

    case PVSCSI_REG_OFFSET_DEBUG:
        /* Debug register writes are traced but otherwise ignored. */
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_DEBUG", val);
        break;

    default:
        trace_pvscsi_io_write_unknown(addr, size, val);
        break;
    }
}
  854. static uint64_t
  855. pvscsi_io_read(void *opaque, hwaddr addr, unsigned size)
  856. {
  857. PVSCSIState *s = opaque;
  858. switch (addr) {
  859. case PVSCSI_REG_OFFSET_INTR_STATUS:
  860. trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_STATUS",
  861. s->reg_interrupt_status);
  862. return s->reg_interrupt_status;
  863. case PVSCSI_REG_OFFSET_INTR_MASK:
  864. trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_MASK",
  865. s->reg_interrupt_status);
  866. return s->reg_interrupt_enabled;
  867. case PVSCSI_REG_OFFSET_COMMAND_STATUS:
  868. trace_pvscsi_io_read("PVSCSI_REG_OFFSET_COMMAND_STATUS",
  869. s->reg_interrupt_status);
  870. return s->reg_command_status;
  871. default:
  872. trace_pvscsi_io_read_unknown(addr, size);
  873. return 0;
  874. }
  875. }
  876. static void
  877. pvscsi_init_msi(PVSCSIState *s)
  878. {
  879. int res;
  880. PCIDevice *d = PCI_DEVICE(s);
  881. res = msi_init(d, PVSCSI_MSI_OFFSET(s), PVSCSI_MSIX_NUM_VECTORS,
  882. PVSCSI_USE_64BIT, PVSCSI_PER_VECTOR_MASK, NULL);
  883. if (res < 0) {
  884. trace_pvscsi_init_msi_fail(res);
  885. s->msi_used = false;
  886. } else {
  887. s->msi_used = true;
  888. }
  889. }
/*
 * Tear down MSI state set up by pvscsi_init_msi(); called on device exit.
 */
static void
pvscsi_cleanup_msi(PVSCSIState *s)
{
    PCIDevice *d = PCI_DEVICE(s);

    msi_uninit(d);
}
/* MMIO ops for the register BAR: little-endian, all accesses performed
 * as aligned 32-bit reads/writes (the memory core splits/merges others). */
static const MemoryRegionOps pvscsi_ops = {
        .read = pvscsi_io_read,
        .write = pvscsi_io_write,
        .endianness = DEVICE_LITTLE_ENDIAN,
        .impl = {
                .min_access_size = 4,
                .max_access_size = 4,
        },
};
/* SCSI bus callbacks: tagged command queuing, single channel/LUN,
 * up to PVSCSI_MAX_DEVS targets. */
static const struct SCSIBusInfo pvscsi_scsi_info = {
        .tcq = true,
        .max_target = PVSCSI_MAX_DEVS,
        .max_channel = 0,
        .max_lun = 0,

        .get_sg_list = pvscsi_get_sg_list,
        .complete = pvscsi_command_complete,
        .cancel = pvscsi_request_cancelled,
};
/*
 * PCI realize: set up config space, the register BAR, MSI, the optional
 * PCIe capability, the completion bottom half, and the SCSI bus, then
 * reset device state.  The order is deliberate — the BAR and MSI must
 * exist before the bus is created and hotplug handling overridden.
 */
static void
pvscsi_realizefn(PCIDevice *pci_dev, Error **errp)
{
    PVSCSIState *s = PVSCSI(pci_dev);

    trace_pvscsi_state("init");

    /* PCI subsystem ID, subsystem vendor ID, revision */
    if (PVSCSI_USE_OLD_PCI_CONFIGURATION(s)) {
        /* Compat mode ("x-old-pci-configuration"): only the subsystem ID
         * is set, matching the device's historical config space. */
        pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 0x1000);
    } else {
        pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                     PCI_VENDOR_ID_VMWARE);
        pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                     PCI_DEVICE_ID_VMWARE_PVSCSI);
        pci_config_set_revision(pci_dev->config, 0x2);
    }

    /* PCI latency timer = 255 */
    pci_dev->config[PCI_LATENCY_TIMER] = 0xff;

    /* Interrupt pin A */
    pci_config_set_interrupt_pin(pci_dev->config, 1);

    memory_region_init_io(&s->io_space, OBJECT(s), &pvscsi_ops, s,
                          "pvscsi-io", PVSCSI_MEM_SPACE_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->io_space);

    pvscsi_init_msi(s);

    /* Expose a PCIe endpoint capability only when actually plugged into
     * an express bus (and PCIe was not disabled via compat flags). */
    if (pci_is_express(pci_dev) && pci_bus_is_express(pci_get_bus(pci_dev))) {
        pcie_endpoint_cap_init(pci_dev, PVSCSI_EXP_EP_OFFSET);
    }

    s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s);

    scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(pci_dev),
                 &pvscsi_scsi_info, NULL);
    /* override default SCSI bus hotplug-handler, with pvscsi's one */
    qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(s), &error_abort);
    pvscsi_reset_state(s);
}
/*
 * PCI exit: release the completion bottom half and MSI resources
 * acquired in pvscsi_realizefn().
 */
static void
pvscsi_uninit(PCIDevice *pci_dev)
{
    PVSCSIState *s = PVSCSI(pci_dev);

    trace_pvscsi_state("uninit");
    qemu_bh_delete(s->completion_worker);

    pvscsi_cleanup_msi(s);
}
  955. static void
  956. pvscsi_reset(DeviceState *dev)
  957. {
  958. PCIDevice *d = PCI_DEVICE(dev);
  959. PVSCSIState *s = PVSCSI(d);
  960. trace_pvscsi_state("reset");
  961. pvscsi_reset_adapter(s);
  962. }
/*
 * Migration pre-save hook.  By this point all outstanding requests must
 * have been drained; in-flight state is not migratable, so catch any
 * violation with asserts rather than produce a corrupt stream.
 */
static int
pvscsi_pre_save(void *opaque)
{
    PVSCSIState *s = (PVSCSIState *) opaque;

    trace_pvscsi_state("presave");

    assert(QTAILQ_EMPTY(&s->pending_queue));
    assert(QTAILQ_EMPTY(&s->completion_queue));

    return 0;
}
/*
 * Migration post-load hook; nothing to reconstruct beyond the vmstate
 * fields themselves, so it only traces.
 */
static int
pvscsi_post_load(void *opaque, int version_id)
{
    trace_pvscsi_state("postload");
    return 0;
}
  978. static bool pvscsi_vmstate_need_pcie_device(void *opaque)
  979. {
  980. PVSCSIState *s = PVSCSI(opaque);
  981. return !(s->compat_flags & PVSCSI_COMPAT_DISABLE_PCIE);
  982. }
/*
 * Field-test predicate: migrate the conventional-PCI parent state exactly
 * when the PCIe subsection is NOT used (the two are mutually exclusive).
 */
static bool pvscsi_vmstate_test_pci_device(void *opaque, int version_id)
{
    return !pvscsi_vmstate_need_pcie_device(opaque);
}
/* Migration subsection carrying the parent PCI device as a PCIe device;
 * present only when PCIe mode is enabled (see the .needed predicate). */
static const VMStateDescription vmstate_pvscsi_pcie_device = {
    .name = "pvscsi/pcie",
    .needed = pvscsi_vmstate_need_pcie_device,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Main migration description.  Field order and types define the wire
 * format — do not reorder or retype without bumping version_id, or
 * cross-version migration breaks.
 */
static const VMStateDescription vmstate_pvscsi = {
    .name = "pvscsi",
    .version_id = 0,
    .minimum_version_id = 0,
    .pre_save = pvscsi_pre_save,
    .post_load = pvscsi_post_load,
    .fields = (VMStateField[]) {
        /* Conventional-PCI parent state; skipped when the PCIe
         * subsection below is in use instead. */
        VMSTATE_STRUCT_TEST(parent_obj, PVSCSIState,
                            pvscsi_vmstate_test_pci_device, 0,
                            vmstate_pci_device, PCIDevice),
        VMSTATE_UINT8(msi_used, PVSCSIState),
        VMSTATE_UINT32(resetting, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_status, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_enabled, PVSCSIState),
        VMSTATE_UINT64(reg_command_status, PVSCSIState),
        /* In-progress command register protocol state. */
        VMSTATE_UINT64(curr_cmd, PVSCSIState),
        VMSTATE_UINT32(curr_cmd_data_cntr, PVSCSIState),
        VMSTATE_UINT32_ARRAY(curr_cmd_data, PVSCSIState,
                             ARRAY_SIZE(((PVSCSIState *)NULL)->curr_cmd_data)),
        VMSTATE_UINT8(rings_info_valid, PVSCSIState),
        VMSTATE_UINT8(msg_ring_info_valid, PVSCSIState),
        VMSTATE_UINT8(use_msg, PVSCSIState),

        /* Guest-physical ring layout and producer/consumer pointers. */
        VMSTATE_UINT64(rings.rs_pa, PVSCSIState),
        VMSTATE_UINT32(rings.txr_len_mask, PVSCSIState),
        VMSTATE_UINT32(rings.rxr_len_mask, PVSCSIState),
        VMSTATE_UINT64_ARRAY(rings.req_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64_ARRAY(rings.cmp_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64(rings.consumed_ptr, PVSCSIState),
        VMSTATE_UINT64(rings.filled_cmp_ptr, PVSCSIState),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_pvscsi_pcie_device,
        NULL
    }
};
/* User-configurable properties; the x- prefixed ones are compat knobs
 * used by machine-type compatibility settings, not for end users. */
static Property pvscsi_properties[] = {
    DEFINE_PROP_UINT8("use_msg", PVSCSIState, use_msg, 1),
    DEFINE_PROP_BIT("x-old-pci-configuration", PVSCSIState, compat_flags,
                    PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", PVSCSIState, compat_flags,
                    PVSCSI_COMPAT_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Device-level realize wrapper.  QEMU_PCI_CAP_EXPRESS must be set before
 * the PCI parent realize runs (which is why this cannot live in
 * pvscsi_realizefn), then chains to the saved parent realize.
 */
static void pvscsi_realize(DeviceState *qdev, Error **errp)
{
    PVSCSIClass *pvs_c = PVSCSI_DEVICE_GET_CLASS(qdev);
    PCIDevice *pci_dev = PCI_DEVICE(qdev);
    PVSCSIState *s = PVSCSI(qdev);

    if (!(s->compat_flags & PVSCSI_COMPAT_DISABLE_PCIE)) {
        pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
    }

    pvs_c->parent_dc_realize(qdev, errp);
}
/*
 * Class init: wire up PCI identity, realize/exit/reset hooks, migration
 * description, properties, and the hotplug-handler interface callbacks.
 */
static void pvscsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    PVSCSIClass *pvs_k = PVSCSI_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->realize = pvscsi_realizefn;
    k->exit = pvscsi_uninit;
    k->vendor_id = PCI_VENDOR_ID_VMWARE;
    k->device_id = PCI_DEVICE_ID_VMWARE_PVSCSI;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    k->subsystem_id = 0x1000;
    /* Interpose pvscsi_realize before the PCI parent's realize so the
     * PCIe capability flag can be set first. */
    device_class_set_parent_realize(dc, pvscsi_realize,
                                    &pvs_k->parent_dc_realize);
    dc->reset = pvscsi_reset;
    dc->vmsd = &vmstate_pvscsi;
    dc->props = pvscsi_properties;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    hc->unplug = pvscsi_hot_unplug;
    hc->plug = pvscsi_hotplug;
}
/* QOM type registration: a PCI device that also acts as a hotplug
 * handler and can present as either PCIe or conventional PCI. */
static const TypeInfo pvscsi_info = {
    .name          = TYPE_PVSCSI,
    .parent        = TYPE_PCI_DEVICE,
    .class_size    = sizeof(PVSCSIClass),
    .instance_size = sizeof(PVSCSIState),
    .class_init    = pvscsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { INTERFACE_PCIE_DEVICE },
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};
/* Register the pvscsi QOM type at module-init time. */
static void
pvscsi_register_types(void)
{
    type_register_static(&pvscsi_info);
}
type_init(pvscsi_register_types);