spapr_vscsi.c

/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtual SCSI, aka ibmvscsi
 *
 * Copyright (c) 2010,2011 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * TODO:
 *
 *  - Cleanups :-)
 *  - Sort out better how to assign devices to VSCSI instances
 *  - Fix residual counts
 *  - Add indirect descriptors support
 *  - Maybe do autosense (PAPR seems to mandate it, linux doesn't care)
 */

#include "qemu/osdep.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
#include "srp.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/qdev-properties.h"
#include "viosrp.h"
#include "trace.h"

#include <libfdt.h>
#include "qom/object.h"
/*
 * Virtual SCSI device
 */

/* Random numbers */
#define VSCSI_MAX_SECTORS       4096
#define VSCSI_REQ_LIMIT         24

/* Maximum size of an IU payload */
#define SRP_MAX_IU_DATA_LEN     (SRP_MAX_IU_LEN - sizeof(union srp_iu))
#define SRP_RSP_SENSE_DATA_LEN  18

#define SRP_REPORT_LUNS_WLUN    0xc10100000000000ULL

typedef union vscsi_crq {
    struct viosrp_crq s;
    uint8_t raw[16];
} vscsi_crq;

typedef struct vscsi_req {
    vscsi_crq crq;
    uint8_t viosrp_iu_buf[SRP_MAX_IU_LEN];

    /* SCSI request tracking */
    SCSIRequest *sreq;
    uint32_t qtag; /* qemu tag != srp tag */
    bool active;
    bool writing;
    bool dma_error;
    uint32_t data_len;
    uint32_t senselen;
    uint8_t sense[SCSI_SENSE_BUF_SIZE];

    /* RDMA related bits */
    uint8_t dma_fmt;
    uint16_t local_desc;
    uint16_t total_desc;
    uint16_t cdb_offset;
    uint16_t cur_desc_num;
    uint16_t cur_desc_offset;
} vscsi_req;

#define TYPE_VIO_SPAPR_VSCSI_DEVICE "spapr-vscsi"
OBJECT_DECLARE_SIMPLE_TYPE(VSCSIState, VIO_SPAPR_VSCSI_DEVICE)

struct VSCSIState {
    SpaprVioDevice vdev;
    SCSIBus bus;
    vscsi_req reqs[VSCSI_REQ_LIMIT];
};
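
/*
 * Requests are tracked in a fixed pool of VSCSI_REQ_LIMIT slots indexed
 * by qtag; a slot is free when its 'active' flag is clear.
 */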
static union viosrp_iu *req_iu(vscsi_req *req)
{
    return (union viosrp_iu *)req->viosrp_iu_buf;
}

static struct vscsi_req *vscsi_get_req(VSCSIState *s)
{
    vscsi_req *req;
    int i;

    for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
        req = &s->reqs[i];
        if (!req->active) {
            memset(req, 0, sizeof(*req));
            req->qtag = i;
            req->active = 1;
            return req;
        }
    }
    return NULL;
}

static struct vscsi_req *vscsi_find_req(VSCSIState *s, uint64_t srp_tag)
{
    vscsi_req *req;
    int i;

    for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
        req = &s->reqs[i];
        if (req_iu(req)->srp.cmd.tag == srp_tag) {
            return req;
        }
    }
    return NULL;
}

static void vscsi_put_req(vscsi_req *req)
{
    if (req->sreq != NULL) {
        scsi_req_unref(req->sreq);
    }
    req->sreq = NULL;
    req->active = 0;
}
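
/*
 * Decode a 64-bit SRP LUN (SCSI SAM addressing formats) into the
 * channel/id/lun triplet used to look the device up on the SCSI bus.
 */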
static SCSIDevice *vscsi_device_find(SCSIBus *bus, uint64_t srp_lun, int *lun)
{
    int channel = 0, id = 0;

retry:
    switch (srp_lun >> 62) {
    case 0:
        if ((srp_lun >> 56) != 0) {
            channel = (srp_lun >> 56) & 0x3f;
            id = (srp_lun >> 48) & 0xff;
            srp_lun <<= 16;
            goto retry;
        }
        *lun = (srp_lun >> 48) & 0xff;
        break;

    case 1:
        *lun = (srp_lun >> 48) & 0x3fff;
        break;

    case 2:
        channel = (srp_lun >> 53) & 0x7;
        id = (srp_lun >> 56) & 0x3f;
        *lun = (srp_lun >> 48) & 0x1f;
        break;

    case 3:
        *lun = -1;
        return NULL;

    default:
        abort();
    }

    return scsi_device_find(bus, channel, id, *lun);
}
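
/*
 * DMA the response IU back into the guest buffer described by the
 * original CRQ entry, then post a completion CRQ to the partition.
 */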
static int vscsi_send_iu(VSCSIState *s, vscsi_req *req,
                         uint64_t length, uint8_t format)
{
    long rc, rc1;

    assert(length <= SRP_MAX_IU_LEN);

    /* First copy the SRP */
    rc = spapr_vio_dma_write(&s->vdev, req->crq.s.IU_data_ptr,
                             &req->viosrp_iu_buf, length);
    if (rc) {
        fprintf(stderr, "vscsi_send_iu: DMA write failure !\n");
    }

    req->crq.s.valid = 0x80;
    req->crq.s.format = format;
    req->crq.s.reserved = 0x00;
    req->crq.s.timeout = cpu_to_be16(0x0000);
    req->crq.s.IU_length = cpu_to_be16(length);
    req->crq.s.IU_data_ptr = req_iu(req)->srp.rsp.tag; /* right byte order */

    if (rc == 0) {
        req->crq.s.status = VIOSRP_OK;
    } else {
        req->crq.s.status = VIOSRP_ADAPTER_FAIL;
    }

    rc1 = spapr_vio_send_crq(&s->vdev, req->crq.raw);
    if (rc1) {
        fprintf(stderr, "vscsi_send_iu: Error sending response\n");
        return rc1;
    }

    return rc;
}
static void vscsi_makeup_sense(VSCSIState *s, vscsi_req *req,
                               uint8_t key, uint8_t asc, uint8_t ascq)
{
    req->senselen = SRP_RSP_SENSE_DATA_LEN;

    /* Valid bit and 'current errors' */
    req->sense[0] = (0x1 << 7 | 0x70);
    /* Sense key */
    req->sense[2] = key;
    /* Additional sense length */
    req->sense[7] = 0xa; /* 10 bytes */
    /* Additional sense code */
    req->sense[12] = asc;
    req->sense[13] = ascq;
}

static int vscsi_send_rsp(VSCSIState *s, vscsi_req *req,
                          uint8_t status, int32_t res_in, int32_t res_out)
{
    union viosrp_iu *iu = req_iu(req);
    uint64_t tag = iu->srp.rsp.tag;
    int total_len = sizeof(iu->srp.rsp);
    uint8_t sol_not = iu->srp.cmd.sol_not;

    trace_spapr_vscsi_send_rsp(status, res_in, res_out);

    memset(iu, 0, sizeof(struct srp_rsp));
    iu->srp.rsp.opcode = SRP_RSP;
    iu->srp.rsp.req_lim_delta = cpu_to_be32(1);
    iu->srp.rsp.tag = tag;

    /* Handle residuals */
    if (res_in < 0) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DIUNDER;
        res_in = -res_in;
    } else if (res_in) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;
    }
    if (res_out < 0) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DOUNDER;
        res_out = -res_out;
    } else if (res_out) {
        iu->srp.rsp.flags |= SRP_RSP_FLAG_DOOVER;
    }
    iu->srp.rsp.data_in_res_cnt = cpu_to_be32(res_in);
    iu->srp.rsp.data_out_res_cnt = cpu_to_be32(res_out);

    /* We don't do response data */
    /* iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID; */
    iu->srp.rsp.resp_data_len = cpu_to_be32(0);

    /* Handle success vs. failure */
    iu->srp.rsp.status = status;
    if (status) {
        iu->srp.rsp.sol_not = (sol_not & 0x04) >> 2;
        if (req->senselen) {
            int sense_data_len = MIN(req->senselen, SRP_MAX_IU_DATA_LEN);

            iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
            iu->srp.rsp.sense_data_len = cpu_to_be32(sense_data_len);
            memcpy(iu->srp.rsp.data, req->sense, sense_data_len);
            total_len += sense_data_len;
        }
    } else {
        iu->srp.rsp.sol_not = (sol_not & 0x02) >> 1;
    }

    vscsi_send_iu(s, req, total_len, VIOSRP_SRP_FORMAT);
    return 0;
}
static inline struct srp_direct_buf vscsi_swap_desc(struct srp_direct_buf desc)
{
    desc.va = be64_to_cpu(desc.va);
    desc.len = be32_to_cpu(desc.len);
    return desc;
}
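
/*
 * Fetch the n-th data descriptor of the request, applying buf_offset
 * within it. Returns 1 if a descriptor with remaining data was found,
 * 0 if there is no (more) data to transfer, -1 on error.
 */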
static int vscsi_fetch_desc(VSCSIState *s, struct vscsi_req *req,
                            unsigned n, unsigned buf_offset,
                            struct srp_direct_buf *ret)
{
    struct srp_cmd *cmd = &req_iu(req)->srp.cmd;

    switch (req->dma_fmt) {
    case SRP_NO_DATA_DESC: {
        trace_spapr_vscsi_fetch_desc_no_data();
        return 0;
    }
    case SRP_DATA_DESC_DIRECT: {
        memcpy(ret, cmd->add_data + req->cdb_offset, sizeof(*ret));
        assert(req->cur_desc_num == 0);
        trace_spapr_vscsi_fetch_desc_direct();
        break;
    }
    case SRP_DATA_DESC_INDIRECT: {
        struct srp_indirect_buf *tmp = (struct srp_indirect_buf *)
                (cmd->add_data + req->cdb_offset);
        if (n < req->local_desc) {
            *ret = tmp->desc_list[n];
            trace_spapr_vscsi_fetch_desc_indirect(req->qtag, n,
                                                  req->local_desc);
        } else if (n < req->total_desc) {
            int rc;
            struct srp_direct_buf tbl_desc = vscsi_swap_desc(tmp->table_desc);
            unsigned desc_offset = n * sizeof(struct srp_direct_buf);

            if (desc_offset >= tbl_desc.len) {
                trace_spapr_vscsi_fetch_desc_out_of_range(n, desc_offset);
                return -1;
            }
            rc = spapr_vio_dma_read(&s->vdev, tbl_desc.va + desc_offset,
                                    ret, sizeof(struct srp_direct_buf));
            if (rc) {
                trace_spapr_vscsi_fetch_desc_dma_read_error(rc);
                return -1;
            }
            trace_spapr_vscsi_fetch_desc_indirect_seg_ext(req->qtag, n,
                                                          req->total_desc,
                                                          tbl_desc.va,
                                                          tbl_desc.len);
        } else {
            trace_spapr_vscsi_fetch_desc_out_of_desc();
            return 0;
        }
        break;
    }
    default:
        fprintf(stderr, "VSCSI: Unknown format %x\n", req->dma_fmt);
        return -1;
    }

    *ret = vscsi_swap_desc(*ret);
    if (buf_offset > ret->len) {
        trace_spapr_vscsi_fetch_desc_out_of_desc_boundary(buf_offset,
                                                          req->cur_desc_num,
                                                          ret->len);
        return -1;
    }
    ret->va += buf_offset;
    ret->len -= buf_offset;

    trace_spapr_vscsi_fetch_desc_done(req->cur_desc_num, req->cur_desc_offset,
                                      ret->va, ret->len);

    return ret->len ? 1 : 0;
}
static int vscsi_srp_direct_data(VSCSIState *s, vscsi_req *req,
                                 uint8_t *buf, uint32_t len)
{
    struct srp_direct_buf md;
    uint32_t llen;
    int rc = 0;

    rc = vscsi_fetch_desc(s, req, req->cur_desc_num, req->cur_desc_offset, &md);
    if (rc < 0) {
        return -1;
    } else if (rc == 0) {
        return 0;
    }

    llen = MIN(len, md.len);
    if (llen) {
        if (req->writing) { /* writing = to device = reading from memory */
            rc = spapr_vio_dma_read(&s->vdev, md.va, buf, llen);
        } else {
            rc = spapr_vio_dma_write(&s->vdev, md.va, buf, llen);
        }
    }

    if (rc) {
        return -1;
    }
    req->cur_desc_offset += llen;

    return llen;
}
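
/*
 * Walk the indirect descriptor list, transferring up to 'len' bytes and
 * advancing cur_desc_num/cur_desc_offset so the next call resumes where
 * this one stopped. Returns the number of bytes moved, or -1 on error.
 */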
static int vscsi_srp_indirect_data(VSCSIState *s, vscsi_req *req,
                                   uint8_t *buf, uint32_t len)
{
    struct srp_direct_buf md;
    int rc = 0;
    uint32_t llen, total = 0;

    trace_spapr_vscsi_srp_indirect_data(len);

    /* While we have data ... */
    while (len) {
        rc = vscsi_fetch_desc(s, req, req->cur_desc_num,
                              req->cur_desc_offset, &md);
        if (rc < 0) {
            return -1;
        } else if (rc == 0) {
            break;
        }

        /* Perform transfer */
        llen = MIN(len, md.len);
        if (req->writing) { /* writing = to device = reading from memory */
            rc = spapr_vio_dma_read(&s->vdev, md.va, buf, llen);
        } else {
            rc = spapr_vio_dma_write(&s->vdev, md.va, buf, llen);
        }
        if (rc) {
            trace_spapr_vscsi_srp_indirect_data_rw(req->writing, rc);
            break;
        }
        trace_spapr_vscsi_srp_indirect_data_buf(buf[0], buf[1], buf[2], buf[3]);

        len -= llen;
        buf += llen;
        total += llen;

        /* Update current position in the current descriptor */
        req->cur_desc_offset += llen;
        if (md.len == llen) {
            /* Go to the next descriptor if the current one finished */
            ++req->cur_desc_num;
            req->cur_desc_offset = 0;
        }
    }

    return rc ? -1 : total;
}

static int vscsi_srp_transfer_data(VSCSIState *s, vscsi_req *req,
                                   int writing, uint8_t *buf, uint32_t len)
{
    int err = 0;

    switch (req->dma_fmt) {
    case SRP_NO_DATA_DESC:
        trace_spapr_vscsi_srp_transfer_data(len);
        break;
    case SRP_DATA_DESC_DIRECT:
        err = vscsi_srp_direct_data(s, req, buf, len);
        break;
    case SRP_DATA_DESC_INDIRECT:
        err = vscsi_srp_indirect_data(s, req, buf, len);
        break;
    }
    return err;
}
/* Bits from linux srp */
static int data_out_desc_size(struct srp_cmd *cmd)
{
    int size = 0;
    uint8_t fmt = cmd->buf_fmt >> 4;

    switch (fmt) {
    case SRP_NO_DATA_DESC:
        break;
    case SRP_DATA_DESC_DIRECT:
        size = sizeof(struct srp_direct_buf);
        break;
    case SRP_DATA_DESC_INDIRECT:
        size = sizeof(struct srp_indirect_buf) +
            sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
        break;
    default:
        break;
    }
    return size;
}
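
/*
 * Derive the RDMA layout of the request from the SRP command: which
 * buffer format is in use, where the descriptors start after the CDB,
 * and how many descriptors live in the IU vs. in an external table.
 */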
static int vscsi_preprocess_desc(vscsi_req *req)
{
    struct srp_cmd *cmd = &req_iu(req)->srp.cmd;

    req->cdb_offset = cmd->add_cdb_len & ~3;

    if (req->writing) {
        req->dma_fmt = cmd->buf_fmt >> 4;
    } else {
        req->cdb_offset += data_out_desc_size(cmd);
        req->dma_fmt = cmd->buf_fmt & ((1U << 4) - 1);
    }

    switch (req->dma_fmt) {
    case SRP_NO_DATA_DESC:
        break;
    case SRP_DATA_DESC_DIRECT:
        req->total_desc = req->local_desc = 1;
        break;
    case SRP_DATA_DESC_INDIRECT: {
        struct srp_indirect_buf *ind_tmp = (struct srp_indirect_buf *)
                (cmd->add_data + req->cdb_offset);

        req->total_desc = be32_to_cpu(ind_tmp->table_desc.len) /
                          sizeof(struct srp_direct_buf);
        req->local_desc = req->writing ? cmd->data_out_desc_cnt :
                          cmd->data_in_desc_cnt;
        break;
    }
    default:
        fprintf(stderr,
                "vscsi_preprocess_desc: Unknown format %x\n", req->dma_fmt);
        return -1;
    }

    return 0;
}
/* Callback to indicate that the SCSI layer has completed a transfer. */
static void vscsi_transfer_data(SCSIRequest *sreq, uint32_t len)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent);
    vscsi_req *req = sreq->hba_private;
    uint8_t *buf;
    int rc = 0;

    trace_spapr_vscsi_transfer_data(sreq->tag, len, req);
    if (req == NULL) {
        fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag);
        return;
    }

    if (len) {
        buf = scsi_req_get_buf(sreq);
        rc = vscsi_srp_transfer_data(s, req, req->writing, buf, len);
    }
    if (rc < 0) {
        fprintf(stderr, "VSCSI: RDMA error rc=%d!\n", rc);
        req->dma_error = true;
        scsi_req_cancel(req->sreq);
        return;
    }

    /* Start next chunk */
    req->data_len -= rc;
    scsi_req_continue(sreq);
}

/* Callback to indicate that the SCSI layer has completed a command. */
static void vscsi_command_complete(SCSIRequest *sreq, size_t resid)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent);
    vscsi_req *req = sreq->hba_private;
    int32_t res_in = 0, res_out = 0;

    trace_spapr_vscsi_command_complete(sreq->tag, sreq->status, req);
    if (req == NULL) {
        fprintf(stderr, "VSCSI: Can't find request for tag 0x%x\n", sreq->tag);
        return;
    }

    if (sreq->status == CHECK_CONDITION) {
        req->senselen = scsi_req_get_sense(req->sreq, req->sense,
                                           sizeof(req->sense));
        trace_spapr_vscsi_command_complete_sense_data1(req->senselen,
                req->sense[0], req->sense[1], req->sense[2], req->sense[3],
                req->sense[4], req->sense[5], req->sense[6], req->sense[7]);
        trace_spapr_vscsi_command_complete_sense_data2(
                req->sense[8], req->sense[9], req->sense[10], req->sense[11],
                req->sense[12], req->sense[13], req->sense[14], req->sense[15]);
    }

    trace_spapr_vscsi_command_complete_status(sreq->status);
    if (sreq->status == 0) {
        /* We handle overflows, not underflows for normal commands,
         * but hopefully nobody cares
         */
        if (req->writing) {
            res_out = req->data_len;
        } else {
            res_in = req->data_len;
        }
    }
    vscsi_send_rsp(s, req, sreq->status, res_in, res_out);
    vscsi_put_req(req);
}

static void vscsi_request_cancelled(SCSIRequest *sreq)
{
    vscsi_req *req = sreq->hba_private;

    if (req->dma_error) {
        VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(sreq->bus->qbus.parent);

        vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
    }
    vscsi_put_req(req);
}
static const VMStateDescription vmstate_spapr_vscsi_req = {
    .name = "spapr_vscsi_req",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(crq.raw, vscsi_req),
        VMSTATE_BUFFER(viosrp_iu_buf, vscsi_req),
        VMSTATE_UINT32(qtag, vscsi_req),
        VMSTATE_BOOL(active, vscsi_req),
        VMSTATE_UINT32(data_len, vscsi_req),
        VMSTATE_BOOL(writing, vscsi_req),
        VMSTATE_UINT32(senselen, vscsi_req),
        VMSTATE_BUFFER(sense, vscsi_req),
        VMSTATE_UINT8(dma_fmt, vscsi_req),
        VMSTATE_UINT16(local_desc, vscsi_req),
        VMSTATE_UINT16(total_desc, vscsi_req),
        VMSTATE_UINT16(cdb_offset, vscsi_req),
        /* Restart SCSI request from the beginning for now */
        /* VMSTATE_UINT16(cur_desc_num, vscsi_req),
           VMSTATE_UINT16(cur_desc_offset, vscsi_req), */
        VMSTATE_END_OF_LIST()
    },
};

static void vscsi_save_request(QEMUFile *f, SCSIRequest *sreq)
{
    vscsi_req *req = sreq->hba_private;

    assert(req->active);

    vmstate_save_state(f, &vmstate_spapr_vscsi_req, req, NULL);

    trace_spapr_vscsi_save_request(req->qtag, req->cur_desc_num,
                                   req->cur_desc_offset);
}

static void *vscsi_load_request(QEMUFile *f, SCSIRequest *sreq)
{
    SCSIBus *bus = sreq->bus;
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(bus->qbus.parent);
    vscsi_req *req;
    int rc;

    assert(sreq->tag < VSCSI_REQ_LIMIT);
    req = &s->reqs[sreq->tag];
    assert(!req->active);

    memset(req, 0, sizeof(*req));
    rc = vmstate_load_state(f, &vmstate_spapr_vscsi_req, req, 1);
    if (rc) {
        fprintf(stderr, "VSCSI: failed loading request tag#%u\n", sreq->tag);
        return NULL;
    }
    assert(req->active);

    req->sreq = scsi_req_ref(sreq);

    trace_spapr_vscsi_load_request(req->qtag, req->cur_desc_num,
                                   req->cur_desc_offset);

    return req;
}
static void vscsi_process_login(VSCSIState *s, vscsi_req *req)
{
    union viosrp_iu *iu = req_iu(req);
    struct srp_login_rsp *rsp = &iu->srp.login_rsp;
    uint64_t tag = iu->srp.rsp.tag;

    trace_spapr_vscsi_process_login();

    /* TODO handle case that requested size is wrong and
     * buffer format is wrong
     */
    memset(iu, 0, sizeof(struct srp_login_rsp));
    rsp->opcode = SRP_LOGIN_RSP;
    /* Don't advertise quite as many requests as we support to
     * keep room for management stuff etc...
     */
    rsp->req_lim_delta = cpu_to_be32(VSCSI_REQ_LIMIT - 2);
    rsp->tag = tag;
    rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
    rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
    /* direct and indirect */
    rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT);

    vscsi_send_iu(s, req, sizeof(*rsp), VIOSRP_SRP_FORMAT);
}
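
/*
 * Answer an INQUIRY aimed at a LUN we have no device for with a PQ=3
 * ("not capable of supporting a device here") response instead of
 * failing the command outright.
 */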
static void vscsi_inquiry_no_target(VSCSIState *s, vscsi_req *req)
{
    uint8_t *cdb = req_iu(req)->srp.cmd.cdb;
    uint8_t resp_data[36];
    int rc, len, alen;

    /* We don't do EVPD. Also check that page_code is 0 */
    if ((cdb[1] & 0x01) || cdb[2] != 0) {
        /* Send INVALID FIELD IN CDB */
        vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
        return;
    }
    alen = cdb[3];
    alen = (alen << 8) | cdb[4];
    len = MIN(alen, 36);

    /* Fake up inquiry using PQ=3 */
    memset(resp_data, 0, 36);
    resp_data[0] = 0x7f;   /* Not capable of supporting a device here */
    resp_data[2] = 0x06;   /* SPC-4 */
    resp_data[3] = 0x02;   /* Resp data format */
    resp_data[4] = 36 - 5; /* Additional length */
    resp_data[7] = 0x10;   /* Sync transfers */
    memcpy(&resp_data[16], "QEMU EMPTY      ", 16);
    memcpy(&resp_data[8], "QEMU    ", 8);

    req->writing = 0;
    vscsi_preprocess_desc(req);
    rc = vscsi_srp_transfer_data(s, req, 0, resp_data, len);
    if (rc < 0) {
        vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
    } else {
        vscsi_send_rsp(s, req, 0, 36 - rc, 0);
    }
}
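
/*
 * Build a REPORT LUNS response by hand from the devices on the bus,
 * always reporting an entry for LUN 0 even when no device sits there.
 */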
static void vscsi_report_luns(VSCSIState *s, vscsi_req *req)
{
    BusChild *kid;
    int i, len, n, rc;
    uint8_t *resp_data;
    bool found_lun0;

    n = 0;
    found_lun0 = false;
    QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
        SCSIDevice *dev = SCSI_DEVICE(kid->child);

        n += 8;
        if (dev->channel == 0 && dev->id == 0 && dev->lun == 0) {
            found_lun0 = true;
        }
    }
    if (!found_lun0) {
        n += 8;
    }
    len = n + 8;

    resp_data = g_malloc0(len);
    stl_be_p(resp_data, n);
    i = found_lun0 ? 8 : 16;
    QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->id == 0 && dev->channel == 0) {
            resp_data[i] = 0;        /* Use simple LUN for 0 (SAM5 4.7.7.1) */
        } else {
            resp_data[i] = (2 << 6); /* Otherwise LUN addressing (4.7.7.4) */
        }
        resp_data[i] |= dev->id;
        resp_data[i + 1] = (dev->channel << 5);
        resp_data[i + 1] |= dev->lun;
        i += 8;
    }

    vscsi_preprocess_desc(req);
    rc = vscsi_srp_transfer_data(s, req, 0, resp_data, len);
    g_free(resp_data);
    if (rc < 0) {
        vscsi_makeup_sense(s, req, HARDWARE_ERROR, 0, 0);
        vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
    } else {
        vscsi_send_rsp(s, req, 0, len - rc, 0);
    }
}
static int vscsi_queue_cmd(VSCSIState *s, vscsi_req *req)
{
    union srp_iu *srp = &req_iu(req)->srp;
    SCSIDevice *sdev;
    int n, lun;
    size_t cdb_len = sizeof(srp->cmd.cdb) + (srp->cmd.add_cdb_len & ~3);

    if ((srp->cmd.lun == 0 || be64_to_cpu(srp->cmd.lun) == SRP_REPORT_LUNS_WLUN)
        && srp->cmd.cdb[0] == REPORT_LUNS) {
        vscsi_report_luns(s, req);
        return 0;
    }

    sdev = vscsi_device_find(&s->bus, be64_to_cpu(srp->cmd.lun), &lun);
    if (!sdev) {
        trace_spapr_vscsi_queue_cmd_no_drive(be64_to_cpu(srp->cmd.lun));
        if (srp->cmd.cdb[0] == INQUIRY) {
            vscsi_inquiry_no_target(s, req);
        } else {
            vscsi_makeup_sense(s, req, ILLEGAL_REQUEST, 0x24, 0x00);
            vscsi_send_rsp(s, req, CHECK_CONDITION, 0, 0);
        }
        return 1;
    }

    req->sreq = scsi_req_new(sdev, req->qtag, lun, srp->cmd.cdb, cdb_len, req);
    n = scsi_req_enqueue(req->sreq);

    trace_spapr_vscsi_queue_cmd(req->qtag, srp->cmd.cdb[0],
                                scsi_command_name(srp->cmd.cdb[0]), lun, n);

    if (n) {
        /* Transfer direction must be set before preprocessing the
         * descriptors
         */
        req->writing = (n < 1);

        /* Preprocess RDMA descriptors */
        vscsi_preprocess_desc(req);

        /* Get transfer direction and initiate transfer */
        if (n > 0) {
            req->data_len = n;
        } else if (n < 0) {
            req->data_len = -n;
        }
        scsi_req_continue(req->sreq);
    }
    /* Don't touch req here, it may have been recycled already */

    return 0;
}
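
/*
 * Handle an SRP task management IU (abort task, LUN reset, abort/clear
 * task set, ...) and send back an SRP_RSP carrying the 4-byte task
 * management status in its response data.
 */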
static int vscsi_process_tsk_mgmt(VSCSIState *s, vscsi_req *req)
{
    union viosrp_iu *iu = req_iu(req);
    vscsi_req *tmpreq;
    int i, lun = 0, resp = SRP_TSK_MGMT_COMPLETE;
    SCSIDevice *d;
    uint64_t tag = iu->srp.rsp.tag;
    uint8_t sol_not = iu->srp.cmd.sol_not;

    trace_spapr_vscsi_process_tsk_mgmt(iu->srp.tsk_mgmt.tsk_mgmt_func);
    d = vscsi_device_find(&s->bus,
                          be64_to_cpu(req_iu(req)->srp.tsk_mgmt.lun), &lun);
    if (!d) {
        resp = SRP_TSK_MGMT_FIELDS_INVALID;
    } else {
        switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
        case SRP_TSK_ABORT_TASK:
            if (d->lun != lun) {
                resp = SRP_TSK_MGMT_FIELDS_INVALID;
                break;
            }

            tmpreq = vscsi_find_req(s, req_iu(req)->srp.tsk_mgmt.task_tag);
            if (tmpreq && tmpreq->sreq) {
                assert(tmpreq->sreq->hba_private);
                scsi_req_cancel(tmpreq->sreq);
            }
            break;

        case SRP_TSK_LUN_RESET:
            if (d->lun != lun) {
                resp = SRP_TSK_MGMT_FIELDS_INVALID;
                break;
            }

            device_cold_reset(&d->qdev);
            break;

        case SRP_TSK_ABORT_TASK_SET:
        case SRP_TSK_CLEAR_TASK_SET:
            if (d->lun != lun) {
                resp = SRP_TSK_MGMT_FIELDS_INVALID;
                break;
            }

            for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
                tmpreq = &s->reqs[i];
                if (req_iu(tmpreq)->srp.cmd.lun
                        != req_iu(req)->srp.tsk_mgmt.lun) {
                    continue;
                }
                if (!tmpreq->active || !tmpreq->sreq) {
                    continue;
                }
                assert(tmpreq->sreq->hba_private);
                scsi_req_cancel(tmpreq->sreq);
            }
            break;

        case SRP_TSK_CLEAR_ACA:
            resp = SRP_TSK_MGMT_NOT_SUPPORTED;
            break;

        default:
            resp = SRP_TSK_MGMT_FIELDS_INVALID;
            break;
        }
    }

    /* Compose the response here */
    QEMU_BUILD_BUG_ON(SRP_MAX_IU_DATA_LEN < 4);
    memset(iu, 0, sizeof(struct srp_rsp) + 4);
    iu->srp.rsp.opcode = SRP_RSP;
    iu->srp.rsp.req_lim_delta = cpu_to_be32(1);
    iu->srp.rsp.tag = tag;
    iu->srp.rsp.flags |= SRP_RSP_FLAG_RSPVALID;
    iu->srp.rsp.resp_data_len = cpu_to_be32(4);
    if (resp) {
        iu->srp.rsp.sol_not = (sol_not & 0x04) >> 2;
    } else {
        iu->srp.rsp.sol_not = (sol_not & 0x02) >> 1;
    }

    iu->srp.rsp.status = GOOD;
    iu->srp.rsp.data[3] = resp;

    vscsi_send_iu(s, req, sizeof(iu->srp.rsp) + 4, VIOSRP_SRP_FORMAT);

    return 1;
}
static int vscsi_handle_srp_req(VSCSIState *s, vscsi_req *req)
{
    union srp_iu *srp = &req_iu(req)->srp;
    int done = 1;
    uint8_t opcode = srp->rsp.opcode;

    switch (opcode) {
    case SRP_LOGIN_REQ:
        vscsi_process_login(s, req);
        break;
    case SRP_TSK_MGMT:
        done = vscsi_process_tsk_mgmt(s, req);
        break;
    case SRP_CMD:
        done = vscsi_queue_cmd(s, req);
        break;
    case SRP_LOGIN_RSP:
    case SRP_I_LOGOUT:
    case SRP_T_LOGOUT:
    case SRP_RSP:
    case SRP_CRED_REQ:
    case SRP_CRED_RSP:
    case SRP_AER_REQ:
    case SRP_AER_RSP:
        fprintf(stderr, "VSCSI: Unsupported opcode %02x\n", opcode);
        break;
    default:
        fprintf(stderr, "VSCSI: Unknown type %02x\n", opcode);
    }

    return done;
}
static int vscsi_send_adapter_info(VSCSIState *s, vscsi_req *req)
{
    struct viosrp_adapter_info *sinfo;
    struct mad_adapter_info_data info;
    int rc;

    sinfo = &req_iu(req)->mad.adapter_info;

#if 0 /* What for ? */
    rc = spapr_vio_dma_read(&s->vdev, be64_to_cpu(sinfo->buffer),
                            &info, be16_to_cpu(sinfo->common.length));
    if (rc) {
        fprintf(stderr, "vscsi_send_adapter_info: DMA read failure !\n");
    }
#endif
    memset(&info, 0, sizeof(info));
    strcpy(info.srp_version, SRP_VERSION);
    memcpy(info.partition_name, "qemu", sizeof("qemu"));
    info.partition_number = cpu_to_be32(0);
    info.mad_version = cpu_to_be32(1);
    info.os_type = cpu_to_be32(2);
    info.port_max_txu[0] = cpu_to_be32(VSCSI_MAX_SECTORS << 9);

    rc = spapr_vio_dma_write(&s->vdev, be64_to_cpu(sinfo->buffer),
                             &info, be16_to_cpu(sinfo->common.length));
    if (rc) {
        fprintf(stderr, "vscsi_send_adapter_info: DMA write failure !\n");
    }

    sinfo->common.status = rc ? cpu_to_be32(1) : 0;

    return vscsi_send_iu(s, req, sizeof(*sinfo), VIOSRP_MAD_FORMAT);
}
static int vscsi_send_capabilities(VSCSIState *s, vscsi_req *req)
{
    struct viosrp_capabilities *vcap;
    struct capabilities cap = { };
    uint16_t len, req_len;
    uint64_t buffer;
    int rc;

    vcap = &req_iu(req)->mad.capabilities;
    req_len = len = be16_to_cpu(vcap->common.length);
    buffer = be64_to_cpu(vcap->buffer);
    if (len > sizeof(cap)) {
        fprintf(stderr, "vscsi_send_capabilities: capabilities size mismatch !\n");

        /*
         * Just read and populate the structure that is known.
         * Zero rest of the structure.
         */
        len = sizeof(cap);
    }
    rc = spapr_vio_dma_read(&s->vdev, buffer, &cap, len);
    if (rc) {
        fprintf(stderr, "vscsi_send_capabilities: DMA read failure !\n");
    }

    /*
     * Current implementation does not support any migration or
     * reservation capabilities. Construct the response telling the
     * guest not to use them.
     */
    cap.flags = 0;
    cap.migration.ecl = 0;
    cap.reserve.type = 0;
    cap.migration.common.server_support = 0;
    cap.reserve.common.server_support = 0;

    rc = spapr_vio_dma_write(&s->vdev, buffer, &cap, len);
    if (rc) {
        fprintf(stderr, "vscsi_send_capabilities: DMA write failure !\n");
    }
    if (req_len > len) {
        /*
         * Being paranoid, let's not worry about the error code here.
         * The actual write of the cap is done above.
         */
        spapr_vio_dma_set(&s->vdev, (buffer + len), 0, (req_len - len));
    }

    vcap->common.status = rc ? cpu_to_be32(1) : 0;

    return vscsi_send_iu(s, req, sizeof(*vcap), VIOSRP_MAD_FORMAT);
}
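
/*
 * Dispatch a MAD (management datagram) request. Types we do not
 * implement are bounced back with VIOSRP_MAD_NOT_SUPPORTED status.
 */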
static int vscsi_handle_mad_req(VSCSIState *s, vscsi_req *req)
{
    union mad_iu *mad = &req_iu(req)->mad;
    bool request_handled = false;
    uint64_t retlen = 0;

    switch (be32_to_cpu(mad->empty_iu.common.type)) {
    case VIOSRP_EMPTY_IU_TYPE:
        fprintf(stderr, "Unsupported EMPTY MAD IU\n");
        retlen = sizeof(mad->empty_iu);
        break;
    case VIOSRP_ERROR_LOG_TYPE:
        fprintf(stderr, "Unsupported ERROR LOG MAD IU\n");
        retlen = sizeof(mad->error_log);
        break;
    case VIOSRP_ADAPTER_INFO_TYPE:
        vscsi_send_adapter_info(s, req);
        request_handled = true;
        break;
    case VIOSRP_HOST_CONFIG_TYPE:
        retlen = sizeof(mad->host_config);
        break;
    case VIOSRP_CAPABILITIES_TYPE:
        vscsi_send_capabilities(s, req);
        request_handled = true;
        break;
    default:
        fprintf(stderr, "VSCSI: Unknown MAD type %02x\n",
                be32_to_cpu(mad->empty_iu.common.type));
        /*
         * PAPR+ says that "The length field is set to the length
         * of the data structure(s) used in the command".
         * As we did not recognize the request type, put zero there.
         */
        retlen = 0;
    }

    if (!request_handled) {
        mad->empty_iu.common.status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
        vscsi_send_iu(s, req, retlen, VIOSRP_MAD_FORMAT);
    }

    return 1;
}
static void vscsi_got_payload(VSCSIState *s, vscsi_crq *crq)
{
    vscsi_req *req;
    int done;

    req = vscsi_get_req(s);
    if (req == NULL) {
        fprintf(stderr, "VSCSI: Failed to get a request !\n");
        return;
    }

    /* We only support a limited number of descriptors, we know
     * the ibmvscsi driver uses up to 10 max, so it should fit
     * in our 256 bytes IUs. If not we'll have to increase the size
     * of the structure.
     */
    if (crq->s.IU_length > SRP_MAX_IU_LEN) {
        fprintf(stderr, "VSCSI: SRP IU too long (%d bytes) !\n",
                crq->s.IU_length);
        vscsi_put_req(req);
        return;
    }

    /* XXX Handle failure differently ? */
    if (spapr_vio_dma_read(&s->vdev, crq->s.IU_data_ptr, &req->viosrp_iu_buf,
                           crq->s.IU_length)) {
        fprintf(stderr, "vscsi_got_payload: DMA read failure !\n");
        vscsi_put_req(req);
        return;
    }
    memcpy(&req->crq, crq, sizeof(vscsi_crq));

    if (crq->s.format == VIOSRP_MAD_FORMAT) {
        done = vscsi_handle_mad_req(s, req);
    } else {
        done = vscsi_handle_srp_req(s, req);
    }

    if (done) {
        vscsi_put_req(req);
    }
}
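
/*
 * Entry point for CRQ entries queued by the guest: byteswap the entry,
 * answer initialization and link events directly, and hand payload
 * entries to the SRP/MAD handlers above.
 */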
static int vscsi_do_crq(struct SpaprVioDevice *dev, uint8_t *crq_data)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev);
    vscsi_crq crq;

    memcpy(crq.raw, crq_data, 16);
    crq.s.timeout = be16_to_cpu(crq.s.timeout);
    crq.s.IU_length = be16_to_cpu(crq.s.IU_length);
    crq.s.IU_data_ptr = be64_to_cpu(crq.s.IU_data_ptr);

    trace_spapr_vscsi_do_crq(crq.raw[0], crq.raw[1]);

    switch (crq.s.valid) {
    case 0xc0: /* Init command/response */
        /* Respond to initialization request */
        if (crq.s.format == 0x01) {
            memset(crq.raw, 0, 16);
            crq.s.valid = 0xc0;
            crq.s.format = 0x02;
            spapr_vio_send_crq(dev, crq.raw);
        }

        /* Note that in hotplug cases, we might get a 0x02
         * as a result of us emitting the init request
         */
        break;
    case 0xff: /* Link event */
        /* Not handled for now */
        break;
    case 0x80: /* Payloads */
        switch (crq.s.format) {
        case VIOSRP_SRP_FORMAT: /* AKA VSCSI request */
        case VIOSRP_MAD_FORMAT: /* AKA VSCSI response */
            vscsi_got_payload(s, &crq);
            break;
        case VIOSRP_OS400_FORMAT:
        case VIOSRP_AIX_FORMAT:
        case VIOSRP_LINUX_FORMAT:
        case VIOSRP_INLINE_FORMAT:
            fprintf(stderr, "vscsi_do_srq: Unsupported payload format %02x\n",
                    crq.s.format);
            break;
        default:
            fprintf(stderr, "vscsi_do_srq: Unknown payload format %02x\n",
                    crq.s.format);
        }
        break;
    default:
        fprintf(stderr, "vscsi_do_crq: unknown CRQ %02x %02x ...\n",
                crq.raw[0], crq.raw[1]);
    };

    return 0;
}
static const struct SCSIBusInfo vscsi_scsi_info = {
    .tcq = true,
    .max_channel = 7, /* logical unit addressing format */
    .max_target = 63,
    .max_lun = 31,

    .transfer_data = vscsi_transfer_data,
    .complete = vscsi_command_complete,
    .cancel = vscsi_request_cancelled,
    .save_request = vscsi_save_request,
    .load_request = vscsi_load_request,
};

static void spapr_vscsi_reset(SpaprVioDevice *dev)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev);
    int i;

    memset(s->reqs, 0, sizeof(s->reqs));
    for (i = 0; i < VSCSI_REQ_LIMIT; i++) {
        s->reqs[i].qtag = i;
    }
}

static void spapr_vscsi_realize(SpaprVioDevice *dev, Error **errp)
{
    VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(dev);

    dev->crq.SendFunc = vscsi_do_crq;

    scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &vscsi_scsi_info);

    /* ibmvscsi SCSI bus does not allow hotplug. */
    qbus_set_hotplug_handler(BUS(&s->bus), NULL);
}

void spapr_vscsi_create(SpaprVioBus *bus)
{
    DeviceState *dev;

    dev = qdev_new("spapr-vscsi");

    qdev_realize_and_unref(dev, &bus->bus, &error_fatal);
    scsi_bus_legacy_handle_cmdline(&VIO_SPAPR_VSCSI_DEVICE(dev)->bus);
}

static int spapr_vscsi_devnode(SpaprVioDevice *dev, void *fdt, int node_off)
{
    int ret;

    ret = fdt_setprop_cell(fdt, node_off, "#address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "#size-cells", 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

static Property spapr_vscsi_properties[] = {
    DEFINE_SPAPR_PROPERTIES(VSCSIState, vdev),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_spapr_vscsi = {
    .name = "spapr_vscsi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SPAPR_VIO(vdev, VSCSIState),
        /* VSCSI state */
        /* ???? */
        VMSTATE_END_OF_LIST()
    },
};

static void spapr_vscsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);

    k->realize = spapr_vscsi_realize;
    k->reset = spapr_vscsi_reset;
    k->devnode = spapr_vscsi_devnode;
    k->dt_name = "v-scsi";
    k->dt_type = "vscsi";
    k->dt_compatible = "IBM,v-scsi";
    k->signal_mask = 0x00000001;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    device_class_set_props(dc, spapr_vscsi_properties);
    k->rtce_window_size = 0x10000000;
    dc->vmsd = &vmstate_spapr_vscsi;
}

static const TypeInfo spapr_vscsi_info = {
    .name          = TYPE_VIO_SPAPR_VSCSI_DEVICE,
    .parent        = TYPE_VIO_SPAPR_DEVICE,
    .instance_size = sizeof(VSCSIState),
    .class_init    = spapr_vscsi_class_init,
};

static void spapr_vscsi_register_types(void)
{
    type_register_static(&spapr_vscsi_info);
}

type_init(spapr_vscsi_register_types)