/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"

/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */
static void esp_raise_irq(ESPState *s)
{
    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
        s->rregs[ESP_RSTAT] |= STAT_INT;
        qemu_irq_raise(s->irq);
        trace_esp_raise_irq();
    }
}

static void esp_lower_irq(ESPState *s)
{
    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        s->rregs[ESP_RSTAT] &= ~STAT_INT;
        qemu_irq_lower(s->irq);
        trace_esp_lower_irq();
    }
}

static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
}

static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
}
void esp_dma_enable(ESPState *s, int irq, int level)
{
    if (level) {
        s->dma_enabled = 1;
        trace_esp_dma_enable();
        if (s->dma_cb) {
            s->dma_cb(s);
            s->dma_cb = NULL;
        }
    } else {
        trace_esp_dma_disable();
        s->dma_enabled = 0;
    }
}
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;

    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
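
/*
 * Pseudo-DMA (PDMA) bookkeeping: when the board provides no
 * dma_memory_read/write callbacks, the host CPU moves the data itself
 * through the separate "esp-pdma" MMIO region.  set_pdma() records which
 * internal buffer the next PDMA accesses should target and how many bytes
 * are expected; the registered pdma_cb is invoked once that count reaches
 * zero.
 */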
static void set_pdma(ESPState *s, enum pdma_origin_id origin,
                     uint32_t index, uint32_t len)
{
    s->pdma_origin = origin;
    s->pdma_start = index;
    s->pdma_cur = index;
    s->pdma_len = len;
}

static uint8_t *get_pdma_buf(ESPState *s)
{
    switch (s->pdma_origin) {
    case PDMA:
        return s->pdma_buf;
    case TI:
        return s->ti_buf;
    case CMD:
        return s->cmdbuf;
    case ASYNC:
        return s->async_buf;
    }
    return NULL;
}
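
/*
 * Common prologue for the selection commands: reset the transfer-information
 * buffer, cancel any request that is still in flight and look up the target
 * selected via the bus ID register.  Returns -1 (and signals a disconnect
 * interrupt) if no such device exists on the bus.
 */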
static int get_cmd_cb(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }
    return 0;
}
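
/*
 * Fetch the SCSI CDB into buf.  In DMA mode the bytes come either from the
 * board's DMA callback or, on PDMA-only boards, are deferred until the host
 * has pushed them through the PDMA region (in which case 0 is returned and
 * the registered pdma_cb finishes the job).  In PIO mode the bytes are taken
 * from the FIFO buffer.  Returns the command length, or 0 on deferral/error.
 */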
static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = s->rregs[ESP_TCLO];
        dmalen |= s->rregs[ESP_TCMID] << 8;
        dmalen |= s->rregs[ESP_TCHI] << 16;
        if (dmalen > buflen) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
        } else {
            memcpy(s->pdma_buf, buf, dmalen);
            set_pdma(s, PDMA, 0, dmalen);
            esp_raise_drq(s);
            return 0;
        }
    } else {
        dmalen = s->ti_size;
        if (dmalen > TI_BUFSZ) {
            return 0;
        }
        memcpy(buf, s->ti_buf, dmalen);
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    if (get_cmd_cb(s) < 0) {
        return 0;
    }
    return dmalen;
}
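
/*
 * Enqueue the CDB with the SCSI layer.  The LUN comes from the low bits of
 * the busid (IDENTIFY message) byte.  The status register is primed for the
 * data phase the request will need, and a bus-service/function-complete
 * interrupt is raised.
 */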
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void do_cmd(ESPState *s, uint8_t *buf)
{
    uint8_t busid = buf[0];

    do_busid_cmd(s, &buf[1], busid);
}
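
/*
 * handle_satn(), handle_s_without_atn() and handle_satn_stop() implement
 * the ESP selection commands.  When the command bytes have to be moved by
 * pseudo-DMA, the real work is finished in the matching *_pdma_cb()
 * callback once the host has supplied all of the bytes.
 */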
static void satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    if (s->pdma_cur != s->pdma_start) {
        do_cmd(s, get_pdma_buf(s) + s->pdma_start);
    }
}

static void handle_satn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    s->pdma_cb = satn_pdma_cb;
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_cmd(s, buf);
    }
}

static void s_without_satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    if (s->pdma_cur != s->pdma_start) {
        do_busid_cmd(s, get_pdma_buf(s) + s->pdma_start, 0);
    }
}

static void handle_s_without_atn(ESPState *s)
{
    uint8_t buf[32];
    int len;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    s->pdma_cb = s_without_satn_pdma_cb;
    len = get_cmd(s, buf, sizeof(buf));
    if (len) {
        do_busid_cmd(s, buf, 0);
    }
}
static void satn_stop_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    s->cmdlen = s->pdma_cur - s->pdma_start;
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}

static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
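
/*
 * Return the status byte followed by a zero message byte to the initiator,
 * as requested by the CMD_ICCS (Initiator Command Complete Sequence)
 * command.  The pair is delivered via DMA, via pseudo-DMA, or left in the
 * FIFO for PIO readout, depending on how the board is wired.
 */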
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}

static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;
    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            set_pdma(s, TI, 0, 2);
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
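
/* Signal that the current DMA transfer has reached terminal count. */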
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    s->rregs[ESP_TCHI] = 0;
    esp_raise_irq(s);
}
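
/*
 * Completion callback for a pseudo-DMA data transfer: account for the bytes
 * the host actually moved, hand the filled/drained buffer back to the SCSI
 * layer and decide whether the DMA-done interrupt can be raised yet.
 */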
static void do_dma_pdma_cb(ESPState *s)
{
    int to_device = (s->ti_size < 0);
    int len = s->pdma_cur - s->pdma_start;

    if (s->do_cmd) {
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
}
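
/*
 * Core DMA engine.  Moves up to dma_left bytes between the guest's DMA
 * controller (or the PDMA region) and the SCSI layer's async buffer; the
 * sign of ti_size selects the direction.  Command bytes accumulated for a
 * deferred "select with ATN and stop" sequence are dispatched here as well.
 */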
static void esp_do_dma(ESPState *s)
{
    uint32_t len;
    int to_device;

    len = s->dma_left;
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        trace_esp_do_dma(s->cmdlen, len);
        assert(s->cmdlen <= sizeof(s->cmdbuf) &&
               len <= sizeof(s->cmdbuf) - s->cmdlen);
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
        } else {
            set_pdma(s, CMD, s->cmdlen, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }
    if (len > s->async_len) {
        len = s->async_len;
    }
    to_device = (s->ti_size < 0);
    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            set_pdma(s, ASYNC, 0, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            set_pdma(s, ASYNC, 0, len);
            s->pdma_cb = do_dma_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
}
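
/*
 * Called once the SCSI layer has finished a request: record the status
 * byte, move to the status phase and release the request.  If the guest has
 * not yet acknowledged the previous interrupt, esp_command_complete()
 * defers the report until ESP_RINTR is read (see esp_reg_read()).
 */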
static void esp_report_command_complete(ESPState *s, uint32_t status)
{
    trace_esp_command_complete();
    if (s->ti_size != 0) {
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
void esp_command_complete(SCSIRequest *req, uint32_t status,
                          size_t resid)
{
    ESPState *s = req->hba_private;

    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        /*
         * Defer handling command complete until the previous
         * interrupt has been handled.
         */
        trace_esp_command_complete_deferred();
        s->deferred_status = status;
        s->deferred_complete = true;
        return;
    }
    esp_report_command_complete(s, status);
}
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;

    assert(!s->do_cmd);
    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /*
         * If this was the last part of a DMA transfer then the
         * completion interrupt is deferred to here.
         */
        esp_dma_done(s);
    }
}
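
/*
 * Transfer Information command: reload the transfer counter (a value of
 * zero means the maximum of 0x10000 bytes), clamp it to the amount of data
 * actually pending and either kick off DMA or dispatch the command bytes
 * collected so far.
 */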
static void handle_ti(ESPState *s)
{
    uint32_t dmalen, minlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0) {
        dmalen = 0x10000;
    }
    s->dma_counter = dmalen;

    if (s->do_cmd) {
        minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
    } else if (s->ti_size < 0) {
        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
    } else {
        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
    }
    trace_esp_handle_ti(minlen);
    if (s->dma) {
        s->dma_left = minlen;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else if (s->do_cmd) {
        trace_esp_handle_ti_cmd(s->cmdlen);
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
    }
}
void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}

static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}

static void parent_esp_reset(ESPState *s, int irq, int level)
{
    if (level) {
        esp_soft_reset(s);
    }
}
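
/*
 * Register reads.  Reading the FIFO pops a byte from the transfer buffer in
 * PIO mode, reading ESP_RINTR acknowledges the pending interrupt (and
 * flushes any command completion that was deferred behind it), and reading
 * ESP_TCHI before it has ever been written returns the chip ID.
 */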
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t old_val;

    trace_esp_mem_readb(saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else if (s->ti_rptr < s->ti_wptr) {
            s->ti_size--;
            s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        }
        if (s->ti_rptr == s->ti_wptr) {
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);
        if (s->deferred_complete) {
            esp_report_command_complete(s, s->deferred_status);
            s->deferred_complete = false;
        }
        return old_val;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            return s->chip_id;
        }
        /* fall through */
    default:
        break;
    }
    return s->rregs[saddr];
}
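
/*
 * Register writes.  Loading the transfer counter clears the TC status bit,
 * FIFO writes feed either the command buffer or the transfer buffer, and a
 * write to ESP_CMD latches the DMA bit and executes the requested chip
 * command.
 */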
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else if (s->ti_wptr == TI_BUFSZ - 1) {
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
            s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
        } else {
            s->dma = 0;
        }
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            /*s->ti_size = 0;*/
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
static bool esp_mem_accepts(void *opaque, hwaddr addr,
                            unsigned size, bool is_write,
                            MemTxAttrs attrs)
{
    return (size == 1) || (is_write && size == 4);
}

static bool esp_pdma_needed(void *opaque)
{
    ESPState *s = opaque;

    return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
           s->dma_enabled;
}
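
/*
 * Migration state.  The PDMA fields only need to travel when the device is
 * actually operating in pseudo-DMA mode, so they live in an optional
 * subsection of the main "esp" vmstate description.
 */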
static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = esp_pdma_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(pdma_buf, ESPState),
        VMSTATE_INT32(pdma_origin, ESPState),
        VMSTATE_UINT32(pdma_len, ESPState),
        VMSTATE_UINT32(pdma_start, ESPState),
        VMSTATE_UINT32(pdma_cur, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 4,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(deferred_status, ESPState),
        VMSTATE_BOOL(deferred_complete, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_esp_pdma,
        NULL
    }
};
static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    esp_reg_write(&sysbus->esp, saddr, val);
}

static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
                                    unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    uint32_t saddr;

    saddr = addr >> sysbus->it_shift;
    return esp_reg_read(&sysbus->esp, saddr);
}

static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
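
/*
 * Host-side accessors for the "esp-pdma" region: every 8- or 16-bit access
 * moves one or two bytes between the host CPU and the buffer selected by
 * set_pdma(), decrements the transfer counter registers, and fires the
 * pending pdma_cb (dropping DRQ) once the programmed length is exhausted.
 */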
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = &sysbus->esp;
    uint32_t dmalen;
    uint8_t *buf = get_pdma_buf(s);

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0 || s->pdma_len == 0) {
        return;
    }
    switch (size) {
    case 1:
        buf[s->pdma_cur++] = val;
        s->pdma_len--;
        dmalen--;
        break;
    case 2:
        buf[s->pdma_cur++] = val >> 8;
        buf[s->pdma_cur++] = val;
        s->pdma_len -= 2;
        dmalen -= 2;
        break;
    }
    s->rregs[ESP_TCLO] = dmalen & 0xff;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
    if (s->pdma_len == 0 && s->pdma_cb) {
        esp_lower_drq(s);
        s->pdma_cb(s);
        s->pdma_cb = NULL;
    }
}

static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = &sysbus->esp;
    uint8_t *buf = get_pdma_buf(s);
    uint64_t val = 0;

    if (s->pdma_len == 0) {
        return 0;
    }
    switch (size) {
    case 1:
        val = buf[s->pdma_cur++];
        s->pdma_len--;
        break;
    case 2:
        val = buf[s->pdma_cur++];
        val = (val << 8) | buf[s->pdma_cur++];
        s->pdma_len -= 2;
        break;
    }
    if (s->pdma_len == 0 && s->pdma_cb) {
        esp_lower_drq(s);
        s->pdma_cb(s);
        s->pdma_cb = NULL;
    }
    return val;
}

static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 2,
};
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};

static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = ESP_STATE(opaque);
    ESPState *s = &sysbus->esp;

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(s, irq, level);
        break;
    }
}
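
/*
 * Realize the sysbus flavour of the device: wire up the interrupt and DRQ
 * lines, expose the register bank and the PDMA port as MMIO regions, hook
 * up the reset/DMA-enable GPIO inputs and create the SCSI bus.
 */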
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = ESP_STATE(dev);
    ESPState *s = &sysbus->esp;

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 2);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}

static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = ESP_STATE(dev);

    esp_hard_reset(&sysbus->esp);
}

static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};

static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}

static const TypeInfo sysbus_esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};

static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
}

type_init(esp_register_types)