esp.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515
  1. /*
  2. * QEMU ESP/NCR53C9x emulation
  3. *
  4. * Copyright (c) 2005-2006 Fabrice Bellard
  5. * Copyright (c) 2012 Herve Poussineau
  6. *
  7. * Permission is hereby granted, free of charge, to any person obtaining a copy
  8. * of this software and associated documentation files (the "Software"), to deal
  9. * in the Software without restriction, including without limitation the rights
  10. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  11. * copies of the Software, and to permit persons to whom the Software is
  12. * furnished to do so, subject to the following conditions:
  13. *
  14. * The above copyright notice and this permission notice shall be included in
  15. * all copies or substantial portions of the Software.
  16. *
  17. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  18. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  19. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  20. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  21. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  22. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  23. * THE SOFTWARE.
  24. */
  25. #include "qemu/osdep.h"
  26. #include "hw/sysbus.h"
  27. #include "migration/vmstate.h"
  28. #include "hw/irq.h"
  29. #include "hw/scsi/esp.h"
  30. #include "trace.h"
  31. #include "qemu/log.h"
  32. #include "qemu/module.h"
  33. /*
  34. * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
  35. * also produced as NCR89C100. See
  36. * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
  37. * and
  38. * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
  39. *
  40. * On Macintosh Quadra it is a NCR53C96.
  41. */
  42. static void esp_raise_irq(ESPState *s)
  43. {
  44. if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
  45. s->rregs[ESP_RSTAT] |= STAT_INT;
  46. qemu_irq_raise(s->irq);
  47. trace_esp_raise_irq();
  48. }
  49. }
  50. static void esp_lower_irq(ESPState *s)
  51. {
  52. if (s->rregs[ESP_RSTAT] & STAT_INT) {
  53. s->rregs[ESP_RSTAT] &= ~STAT_INT;
  54. qemu_irq_lower(s->irq);
  55. trace_esp_lower_irq();
  56. }
  57. }
/* Assert the DMA request (DRQ) line used for pseudo-DMA transfers. */
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
    trace_esp_raise_drq();
}
/* Deassert the DMA request (DRQ) line. */
static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
    trace_esp_lower_drq();
}
  68. void esp_dma_enable(ESPState *s, int irq, int level)
  69. {
  70. if (level) {
  71. s->dma_enabled = 1;
  72. trace_esp_dma_enable();
  73. if (s->dma_cb) {
  74. s->dma_cb(s);
  75. s->dma_cb = NULL;
  76. }
  77. } else {
  78. trace_esp_dma_disable();
  79. s->dma_enabled = 0;
  80. }
  81. }
/*
 * SCSI layer callback: a request was cancelled. Drop our reference and
 * clear the current request/device state if it is the one being tracked.
 * NOTE(review): assumes req->hba_private is always a valid ESPState here —
 * it is set that way in scsi_req_new() calls within this file.
 */
void esp_request_cancelled(SCSIRequest *req)
{
    ESPState *s = req->hba_private;
    if (req == s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
        s->async_len = 0;
    }
}
  92. static void esp_fifo_push(Fifo8 *fifo, uint8_t val)
  93. {
  94. if (fifo8_num_used(fifo) == fifo->capacity) {
  95. trace_esp_error_fifo_overrun();
  96. return;
  97. }
  98. fifo8_push(fifo, val);
  99. }
  100. static uint8_t esp_fifo_pop(Fifo8 *fifo)
  101. {
  102. if (fifo8_is_empty(fifo)) {
  103. return 0;
  104. }
  105. return fifo8_pop(fifo);
  106. }
  107. static uint32_t esp_fifo_pop_buf(Fifo8 *fifo, uint8_t *dest, int maxlen)
  108. {
  109. const uint8_t *buf;
  110. uint32_t n;
  111. if (maxlen == 0) {
  112. return 0;
  113. }
  114. buf = fifo8_pop_buf(fifo, maxlen, &n);
  115. if (dest) {
  116. memcpy(dest, buf, n);
  117. }
  118. return n;
  119. }
  120. static uint32_t esp_get_tc(ESPState *s)
  121. {
  122. uint32_t dmalen;
  123. dmalen = s->rregs[ESP_TCLO];
  124. dmalen |= s->rregs[ESP_TCMID] << 8;
  125. dmalen |= s->rregs[ESP_TCHI] << 16;
  126. return dmalen;
  127. }
  128. static void esp_set_tc(ESPState *s, uint32_t dmalen)
  129. {
  130. s->rregs[ESP_TCLO] = dmalen;
  131. s->rregs[ESP_TCMID] = dmalen >> 8;
  132. s->rregs[ESP_TCHI] = dmalen >> 16;
  133. }
  134. static uint32_t esp_get_stc(ESPState *s)
  135. {
  136. uint32_t dmalen;
  137. dmalen = s->wregs[ESP_TCLO];
  138. dmalen |= s->wregs[ESP_TCMID] << 8;
  139. dmalen |= s->wregs[ESP_TCHI] << 16;
  140. return dmalen;
  141. }
  142. static uint8_t esp_pdma_read(ESPState *s)
  143. {
  144. uint8_t val;
  145. if (s->do_cmd) {
  146. val = esp_fifo_pop(&s->cmdfifo);
  147. } else {
  148. val = esp_fifo_pop(&s->fifo);
  149. }
  150. return val;
  151. }
  152. static void esp_pdma_write(ESPState *s, uint8_t val)
  153. {
  154. uint32_t dmalen = esp_get_tc(s);
  155. if (dmalen == 0) {
  156. return;
  157. }
  158. if (s->do_cmd) {
  159. esp_fifo_push(&s->cmdfifo, val);
  160. } else {
  161. esp_fifo_push(&s->fifo, val);
  162. }
  163. dmalen--;
  164. esp_set_tc(s, dmalen);
  165. }
/* Record which pseudo-DMA continuation to run when the host next accesses
 * the PDMA region (dispatched in esp_pdma_cb()). Stored as an enum so it
 * can be migrated. */
static void esp_set_pdma_cb(ESPState *s, enum pdma_cb cb)
{
    s->pdma_cb = cb;
}
/*
 * Attempt to select the target encoded in the bus ID register.
 * Returns 0 on success (current_dev set, function-complete latched),
 * -1 if no device exists at that target (disconnect interrupt raised).
 */
static int esp_select(ESPState *s)
{
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    /* A new selection discards any in-flight transfer state */
    s->ti_size = 0;
    fifo8_reset(&s->fifo);

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }

    /*
     * Note that we deliberately don't raise the IRQ here: this will be done
     * either in do_command_phase() for DATA OUT transfers or by the deferred
     * IRQ mechanism in esp_transfer_data() for DATA IN transfers
     */
    s->rregs[ESP_RINTR] |= INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    return 0;
}
/*
 * Gather command bytes (up to maxlen) into cmdfifo, either via real DMA,
 * pseudo-DMA, or from the data FIFO, then select the target.
 *
 * Returns the number of bytes gathered, 0 if nothing is available yet
 * (e.g. PDMA set up and waiting for host accesses), or -1 on selection
 * failure. NOTE(review): -1 is returned through a uint32_t and relied on
 * by callers that assign the result to an int32_t — ugly but intentional.
 */
static uint32_t get_cmd(ESPState *s, uint32_t maxlen)
{
    uint8_t buf[ESP_CMDFIFO_SZ];
    uint32_t dmalen, n;
    int target;

    if (s->current_req) {
        /* Started a new command before the old one finished. Cancel it. */
        scsi_req_cancel(s->current_req);
    }

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        dmalen = MIN(esp_get_tc(s), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        if (s->dma_memory_read) {
            /* Real DMA: pull the whole command in one go */
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
            dmalen = MIN(fifo8_num_free(&s->cmdfifo), dmalen);
            fifo8_push_all(&s->cmdfifo, buf, dmalen);
        } else {
            /* Pseudo-DMA: select now, raise DRQ and wait for host accesses */
            if (esp_select(s) < 0) {
                fifo8_reset(&s->cmdfifo);
                return -1;
            }
            esp_raise_drq(s);
            fifo8_reset(&s->cmdfifo);
            return 0;
        }
    } else {
        /* Non-DMA: command bytes were already written into the data FIFO */
        dmalen = MIN(fifo8_num_used(&s->fifo), maxlen);
        if (dmalen == 0) {
            return 0;
        }
        n = esp_fifo_pop_buf(&s->fifo, buf, dmalen);
        n = MIN(fifo8_num_free(&s->cmdfifo), n);
        fifo8_push_all(&s->cmdfifo, buf, n);
    }
    trace_esp_get_cmd(dmalen, target);

    if (esp_select(s) < 0) {
        fifo8_reset(&s->cmdfifo);
        return -1;
    }
    return dmalen;
}
  238. static void do_command_phase(ESPState *s)
  239. {
  240. uint32_t cmdlen;
  241. int32_t datalen;
  242. SCSIDevice *current_lun;
  243. uint8_t buf[ESP_CMDFIFO_SZ];
  244. trace_esp_do_command_phase(s->lun);
  245. cmdlen = fifo8_num_used(&s->cmdfifo);
  246. if (!cmdlen || !s->current_dev) {
  247. return;
  248. }
  249. esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen);
  250. current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun);
  251. s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s);
  252. datalen = scsi_req_enqueue(s->current_req);
  253. s->ti_size = datalen;
  254. fifo8_reset(&s->cmdfifo);
  255. if (datalen != 0) {
  256. s->rregs[ESP_RSTAT] = STAT_TC;
  257. s->rregs[ESP_RSEQ] = SEQ_CD;
  258. s->ti_cmd = 0;
  259. esp_set_tc(s, 0);
  260. if (datalen > 0) {
  261. /*
  262. * Switch to DATA IN phase but wait until initial data xfer is
  263. * complete before raising the command completion interrupt
  264. */
  265. s->data_in_ready = false;
  266. s->rregs[ESP_RSTAT] |= STAT_DI;
  267. } else {
  268. s->rregs[ESP_RSTAT] |= STAT_DO;
  269. s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
  270. esp_raise_irq(s);
  271. esp_lower_drq(s);
  272. }
  273. scsi_req_continue(s->current_req);
  274. return;
  275. }
  276. }
  277. static void do_message_phase(ESPState *s)
  278. {
  279. if (s->cmdfifo_cdb_offset) {
  280. uint8_t message = esp_fifo_pop(&s->cmdfifo);
  281. trace_esp_do_identify(message);
  282. s->lun = message & 7;
  283. s->cmdfifo_cdb_offset--;
  284. }
  285. /* Ignore extended messages for now */
  286. if (s->cmdfifo_cdb_offset) {
  287. int len = MIN(s->cmdfifo_cdb_offset, fifo8_num_used(&s->cmdfifo));
  288. esp_fifo_pop_buf(&s->cmdfifo, NULL, len);
  289. s->cmdfifo_cdb_offset = 0;
  290. }
  291. }
/*
 * Process a complete command in cmdfifo: strip the message-out prefix
 * (IDENTIFY etc.) and then run the command phase proper.
 */
static void do_cmd(ESPState *s)
{
    do_message_phase(s);
    /* All message bytes must have been consumed before the CDB */
    assert(s->cmdfifo_cdb_offset == 0);
    do_command_phase(s);
}
  298. static void satn_pdma_cb(ESPState *s)
  299. {
  300. if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
  301. s->cmdfifo_cdb_offset = 1;
  302. s->do_cmd = 0;
  303. do_cmd(s);
  304. }
  305. }
/*
 * "Select with ATN" command: gather IDENTIFY + CDB and execute. If DMA is
 * currently gated off by the board, defer until esp_dma_enable().
 */
static void handle_satn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn;
        return;
    }
    esp_set_pdma_cb(s, SATN_PDMA_CB);
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        /* One message byte (IDENTIFY) precedes the CDB */
        s->cmdfifo_cdb_offset = 1;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
    /* cmdlen < 0: selection failed, esp_select() already raised INTR_DC */
}
  326. static void s_without_satn_pdma_cb(ESPState *s)
  327. {
  328. if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
  329. s->cmdfifo_cdb_offset = 0;
  330. s->do_cmd = 0;
  331. do_cmd(s);
  332. }
  333. }
/*
 * "Select without ATN" command: like handle_satn() but the CDB arrives
 * with no leading message byte. Deferred if DMA is gated off.
 */
static void handle_s_without_atn(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_s_without_atn;
        return;
    }
    esp_set_pdma_cb(s, S_WITHOUT_SATN_PDMA_CB);
    cmdlen = get_cmd(s, ESP_CMDFIFO_SZ);
    if (cmdlen > 0) {
        /* No message bytes: CDB starts at offset 0 */
        s->cmdfifo_cdb_offset = 0;
        s->do_cmd = 0;
        do_cmd(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, but no cmd yet - switch to command phase */
        s->rregs[ESP_RSEQ] = SEQ_CD;
        s->rregs[ESP_RSTAT] = STAT_CD;
    }
}
/*
 * PDMA continuation for "Select with ATN and stop": once the message byte
 * has arrived, stop after the message-out phase and interrupt so the guest
 * can issue the CDB with a separate TI command.
 */
static void satn_stop_pdma_cb(ESPState *s)
{
    if (!esp_get_tc(s) && !fifo8_is_empty(&s->cmdfifo)) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
/*
 * "Select with ATN and stop" command: transfer only the single IDENTIFY
 * message byte, then stop in message-out phase awaiting the CDB.
 * Deferred if DMA is gated off.
 */
static void handle_satn_stop(ESPState *s)
{
    int32_t cmdlen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    esp_set_pdma_cb(s, SATN_STOP_PDMA_CB);
    /* Only the message byte is transferred before stopping */
    cmdlen = get_cmd(s, 1);
    if (cmdlen > 0) {
        trace_esp_handle_satn_stop(fifo8_num_used(&s->cmdfifo));
        s->do_cmd = 1;
        s->cmdfifo_cdb_offset = 1;
        s->rregs[ESP_RSTAT] = STAT_MO;
        s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_MO;
        esp_raise_irq(s);
    } else if (cmdlen == 0) {
        s->do_cmd = 1;
        /* Target present, switch to message out phase */
        s->rregs[ESP_RSEQ] = SEQ_MO;
        s->rregs[ESP_RSTAT] = STAT_MO;
    }
}
/*
 * PDMA continuation for "Initiator Command Complete Steal": after the
 * status/message bytes have been drained by the host, report transfer
 * complete and move to the next sequence step.
 */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
/*
 * STATUS/MESSAGE IN phase: deliver the 2-byte {status, 0} response either
 * via DMA (real or pseudo) or by loading it into the FIFO for PIO reads.
 */
static void write_response(ESPState *s)
{
    uint8_t buf[2];

    trace_esp_write_response(s->status);

    buf[0] = s->status;
    buf[1] = 0;  /* COMMAND COMPLETE message byte */

    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] |= INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            /* Pseudo-DMA: completion is signalled in write_response_pdma_cb */
            esp_set_pdma_cb(s, WRITE_RESPONSE_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        fifo8_reset(&s->fifo);
        fifo8_push_all(&s->fifo, buf, 2);
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
/*
 * Signal DMA completion: latch terminal count, raise the bus-service
 * interrupt, and zero the FIFO count and transfer counter.
 */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] |= INTR_BS;
    s->rregs[ESP_RFLAGS] = 0;
    esp_set_tc(s, 0);
    esp_raise_irq(s);
}
/*
 * PDMA continuation for TI (transfer information) commands: moves bytes
 * between the FIFO(s) and the active SCSI request each time the host
 * touches the pseudo-DMA region. Handles three situations: command bytes
 * still being gathered (do_cmd), DATA OUT (FIFO -> device buffer) and
 * DATA IN (device buffer -> FIFO).
 */
static void do_dma_pdma_cb(ESPState *s)
{
    /* Low 3 status bits encode the current SCSI bus phase */
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    int len;
    uint32_t n;

    if (s->do_cmd) {
        /* Ensure we have received complete command after SATN and stop */
        if (esp_get_tc(s) || fifo8_is_empty(&s->cmdfifo)) {
            return;
        }
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (to_device) {
        /* Copy FIFO data to device */
        len = MIN(s->async_len, ESP_FIFO_SZ);
        len = MIN(len, fifo8_num_used(&s->fifo));
        n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += n;
        s->async_len -= n;
        s->ti_size += n;

        if (n < len) {
            /* Unaligned accesses can cause FIFO wraparound */
            len = len - n;
            n = esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
            s->async_buf += n;
            s->async_len -= n;
            s->ti_size += n;
        }

        if (s->async_len == 0) {
            /* SCSI buffer filled: let the SCSI layer continue the request */
            scsi_req_continue(s->current_req);
            return;
        }

        if (esp_get_tc(s) == 0) {
            esp_lower_drq(s);
            esp_dma_done(s);
        }

        return;
    } else {
        if (s->async_len == 0) {
            /* Defer until the scsi layer has completed */
            scsi_req_continue(s->current_req);
            s->data_in_ready = false;
            return;
        }

        if (esp_get_tc(s) != 0) {
            /* Copy device data to FIFO */
            len = MIN(s->async_len, esp_get_tc(s));
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;
            esp_set_tc(s, esp_get_tc(s) - len);

            if (esp_get_tc(s) == 0) {
                /* Indicate transfer to FIFO is complete */
                s->rregs[ESP_RSTAT] |= STAT_TC;
            }
            return;
        }

        /* Partially filled a scsi buffer. Complete immediately. */
        esp_lower_drq(s);
        esp_dma_done(s);
    }
}
/*
 * Perform (or set up) a DMA transfer for the current TI command. With real
 * DMA helpers (dma_memory_read/write) the transfer happens synchronously;
 * otherwise a pseudo-DMA callback is armed and DRQ raised so the host CPU
 * drives the transfer byte-by-byte. Also handles command bytes still being
 * accumulated after a "select with ATN and stop".
 */
static void esp_do_dma(ESPState *s)
{
    uint32_t len, cmdlen;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint8_t buf[ESP_CMDFIFO_SZ];

    len = esp_get_tc(s);
    if (s->do_cmd) {
        /*
         * handle_ti_cmd() case: esp_do_dma() is called only from
         * handle_ti_cmd() with do_cmd != NULL (see the assert())
         */
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_do_dma(cmdlen, len);
        if (s->dma_memory_read) {
            len = MIN(len, fifo8_num_free(&s->cmdfifo));
            s->dma_memory_read(s->dma_opaque, buf, len);
            fifo8_push_all(&s->cmdfifo, buf, len);
        } else {
            /* Pseudo-DMA: defer to do_dma_pdma_cb() as the host writes */
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }

    /* Never transfer more than the SCSI layer's current buffer holds */
    if (len > s->async_len) {
        len = s->async_len;
    }

    if (to_device) {
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
        } else {
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);
            return;
        }
    } else {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
        } else {
            /* Adjust TC for any leftover data in the FIFO */
            if (!fifo8_is_empty(&s->fifo)) {
                esp_set_tc(s, esp_get_tc(s) - fifo8_num_used(&s->fifo));
            }

            /* Copy device data to FIFO */
            len = MIN(len, fifo8_num_free(&s->fifo));
            fifo8_push_all(&s->fifo, s->async_buf, len);
            s->async_buf += len;
            s->async_len -= len;
            s->ti_size -= len;

            /*
             * MacOS toolbox uses a TI length of 16 bytes for all commands, so
             * commands shorter than this must be padded accordingly
             */
            if (len < esp_get_tc(s) && esp_get_tc(s) <= ESP_FIFO_SZ) {
                while (fifo8_num_used(&s->fifo) < ESP_FIFO_SZ) {
                    esp_fifo_push(&s->fifo, 0);
                    len++;
                }
            }

            esp_set_tc(s, esp_get_tc(s) - len);
            esp_set_pdma_cb(s, DO_DMA_PDMA_CB);
            esp_raise_drq(s);

            /* Indicate transfer to FIFO is complete */
            s->rregs[ESP_RSTAT] |= STAT_TC;
            return;
        }
    }

    /* Synchronous DMA path: account for the transferred bytes */
    esp_set_tc(s, esp_get_tc(s) - len);
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately. Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || esp_get_tc(s) != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately. */
    esp_dma_done(s);
    esp_lower_drq(s);
}
/*
 * Perform one step of a non-DMA (PIO) transfer: command bytes already in
 * cmdfifo are executed, DATA OUT drains the FIFO into the SCSI buffer,
 * and DATA IN feeds the SCSI buffer into the FIFO one byte at a time
 * (the guest reads ESP_FIFO to pull each byte).
 */
static void esp_do_nodma(ESPState *s)
{
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t cmdlen;
    int len;

    if (s->do_cmd) {
        cmdlen = fifo8_num_used(&s->cmdfifo);
        trace_esp_handle_ti_cmd(cmdlen);
        s->ti_size = 0;
        if ((s->rregs[ESP_RSTAT] & 7) == STAT_CD) {
            /* No command received */
            if (s->cmdfifo_cdb_offset == fifo8_num_used(&s->cmdfifo)) {
                return;
            }

            /* Command has been received */
            s->do_cmd = 0;
            do_cmd(s);
        } else {
            /*
             * Extra message out bytes received: update cmdfifo_cdb_offset
             * and then switch to command phase
             */
            s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
            s->rregs[ESP_RSEQ] = SEQ_CD;
            s->rregs[ESP_RINTR] |= INTR_BS;
            esp_raise_irq(s);
        }
        return;
    }

    if (!s->current_req) {
        return;
    }

    if (s->async_len == 0) {
        /* Defer until data is available. */
        return;
    }

    if (to_device) {
        len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ);
        esp_fifo_pop_buf(&s->fifo, s->async_buf, len);
        s->async_buf += len;
        s->async_len -= len;
        s->ti_size += len;
    } else {
        /* Feed one byte at a time: the guest polls the FIFO register */
        if (fifo8_is_empty(&s->fifo)) {
            fifo8_push(&s->fifo, s->async_buf[0]);
            s->async_buf++;
            s->async_len--;
            s->ti_size--;
        }
    }

    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        return;
    }

    s->rregs[ESP_RINTR] |= INTR_BS;
    esp_raise_irq(s);
}
/*
 * Dispatch the pseudo-DMA continuation recorded by esp_set_pdma_cb().
 * Kept as an enum + switch (rather than a function pointer) so the
 * pending callback can be migrated as plain state.
 */
static void esp_pdma_cb(ESPState *s)
{
    switch (s->pdma_cb) {
    case SATN_PDMA_CB:
        satn_pdma_cb(s);
        break;
    case S_WITHOUT_SATN_PDMA_CB:
        s_without_satn_pdma_cb(s);
        break;
    case SATN_STOP_PDMA_CB:
        satn_stop_pdma_cb(s);
        break;
    case WRITE_RESPONSE_PDMA_CB:
        write_response_pdma_cb(s);
        break;
    case DO_DMA_PDMA_CB:
        do_dma_pdma_cb(s);
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * SCSI layer callback: the request has finished. Record the status byte,
 * switch to STATUS phase when the transfer is fully drained, and drop the
 * reference to the completed request.
 */
void esp_command_complete(SCSIRequest *req, size_t resid)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);

    trace_esp_command_complete();

    /*
     * Non-DMA transfers from the target will leave the last byte in
     * the FIFO so don't reset ti_size in this case
     */
    if (s->dma || to_device) {
        if (s->ti_size != 0) {
            /* Request ended with bytes still outstanding */
            trace_esp_command_complete_unexpected();
        }
        s->ti_size = 0;
    }

    s->async_len = 0;
    if (req->status) {
        trace_esp_command_complete_fail();
    }
    s->status = req->status;

    /*
     * If the transfer is finished, switch to status phase. For non-DMA
     * transfers from the target the last byte is still in the FIFO
     */
    if (s->ti_size == 0) {
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
        esp_dma_done(s);
        esp_lower_drq(s);
    }

    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
/*
 * SCSI layer callback: a new chunk of request data is available (or has
 * been consumed). Stashes the buffer, raises the deferred DATA IN
 * command-completion interrupt, and resumes any in-progress TI command.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;
    int to_device = ((s->rregs[ESP_RSTAT] & 7) == STAT_DO);
    uint32_t dmalen = esp_get_tc(s);

    /* Command bytes must never be in flight when data arrives */
    assert(!s->do_cmd);
    trace_esp_transfer_data(dmalen, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);

    if (!to_device && !s->data_in_ready) {
        /*
         * Initial incoming data xfer is complete so raise command
         * completion interrupt
         */
        s->data_in_ready = true;
        s->rregs[ESP_RSTAT] |= STAT_TC;
        s->rregs[ESP_RINTR] |= INTR_BS;
        esp_raise_irq(s);
    }

    if (s->ti_cmd == 0) {
        /*
         * Always perform the initial transfer upon reception of the next TI
         * command to ensure the DMA/non-DMA status of the command is correct.
         * It is not possible to use s->dma directly in the section below as
         * some OSs send non-DMA NOP commands after a DMA transfer. Hence if the
         * async data transfer is delayed then s->dma is set incorrectly.
         */
        return;
    }

    if (s->ti_cmd == (CMD_TI | CMD_DMA)) {
        if (dmalen) {
            esp_do_dma(s);
        } else if (s->ti_size <= 0) {
            /*
             * If this was the last part of a DMA transfer then the
             * completion interrupt is deferred to here.
             */
            esp_dma_done(s);
            esp_lower_drq(s);
        }
    } else if (s->ti_cmd == CMD_TI) {
        esp_do_nodma(s);
    }
}
/*
 * "Transfer Information" command: start a DMA or PIO information transfer
 * for the current phase. Deferred until esp_dma_enable() if DMA is gated
 * off by the board. The issued command is remembered in ti_cmd so that
 * esp_transfer_data() can resume with the correct DMA/non-DMA mode.
 */
static void handle_ti(ESPState *s)
{
    uint32_t dmalen;

    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_ti;
        return;
    }

    s->ti_cmd = s->rregs[ESP_CMD];
    if (s->dma) {
        dmalen = esp_get_tc(s);
        trace_esp_handle_ti(dmalen);
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        esp_do_dma(s);
    } else {
        trace_esp_handle_ti(s->ti_size);
        esp_do_nodma(s);
    }
}
  811. void esp_hard_reset(ESPState *s)
  812. {
  813. memset(s->rregs, 0, ESP_REGS);
  814. memset(s->wregs, 0, ESP_REGS);
  815. s->tchi_written = 0;
  816. s->ti_size = 0;
  817. s->async_len = 0;
  818. fifo8_reset(&s->fifo);
  819. fifo8_reset(&s->cmdfifo);
  820. s->dma = 0;
  821. s->do_cmd = 0;
  822. s->dma_cb = NULL;
  823. s->rregs[ESP_CFG1] = 7;
  824. }
/* Soft reset: drop both output lines, then reset all chip state. */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}
/* Propagate a SCSI bus reset to every device on the bus. */
static void esp_bus_reset(ESPState *s)
{
    bus_cold_reset(BUS(&s->bus));
}
  835. static void parent_esp_reset(ESPState *s, int irq, int level)
  836. {
  837. if (level) {
  838. esp_soft_reset(s);
  839. }
  840. }
/*
 * Guest read of an ESP register. FIFO reads pop a byte (advancing a
 * non-DMA DATA IN transfer); RINTR reads clear the interrupt state as
 * the real chip does; TCHI returns the chip ID until first written.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t val;

    switch (saddr) {
    case ESP_FIFO:
        if (s->dma_memory_read && s->dma_memory_write &&
            (s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out. */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else {
            if ((s->rregs[ESP_RSTAT] & 0x7) == STAT_DI) {
                if (s->ti_size) {
                    /* More bytes pending: refill the FIFO */
                    esp_do_nodma(s);
                } else {
                    /*
                     * The last byte of a non-DMA transfer has been read out
                     * of the FIFO so switch to status phase
                     */
                    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
                }
            }
            s->rregs[ESP_FIFO] = esp_fifo_pop(&s->fifo);
        }
        val = s->rregs[ESP_FIFO];
        break;
    case ESP_RINTR:
        /*
         * Clear sequence step, interrupt register and all status bits
         * except TC
         */
        val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        /*
         * According to the datasheet ESP_RSEQ should be cleared, but as the
         * emulation currently defers information transfers to the next TI
         * command leave it for now so that pedantic guests such as the old
         * Linux 2.6 driver see the correct flags before the next SCSI phase
         * transition.
         *
         * s->rregs[ESP_RSEQ] = SEQ_0;
         */
        esp_lower_irq(s);
        break;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            val = s->chip_id;
        } else {
            val = s->rregs[saddr];
        }
        break;
    case ESP_RFLAGS:
        /* Bottom 5 bits indicate number of bytes in FIFO */
        val = fifo8_num_used(&s->fifo);
        break;
    default:
        val = s->rregs[saddr];
        break;
    }

    trace_esp_mem_readb(saddr, val);
    return val;
}
/*
 * Write one ESP register.  Writes to ESP_CMD dispatch the chip command
 * encoded in the low bits of @val; other registers mostly just store the
 * value.  On exit the written value is mirrored into s->wregs[] except
 * for invalid register indices, which return early.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);

    switch (saddr) {
    case ESP_TCHI:
        /* Remember that TCHI was written so reads stop returning chip_id */
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing any transfer-count byte clears terminal count status */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            esp_fifo_push(&s->cmdfifo, val);

            /*
             * If any unexpected message out/command phase data is
             * transferred using non-DMA, raise the interrupt
             */
            if (s->rregs[ESP_CMD] == CMD_TI) {
                s->rregs[ESP_RINTR] |= INTR_BS;
                esp_raise_irq(s);
            }
        } else {
            esp_fifo_push(&s->fifo, val);
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter. */
            if (esp_get_stc(s) == 0) {
                /* A start count of zero means the maximum, 0x10000 */
                esp_set_tc(s, 0x10000);
            } else {
                esp_set_tc(s, esp_get_stc(s));
            }
        } else {
            s->dma = 0;
        }
        /* Dispatch on the command code portion of the written value */
        switch (val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            fifo8_reset(&s->fifo);
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            esp_bus_reset(s);
            /* CFG1_RESREPT suppresses the reset interrupt */
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                s->rregs[ESP_RINTR] |= INTR_RST;
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            trace_esp_mem_writeb_cmd_ti(val);
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            write_response(s);
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] |= INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] |= INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        /* Write-only setup registers: only latched into wregs below */
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* Configuration registers read back what was written */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        /* Invalid register: do not update wregs */
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
  1034. static bool esp_mem_accepts(void *opaque, hwaddr addr,
  1035. unsigned size, bool is_write,
  1036. MemTxAttrs attrs)
  1037. {
  1038. return (size == 1) || (is_write && size == 4);
  1039. }
  1040. static bool esp_is_before_version_5(void *opaque, int version_id)
  1041. {
  1042. ESPState *s = ESP(opaque);
  1043. version_id = MIN(version_id, s->mig_version_id);
  1044. return version_id < 5;
  1045. }
  1046. static bool esp_is_version_5(void *opaque, int version_id)
  1047. {
  1048. ESPState *s = ESP(opaque);
  1049. version_id = MIN(version_id, s->mig_version_id);
  1050. return version_id >= 5;
  1051. }
  1052. static bool esp_is_version_6(void *opaque, int version_id)
  1053. {
  1054. ESPState *s = ESP(opaque);
  1055. version_id = MIN(version_id, s->mig_version_id);
  1056. return version_id >= 6;
  1057. }
/*
 * Migration pre-save hook, shared by the wrapper devices: resolve the
 * embedded "esp" child object and record the current vmstate version so
 * the destination can cap its view of the stream format.
 */
int esp_pre_save(void *opaque)
{
    ESPState *s = ESP(object_resolve_path_component(
                          OBJECT(opaque), "esp"));

    s->mig_version_id = vmstate_esp.version_id;
    return 0;
}
  1065. static int esp_post_load(void *opaque, int version_id)
  1066. {
  1067. ESPState *s = ESP(opaque);
  1068. int len, i;
  1069. version_id = MIN(version_id, s->mig_version_id);
  1070. if (version_id < 5) {
  1071. esp_set_tc(s, s->mig_dma_left);
  1072. /* Migrate ti_buf to fifo */
  1073. len = s->mig_ti_wptr - s->mig_ti_rptr;
  1074. for (i = 0; i < len; i++) {
  1075. fifo8_push(&s->fifo, s->mig_ti_buf[i]);
  1076. }
  1077. /* Migrate cmdbuf to cmdfifo */
  1078. for (i = 0; i < s->mig_cmdlen; i++) {
  1079. fifo8_push(&s->cmdfifo, s->mig_cmdbuf[i]);
  1080. }
  1081. }
  1082. s->mig_version_id = vmstate_esp.version_id;
  1083. return 0;
  1084. }
  1085. /*
  1086. * PDMA (or pseudo-DMA) is only used on the Macintosh and requires the
  1087. * guest CPU to perform the transfers between the SCSI bus and memory
  1088. * itself. This is indicated by the dma_memory_read and dma_memory_write
  1089. * functions being NULL (in contrast to the ESP PCI device) whilst
  1090. * dma_enabled is still set.
  1091. */
  1092. static bool esp_pdma_needed(void *opaque)
  1093. {
  1094. ESPState *s = ESP(opaque);
  1095. return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
  1096. s->dma_enabled;
  1097. }
/* Migration subsection, present only when PDMA is in use (see above). */
static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 0,
    .minimum_version_id = 0,
    .needed = esp_pdma_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(pdma_cb, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Main ESP migration description.  Field order defines the wire format;
 * do not reorder.  The mig_* fields only exist to load/save legacy
 * (pre-version-5) streams and are converted in esp_post_load().
 */
const VMStateDescription vmstate_esp = {
    .name = "esp",
    .version_id = 6,
    .minimum_version_id = 3,
    .post_load = esp_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32_TEST(mig_ti_rptr, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32_TEST(mig_ti_wptr, ESPState, esp_is_before_version_5),
        VMSTATE_BUFFER_TEST(mig_ti_buf, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32_TEST(mig_deferred_status, ESPState,
                            esp_is_before_version_5),
        VMSTATE_BOOL_TEST(mig_deferred_complete, ESPState,
                          esp_is_before_version_5),
        VMSTATE_UINT32(dma, ESPState),
        /*
         * mig_cmdbuf is split into two entries: bytes 0-15 (stream
         * version 0) and bytes 16..sizeof(mig_cmdbuf) (added in stream
         * version 4) — presumably because the buffer grew in v4.
         * NOTE(review): keep both entries and their order intact.
         */
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 0,
                              esp_is_before_version_5, 0, 16),
        VMSTATE_STATIC_BUFFER(mig_cmdbuf, ESPState, 4,
                              esp_is_before_version_5, 16,
                              sizeof(typeof_field(ESPState, mig_cmdbuf))),
        VMSTATE_UINT32_TEST(mig_cmdlen, ESPState, esp_is_before_version_5),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32_TEST(mig_dma_left, ESPState, esp_is_before_version_5),
        VMSTATE_BOOL_TEST(data_in_ready, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(cmdfifo_cdb_offset, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(fifo, ESPState, esp_is_version_5),
        VMSTATE_FIFO8_TEST(cmdfifo, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(ti_cmd, ESPState, esp_is_version_5),
        VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_esp_pdma,
        NULL
    }
};
  1147. static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
  1148. uint64_t val, unsigned int size)
  1149. {
  1150. SysBusESPState *sysbus = opaque;
  1151. ESPState *s = ESP(&sysbus->esp);
  1152. uint32_t saddr;
  1153. saddr = addr >> sysbus->it_shift;
  1154. esp_reg_write(s, saddr, val);
  1155. }
  1156. static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
  1157. unsigned int size)
  1158. {
  1159. SysBusESPState *sysbus = opaque;
  1160. ESPState *s = ESP(&sysbus->esp);
  1161. uint32_t saddr;
  1162. saddr = addr >> sysbus->it_shift;
  1163. return esp_reg_read(s, saddr);
  1164. }
/* Register-bank MMIO ops; esp_mem_accepts filters access sizes. */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
  1171. static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
  1172. uint64_t val, unsigned int size)
  1173. {
  1174. SysBusESPState *sysbus = opaque;
  1175. ESPState *s = ESP(&sysbus->esp);
  1176. trace_esp_pdma_write(size);
  1177. switch (size) {
  1178. case 1:
  1179. esp_pdma_write(s, val);
  1180. break;
  1181. case 2:
  1182. esp_pdma_write(s, val >> 8);
  1183. esp_pdma_write(s, val);
  1184. break;
  1185. }
  1186. esp_pdma_cb(s);
  1187. }
  1188. static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
  1189. unsigned int size)
  1190. {
  1191. SysBusESPState *sysbus = opaque;
  1192. ESPState *s = ESP(&sysbus->esp);
  1193. uint64_t val = 0;
  1194. trace_esp_pdma_read(size);
  1195. switch (size) {
  1196. case 1:
  1197. val = esp_pdma_read(s);
  1198. break;
  1199. case 2:
  1200. val = esp_pdma_read(s);
  1201. val = (val << 8) | esp_pdma_read(s);
  1202. break;
  1203. }
  1204. if (fifo8_num_used(&s->fifo) < 2) {
  1205. esp_pdma_cb(s);
  1206. }
  1207. return val;
  1208. }
/*
 * SCSIBusInfo.load_request hook: re-attach an incoming migrated SCSI
 * request to the device state.  Takes a reference on @req, which becomes
 * s->current_req.  Returns the per-request opaque (the ESPState itself).
 */
static void *esp_load_request(QEMUFile *f, SCSIRequest *req)
{
    ESPState *s = container_of(req->bus, ESPState, bus);

    scsi_req_ref(req);
    s->current_req = req;
    return s;
}
/*
 * PDMA MMIO ops: guests may issue 1-4 byte accesses, but the
 * implementation handles at most 2 bytes per access (the memory core
 * splits larger accesses accordingly).
 */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 2,
};
/* SCSI bus callbacks wired into the generic SCSI layer. */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .load_request = esp_load_request,
    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
  1234. static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
  1235. {
  1236. SysBusESPState *sysbus = SYSBUS_ESP(opaque);
  1237. ESPState *s = ESP(&sysbus->esp);
  1238. switch (irq) {
  1239. case 0:
  1240. parent_esp_reset(s, irq, level);
  1241. break;
  1242. case 1:
  1243. esp_dma_enable(opaque, irq, level);
  1244. break;
  1245. }
  1246. }
/*
 * Realize the sysbus wrapper: realize the inner ESP device, export its
 * IRQ lines, map the register bank and the PDMA region as MMIO, create
 * the two GPIO inputs (reset, DMA enable) and initialize the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = SYSBUS_ESP(dev);
    ESPState *s = ESP(&sysbus->esp);

    if (!qdev_realize(DEVICE(s), NULL, errp)) {
        return;
    }

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    /* The board must have configured it_shift before realize */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    /* Register bank spans ESP_REGS registers scaled by it_shift */
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 4);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info);
}
  1268. static void sysbus_esp_hard_reset(DeviceState *dev)
  1269. {
  1270. SysBusESPState *sysbus = SYSBUS_ESP(dev);
  1271. ESPState *s = ESP(&sysbus->esp);
  1272. esp_hard_reset(s);
  1273. }
/* Instance init: create the embedded "esp" child object. */
static void sysbus_esp_init(Object *obj)
{
    SysBusESPState *sysbus = SYSBUS_ESP(obj);

    object_initialize_child(obj, "esp", &sysbus->esp, TYPE_ESP);
}
/*
 * Wrapper migration description: saves the stream version marker first
 * (added in v2) and then the embedded ESP state via vmstate_esp.
 */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 2,
    .minimum_version_id = 1,
    .pre_save = esp_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2),
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
/* Class init for the sysbus wrapper device. */
static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
/* QOM type registration record for the sysbus ESP wrapper. */
static const TypeInfo sysbus_esp_info = {
    .name = TYPE_SYSBUS_ESP,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = sysbus_esp_init,
    .instance_size = sizeof(SysBusESPState),
    .class_init = sysbus_esp_class_init,
};
/* Instance finalize: release the FIFO storage created in esp_init(). */
static void esp_finalize(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_destroy(&s->fifo);
    fifo8_destroy(&s->cmdfifo);
}
/* Instance init: allocate the data FIFO and the command FIFO. */
static void esp_init(Object *obj)
{
    ESPState *s = ESP(obj);

    fifo8_create(&s->fifo, ESP_FIFO_SZ);
    fifo8_create(&s->cmdfifo, ESP_CMDFIFO_SZ);
}
static void esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    /* internal device for sysbusesp/pciespscsi, not user-creatable */
    dc->user_creatable = false;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
/* QOM type registration record for the core ESP chip device. */
static const TypeInfo esp_info = {
    .name = TYPE_ESP,
    .parent = TYPE_DEVICE,
    .instance_init = esp_init,
    .instance_finalize = esp_finalize,
    .instance_size = sizeof(ESPState),
    .class_init = esp_class_init,
};
/* Register both QOM types with the type system at module init. */
static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
    type_register_static(&esp_info);
}

type_init(esp_register_types)