/*
 * QEMU model of the Ibex SPI Controller
 * SPEC Reference: https://docs.opentitan.org/hw/ip/spi_host/doc/
 *
 * Copyright (C) 2022 Western Digital
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
  25. #include "qemu/osdep.h"
  26. #include "qemu/log.h"
  27. #include "qemu/module.h"
  28. #include "hw/registerfields.h"
  29. #include "hw/ssi/ibex_spi_host.h"
  30. #include "hw/irq.h"
  31. #include "hw/qdev-properties.h"
  32. #include "hw/qdev-properties-system.h"
  33. #include "migration/vmstate.h"
  34. #include "trace.h"
  35. REG32(INTR_STATE, 0x00)
  36. FIELD(INTR_STATE, ERROR, 0, 1)
  37. FIELD(INTR_STATE, SPI_EVENT, 1, 1)
  38. REG32(INTR_ENABLE, 0x04)
  39. FIELD(INTR_ENABLE, ERROR, 0, 1)
  40. FIELD(INTR_ENABLE, SPI_EVENT, 1, 1)
  41. REG32(INTR_TEST, 0x08)
  42. FIELD(INTR_TEST, ERROR, 0, 1)
  43. FIELD(INTR_TEST, SPI_EVENT, 1, 1)
  44. REG32(ALERT_TEST, 0x0c)
  45. FIELD(ALERT_TEST, FETAL_TEST, 0, 1)
  46. REG32(CONTROL, 0x10)
  47. FIELD(CONTROL, RX_WATERMARK, 0, 8)
  48. FIELD(CONTROL, TX_WATERMARK, 1, 8)
  49. FIELD(CONTROL, OUTPUT_EN, 29, 1)
  50. FIELD(CONTROL, SW_RST, 30, 1)
  51. FIELD(CONTROL, SPIEN, 31, 1)
  52. REG32(STATUS, 0x14)
  53. FIELD(STATUS, TXQD, 0, 8)
  54. FIELD(STATUS, RXQD, 18, 8)
  55. FIELD(STATUS, CMDQD, 16, 3)
  56. FIELD(STATUS, RXWM, 20, 1)
  57. FIELD(STATUS, BYTEORDER, 22, 1)
  58. FIELD(STATUS, RXSTALL, 23, 1)
  59. FIELD(STATUS, RXEMPTY, 24, 1)
  60. FIELD(STATUS, RXFULL, 25, 1)
  61. FIELD(STATUS, TXWM, 26, 1)
  62. FIELD(STATUS, TXSTALL, 27, 1)
  63. FIELD(STATUS, TXEMPTY, 28, 1)
  64. FIELD(STATUS, TXFULL, 29, 1)
  65. FIELD(STATUS, ACTIVE, 30, 1)
  66. FIELD(STATUS, READY, 31, 1)
  67. REG32(CONFIGOPTS, 0x18)
  68. FIELD(CONFIGOPTS, CLKDIV_0, 0, 16)
  69. FIELD(CONFIGOPTS, CSNIDLE_0, 16, 4)
  70. FIELD(CONFIGOPTS, CSNTRAIL_0, 20, 4)
  71. FIELD(CONFIGOPTS, CSNLEAD_0, 24, 4)
  72. FIELD(CONFIGOPTS, FULLCYC_0, 29, 1)
  73. FIELD(CONFIGOPTS, CPHA_0, 30, 1)
  74. FIELD(CONFIGOPTS, CPOL_0, 31, 1)
  75. REG32(CSID, 0x1c)
  76. FIELD(CSID, CSID, 0, 32)
  77. REG32(COMMAND, 0x20)
  78. FIELD(COMMAND, LEN, 0, 8)
  79. FIELD(COMMAND, CSAAT, 9, 1)
  80. FIELD(COMMAND, SPEED, 10, 2)
  81. FIELD(COMMAND, DIRECTION, 12, 2)
  82. REG32(ERROR_ENABLE, 0x2c)
  83. FIELD(ERROR_ENABLE, CMDBUSY, 0, 1)
  84. FIELD(ERROR_ENABLE, OVERFLOW, 1, 1)
  85. FIELD(ERROR_ENABLE, UNDERFLOW, 2, 1)
  86. FIELD(ERROR_ENABLE, CMDINVAL, 3, 1)
  87. FIELD(ERROR_ENABLE, CSIDINVAL, 4, 1)
  88. REG32(ERROR_STATUS, 0x30)
  89. FIELD(ERROR_STATUS, CMDBUSY, 0, 1)
  90. FIELD(ERROR_STATUS, OVERFLOW, 1, 1)
  91. FIELD(ERROR_STATUS, UNDERFLOW, 2, 1)
  92. FIELD(ERROR_STATUS, CMDINVAL, 3, 1)
  93. FIELD(ERROR_STATUS, CSIDINVAL, 4, 1)
  94. FIELD(ERROR_STATUS, ACCESSINVAL, 5, 1)
  95. REG32(EVENT_ENABLE, 0x34)
  96. FIELD(EVENT_ENABLE, RXFULL, 0, 1)
  97. FIELD(EVENT_ENABLE, TXEMPTY, 1, 1)
  98. FIELD(EVENT_ENABLE, RXWM, 2, 1)
  99. FIELD(EVENT_ENABLE, TXWM, 3, 1)
  100. FIELD(EVENT_ENABLE, READY, 4, 1)
  101. FIELD(EVENT_ENABLE, IDLE, 5, 1)
  102. static inline uint8_t div4_round_up(uint8_t dividend)
  103. {
  104. return (dividend + 3) / 4;
  105. }
  106. static void ibex_spi_rxfifo_reset(IbexSPIHostState *s)
  107. {
  108. uint32_t data = s->regs[IBEX_SPI_HOST_STATUS];
  109. /* Empty the RX FIFO and assert RXEMPTY */
  110. fifo8_reset(&s->rx_fifo);
  111. data = FIELD_DP32(data, STATUS, RXFULL, 0);
  112. data = FIELD_DP32(data, STATUS, RXEMPTY, 1);
  113. s->regs[IBEX_SPI_HOST_STATUS] = data;
  114. }
  115. static void ibex_spi_txfifo_reset(IbexSPIHostState *s)
  116. {
  117. uint32_t data = s->regs[IBEX_SPI_HOST_STATUS];
  118. /* Empty the TX FIFO and assert TXEMPTY */
  119. fifo8_reset(&s->tx_fifo);
  120. data = FIELD_DP32(data, STATUS, TXFULL, 0);
  121. data = FIELD_DP32(data, STATUS, TXEMPTY, 1);
  122. s->regs[IBEX_SPI_HOST_STATUS] = data;
  123. }
  124. static void ibex_spi_host_reset(DeviceState *dev)
  125. {
  126. IbexSPIHostState *s = IBEX_SPI_HOST(dev);
  127. trace_ibex_spi_host_reset("Resetting Ibex SPI");
  128. /* SPI Host Register Reset */
  129. s->regs[IBEX_SPI_HOST_INTR_STATE] = 0x00;
  130. s->regs[IBEX_SPI_HOST_INTR_ENABLE] = 0x00;
  131. s->regs[IBEX_SPI_HOST_INTR_TEST] = 0x00;
  132. s->regs[IBEX_SPI_HOST_ALERT_TEST] = 0x00;
  133. s->regs[IBEX_SPI_HOST_CONTROL] = 0x7f;
  134. s->regs[IBEX_SPI_HOST_STATUS] = 0x00;
  135. s->regs[IBEX_SPI_HOST_CONFIGOPTS] = 0x00;
  136. s->regs[IBEX_SPI_HOST_CSID] = 0x00;
  137. s->regs[IBEX_SPI_HOST_COMMAND] = 0x00;
  138. /* RX/TX Modelled by FIFO */
  139. s->regs[IBEX_SPI_HOST_RXDATA] = 0x00;
  140. s->regs[IBEX_SPI_HOST_TXDATA] = 0x00;
  141. s->regs[IBEX_SPI_HOST_ERROR_ENABLE] = 0x1F;
  142. s->regs[IBEX_SPI_HOST_ERROR_STATUS] = 0x00;
  143. s->regs[IBEX_SPI_HOST_EVENT_ENABLE] = 0x00;
  144. ibex_spi_rxfifo_reset(s);
  145. ibex_spi_txfifo_reset(s);
  146. s->init_status = true;
  147. return;
  148. }
  149. /*
  150. * Check if we need to trigger an interrupt.
  151. * The two interrupts lines (host_err and event) can
  152. * be enabled separately in 'IBEX_SPI_HOST_INTR_ENABLE'.
  153. *
  154. * Interrupts are triggered based on the ones
  155. * enabled in the `IBEX_SPI_HOST_EVENT_ENABLE` and `IBEX_SPI_HOST_ERROR_ENABLE`.
  156. */
  157. static void ibex_spi_host_irq(IbexSPIHostState *s)
  158. {
  159. uint32_t intr_test_reg = s->regs[IBEX_SPI_HOST_INTR_TEST];
  160. uint32_t intr_en_reg = s->regs[IBEX_SPI_HOST_INTR_ENABLE];
  161. uint32_t intr_state_reg = s->regs[IBEX_SPI_HOST_INTR_STATE];
  162. uint32_t err_en_reg = s->regs[IBEX_SPI_HOST_ERROR_ENABLE];
  163. uint32_t event_en_reg = s->regs[IBEX_SPI_HOST_EVENT_ENABLE];
  164. uint32_t err_status_reg = s->regs[IBEX_SPI_HOST_ERROR_STATUS];
  165. uint32_t status_reg = s->regs[IBEX_SPI_HOST_STATUS];
  166. bool error_en = FIELD_EX32(intr_en_reg, INTR_ENABLE, ERROR);
  167. bool event_en = FIELD_EX32(intr_en_reg, INTR_ENABLE, SPI_EVENT);
  168. bool err_pending = FIELD_EX32(intr_state_reg, INTR_STATE, ERROR);
  169. bool status_pending = FIELD_EX32(intr_state_reg, INTR_STATE, SPI_EVENT);
  170. int err_irq = 0, event_irq = 0;
  171. /* Error IRQ enabled and Error IRQ Cleared */
  172. if (error_en && !err_pending) {
  173. /* Event enabled, Interrupt Test Error */
  174. if (FIELD_EX32(intr_test_reg, INTR_TEST, ERROR)) {
  175. err_irq = 1;
  176. } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE, CMDBUSY) &&
  177. FIELD_EX32(err_status_reg, ERROR_STATUS, CMDBUSY)) {
  178. /* Wrote to COMMAND when not READY */
  179. err_irq = 1;
  180. } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE, CMDINVAL) &&
  181. FIELD_EX32(err_status_reg, ERROR_STATUS, CMDINVAL)) {
  182. /* Invalid command segment */
  183. err_irq = 1;
  184. } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE, CSIDINVAL) &&
  185. FIELD_EX32(err_status_reg, ERROR_STATUS, CSIDINVAL)) {
  186. /* Invalid value for CSID */
  187. err_irq = 1;
  188. }
  189. if (err_irq) {
  190. s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_ERROR_MASK;
  191. }
  192. qemu_set_irq(s->host_err, err_irq);
  193. }
  194. /* Event IRQ Enabled and Event IRQ Cleared */
  195. if (event_en && !status_pending) {
  196. if (FIELD_EX32(intr_test_reg, INTR_STATE, SPI_EVENT)) {
  197. /* Event enabled, Interrupt Test Event */
  198. event_irq = 1;
  199. } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE, READY) &&
  200. FIELD_EX32(status_reg, STATUS, READY)) {
  201. /* SPI Host ready for next command */
  202. event_irq = 1;
  203. } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE, TXEMPTY) &&
  204. FIELD_EX32(status_reg, STATUS, TXEMPTY)) {
  205. /* SPI TXEMPTY, TXFIFO drained */
  206. event_irq = 1;
  207. } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE, RXFULL) &&
  208. FIELD_EX32(status_reg, STATUS, RXFULL)) {
  209. /* SPI RXFULL, RXFIFO full */
  210. event_irq = 1;
  211. }
  212. if (event_irq) {
  213. s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_SPI_EVENT_MASK;
  214. }
  215. qemu_set_irq(s->event, event_irq);
  216. }
  217. }
/*
 * Perform the transfer described by the COMMAND register: pop up to
 * COMMAND.LEN bytes from the TX FIFO, shift each through the SSI bus,
 * and push the response byte into the RX FIFO.  Stops early (asserting
 * the matching STALL bit) if the TX FIFO underruns or the RX FIFO is
 * full, then updates STATUS and re-evaluates the interrupt lines.
 */
static void ibex_spi_host_transfer(IbexSPIHostState *s)
{
    uint32_t rx, tx, data;
    /* Get num of one byte transfers */
    uint8_t segment_len = FIELD_EX32(s->regs[IBEX_SPI_HOST_COMMAND],
                                     COMMAND, LEN);

    while (segment_len > 0) {
        if (fifo8_is_empty(&s->tx_fifo)) {
            /* Assert Stall */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXSTALL_MASK;
            break;
        } else if (fifo8_is_full(&s->rx_fifo)) {
            /* Assert Stall */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXSTALL_MASK;
            break;
        } else {
            tx = fifo8_pop(&s->tx_fifo);
        }

        rx = ssi_transfer(s->ssi, tx);

        trace_ibex_spi_host_transfer(tx, rx);

        /* Re-check: a slave-side callback could have filled the FIFO */
        if (!fifo8_is_full(&s->rx_fifo)) {
            fifo8_push(&s->rx_fifo, rx);
        } else {
            /* Assert RXFULL */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXFULL_MASK;
        }
        --segment_len;
    }

    data = s->regs[IBEX_SPI_HOST_STATUS];
    /* Assert Ready */
    data = FIELD_DP32(data, STATUS, READY, 1);
    /*
     * Set RXQD.
     * NOTE(review): segment_len here is the residual (untransferred) byte
     * count after the loop, not the RX FIFO occupancy — confirm against
     * the SPI_HOST spec, which defines RXQD as RX FIFO fill level.
     */
    data = FIELD_DP32(data, STATUS, RXQD, div4_round_up(segment_len));
    /* Set TXQD (TX FIFO fill level in 32-bit words, rounded down) */
    data = FIELD_DP32(data, STATUS, TXQD, fifo8_num_used(&s->tx_fifo) / 4);
    /* Clear TXFULL */
    data = FIELD_DP32(data, STATUS, TXFULL, 0);
    /* Reset RXEMPTY */
    data = FIELD_DP32(data, STATUS, RXEMPTY, 0);
    /* Update register status */
    s->regs[IBEX_SPI_HOST_STATUS] = data;

    /* Drop remaining bytes that exceed segment_len */
    ibex_spi_txfifo_reset(s);

    ibex_spi_host_irq(s);
}
  263. static uint64_t ibex_spi_host_read(void *opaque, hwaddr addr,
  264. unsigned int size)
  265. {
  266. IbexSPIHostState *s = opaque;
  267. uint32_t rc = 0;
  268. uint8_t rx_byte = 0;
  269. trace_ibex_spi_host_read(addr, size);
  270. /* Match reg index */
  271. addr = addr >> 2;
  272. switch (addr) {
  273. /* Skipping any W/O registers */
  274. case IBEX_SPI_HOST_INTR_STATE...IBEX_SPI_HOST_INTR_ENABLE:
  275. case IBEX_SPI_HOST_CONTROL...IBEX_SPI_HOST_STATUS:
  276. rc = s->regs[addr];
  277. break;
  278. case IBEX_SPI_HOST_CSID:
  279. rc = s->regs[addr];
  280. break;
  281. case IBEX_SPI_HOST_CONFIGOPTS:
  282. rc = s->config_opts[s->regs[IBEX_SPI_HOST_CSID]];
  283. break;
  284. case IBEX_SPI_HOST_TXDATA:
  285. rc = s->regs[addr];
  286. break;
  287. case IBEX_SPI_HOST_RXDATA:
  288. /* Clear RXFULL */
  289. s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXFULL_MASK;
  290. for (int i = 0; i < 4; ++i) {
  291. if (fifo8_is_empty(&s->rx_fifo)) {
  292. /* Assert RXEMPTY, no IRQ */
  293. s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXEMPTY_MASK;
  294. s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
  295. R_ERROR_STATUS_UNDERFLOW_MASK;
  296. return rc;
  297. }
  298. rx_byte = fifo8_pop(&s->rx_fifo);
  299. rc |= rx_byte << (i * 8);
  300. }
  301. break;
  302. case IBEX_SPI_HOST_ERROR_ENABLE...IBEX_SPI_HOST_EVENT_ENABLE:
  303. rc = s->regs[addr];
  304. break;
  305. default:
  306. qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
  307. addr << 2);
  308. }
  309. return rc;
  310. }
  311. static void ibex_spi_host_write(void *opaque, hwaddr addr,
  312. uint64_t val64, unsigned int size)
  313. {
  314. IbexSPIHostState *s = opaque;
  315. uint32_t val32 = val64;
  316. uint32_t shift_mask = 0xff, status = 0, data = 0;
  317. uint8_t txqd_len;
  318. trace_ibex_spi_host_write(addr, size, val64);
  319. /* Match reg index */
  320. addr = addr >> 2;
  321. switch (addr) {
  322. /* Skipping any R/O registers */
  323. case IBEX_SPI_HOST_INTR_STATE:
  324. /* rw1c status register */
  325. if (FIELD_EX32(val32, INTR_STATE, ERROR)) {
  326. data = FIELD_DP32(data, INTR_STATE, ERROR, 0);
  327. }
  328. if (FIELD_EX32(val32, INTR_STATE, SPI_EVENT)) {
  329. data = FIELD_DP32(data, INTR_STATE, SPI_EVENT, 0);
  330. }
  331. s->regs[addr] = data;
  332. break;
  333. case IBEX_SPI_HOST_INTR_ENABLE:
  334. s->regs[addr] = val32;
  335. break;
  336. case IBEX_SPI_HOST_INTR_TEST:
  337. s->regs[addr] = val32;
  338. ibex_spi_host_irq(s);
  339. break;
  340. case IBEX_SPI_HOST_ALERT_TEST:
  341. s->regs[addr] = val32;
  342. qemu_log_mask(LOG_UNIMP,
  343. "%s: SPI_ALERT_TEST is not supported\n", __func__);
  344. break;
  345. case IBEX_SPI_HOST_CONTROL:
  346. s->regs[addr] = val32;
  347. if (val32 & R_CONTROL_SW_RST_MASK) {
  348. ibex_spi_host_reset((DeviceState *)s);
  349. /* Clear active if any */
  350. s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_ACTIVE_MASK;
  351. }
  352. if (val32 & R_CONTROL_OUTPUT_EN_MASK) {
  353. qemu_log_mask(LOG_UNIMP,
  354. "%s: CONTROL_OUTPUT_EN is not supported\n", __func__);
  355. }
  356. break;
  357. case IBEX_SPI_HOST_CONFIGOPTS:
  358. /* Update the respective config-opts register based on CSIDth index */
  359. s->config_opts[s->regs[IBEX_SPI_HOST_CSID]] = val32;
  360. qemu_log_mask(LOG_UNIMP,
  361. "%s: CONFIGOPTS Hardware settings not supported\n",
  362. __func__);
  363. break;
  364. case IBEX_SPI_HOST_CSID:
  365. if (val32 >= s->num_cs) {
  366. /* CSID exceeds max num_cs */
  367. s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
  368. R_ERROR_STATUS_CSIDINVAL_MASK;
  369. ibex_spi_host_irq(s);
  370. return;
  371. }
  372. s->regs[addr] = val32;
  373. break;
  374. case IBEX_SPI_HOST_COMMAND:
  375. s->regs[addr] = val32;
  376. /* STALL, IP not enabled */
  377. if (!(FIELD_EX32(s->regs[IBEX_SPI_HOST_CONTROL],
  378. CONTROL, SPIEN))) {
  379. return;
  380. }
  381. /* SPI not ready, IRQ Error */
  382. if (!(FIELD_EX32(s->regs[IBEX_SPI_HOST_STATUS],
  383. STATUS, READY))) {
  384. s->regs[IBEX_SPI_HOST_ERROR_STATUS] |= R_ERROR_STATUS_CMDBUSY_MASK;
  385. ibex_spi_host_irq(s);
  386. return;
  387. }
  388. /* Assert Not Ready */
  389. s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_READY_MASK;
  390. if (FIELD_EX32(val32, COMMAND, DIRECTION) != BIDIRECTIONAL_TRANSFER) {
  391. qemu_log_mask(LOG_UNIMP,
  392. "%s: Rx Only/Tx Only are not supported\n", __func__);
  393. }
  394. if (val32 & R_COMMAND_CSAAT_MASK) {
  395. qemu_log_mask(LOG_UNIMP,
  396. "%s: CSAAT is not supported\n", __func__);
  397. }
  398. if (val32 & R_COMMAND_SPEED_MASK) {
  399. qemu_log_mask(LOG_UNIMP,
  400. "%s: SPEED is not supported\n", __func__);
  401. }
  402. /* Set Transfer Callback */
  403. timer_mod(s->fifo_trigger_handle,
  404. qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
  405. (TX_INTERRUPT_TRIGGER_DELAY_NS));
  406. break;
  407. case IBEX_SPI_HOST_TXDATA:
  408. /*
  409. * This is a hardware `feature` where
  410. * the first word written to TXDATA after init is omitted entirely
  411. */
  412. if (s->init_status) {
  413. s->init_status = false;
  414. return;
  415. }
  416. for (int i = 0; i < 4; ++i) {
  417. /* Attempting to write when TXFULL */
  418. if (fifo8_is_full(&s->tx_fifo)) {
  419. /* Assert RXEMPTY, no IRQ */
  420. s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXFULL_MASK;
  421. s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
  422. R_ERROR_STATUS_OVERFLOW_MASK;
  423. ibex_spi_host_irq(s);
  424. return;
  425. }
  426. /* Byte ordering is set by the IP */
  427. status = s->regs[IBEX_SPI_HOST_STATUS];
  428. if (FIELD_EX32(status, STATUS, BYTEORDER) == 0) {
  429. /* LE: LSB transmitted first (default for ibex processor) */
  430. shift_mask = 0xff << (i * 8);
  431. } else {
  432. /* BE: MSB transmitted first */
  433. qemu_log_mask(LOG_UNIMP,
  434. "%s: Big endian is not supported\n", __func__);
  435. }
  436. fifo8_push(&s->tx_fifo, (val32 & shift_mask) >> (i * 8));
  437. }
  438. status = s->regs[IBEX_SPI_HOST_STATUS];
  439. /* Reset TXEMPTY */
  440. status = FIELD_DP32(status, STATUS, TXEMPTY, 0);
  441. /* Update TXQD */
  442. txqd_len = FIELD_EX32(status, STATUS, TXQD);
  443. /* Partial bytes (size < 4) are padded, in words. */
  444. txqd_len += 1;
  445. status = FIELD_DP32(status, STATUS, TXQD, txqd_len);
  446. /* Assert Ready */
  447. status = FIELD_DP32(status, STATUS, READY, 1);
  448. /* Update register status */
  449. s->regs[IBEX_SPI_HOST_STATUS] = status;
  450. break;
  451. case IBEX_SPI_HOST_ERROR_ENABLE:
  452. s->regs[addr] = val32;
  453. if (val32 & R_ERROR_ENABLE_CMDINVAL_MASK) {
  454. qemu_log_mask(LOG_UNIMP,
  455. "%s: Segment Length is not supported\n", __func__);
  456. }
  457. break;
  458. case IBEX_SPI_HOST_ERROR_STATUS:
  459. /*
  460. * Indicates any errors that have occurred.
  461. * When an error occurs, the corresponding bit must be cleared
  462. * here before issuing any further commands
  463. */
  464. status = s->regs[addr];
  465. /* rw1c status register */
  466. if (FIELD_EX32(val32, ERROR_STATUS, CMDBUSY)) {
  467. status = FIELD_DP32(status, ERROR_STATUS, CMDBUSY, 0);
  468. }
  469. if (FIELD_EX32(val32, ERROR_STATUS, OVERFLOW)) {
  470. status = FIELD_DP32(status, ERROR_STATUS, OVERFLOW, 0);
  471. }
  472. if (FIELD_EX32(val32, ERROR_STATUS, UNDERFLOW)) {
  473. status = FIELD_DP32(status, ERROR_STATUS, UNDERFLOW, 0);
  474. }
  475. if (FIELD_EX32(val32, ERROR_STATUS, CMDINVAL)) {
  476. status = FIELD_DP32(status, ERROR_STATUS, CMDINVAL, 0);
  477. }
  478. if (FIELD_EX32(val32, ERROR_STATUS, CSIDINVAL)) {
  479. status = FIELD_DP32(status, ERROR_STATUS, CSIDINVAL, 0);
  480. }
  481. if (FIELD_EX32(val32, ERROR_STATUS, ACCESSINVAL)) {
  482. status = FIELD_DP32(status, ERROR_STATUS, ACCESSINVAL, 0);
  483. }
  484. s->regs[addr] = status;
  485. break;
  486. case IBEX_SPI_HOST_EVENT_ENABLE:
  487. /* Controls which classes of SPI events raise an interrupt. */
  488. s->regs[addr] = val32;
  489. if (val32 & R_EVENT_ENABLE_RXWM_MASK) {
  490. qemu_log_mask(LOG_UNIMP,
  491. "%s: RXWM is not supported\n", __func__);
  492. }
  493. if (val32 & R_EVENT_ENABLE_TXWM_MASK) {
  494. qemu_log_mask(LOG_UNIMP,
  495. "%s: TXWM is not supported\n", __func__);
  496. }
  497. if (val32 & R_EVENT_ENABLE_IDLE_MASK) {
  498. qemu_log_mask(LOG_UNIMP,
  499. "%s: IDLE is not supported\n", __func__);
  500. }
  501. break;
  502. default:
  503. qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
  504. addr << 2);
  505. }
  506. }
/* MMIO access handlers for the register window. */
static const MemoryRegionOps ibex_spi_ops = {
    .read = ibex_spi_host_read,
    .write = ibex_spi_host_write,
    /* Ibex default LE */
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/* "num_cs": number of chip-select lines (and CONFIGOPTS words); default 1. */
static Property ibex_spi_properties[] = {
    DEFINE_PROP_UINT32("num_cs", IbexSPIHostState, num_cs, 1),
    DEFINE_PROP_END_OF_LIST(),
};
/* Migration state: registers, per-CS config words, both FIFOs and the timer. */
static const VMStateDescription vmstate_ibex = {
    .name = TYPE_IBEX_SPI_HOST,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IbexSPIHostState, IBEX_SPI_HOST_MAX_REGS),
        /* config_opts is a num_cs-element array allocated in realize */
        VMSTATE_VARRAY_UINT32(config_opts, IbexSPIHostState,
                              num_cs, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_FIFO8(rx_fifo, IbexSPIHostState),
        VMSTATE_FIFO8(tx_fifo, IbexSPIHostState),
        VMSTATE_TIMER_PTR(fifo_trigger_handle, IbexSPIHostState),
        VMSTATE_BOOL(init_status, IbexSPIHostState),
        VMSTATE_END_OF_LIST()
    }
};
  532. static void fifo_trigger_update(void *opaque)
  533. {
  534. IbexSPIHostState *s = opaque;
  535. ibex_spi_host_transfer(s);
  536. }
  537. static void ibex_spi_host_realize(DeviceState *dev, Error **errp)
  538. {
  539. IbexSPIHostState *s = IBEX_SPI_HOST(dev);
  540. int i;
  541. s->ssi = ssi_create_bus(dev, "ssi");
  542. s->cs_lines = g_new0(qemu_irq, s->num_cs);
  543. for (i = 0; i < s->num_cs; ++i) {
  544. sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cs_lines[i]);
  545. }
  546. /* Setup CONFIGOPTS Multi-register */
  547. s->config_opts = g_new0(uint32_t, s->num_cs);
  548. /* Setup FIFO Interrupt Timer */
  549. s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
  550. fifo_trigger_update, s);
  551. /* FIFO sizes as per OT Spec */
  552. fifo8_create(&s->tx_fifo, IBEX_SPI_HOST_TXFIFO_LEN);
  553. fifo8_create(&s->rx_fifo, IBEX_SPI_HOST_RXFIFO_LEN);
  554. }
  555. static void ibex_spi_host_init(Object *obj)
  556. {
  557. IbexSPIHostState *s = IBEX_SPI_HOST(obj);
  558. sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->host_err);
  559. sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->event);
  560. memory_region_init_io(&s->mmio, obj, &ibex_spi_ops, s,
  561. TYPE_IBEX_SPI_HOST, 0x1000);
  562. sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
  563. }
  564. static void ibex_spi_host_class_init(ObjectClass *klass, void *data)
  565. {
  566. DeviceClass *dc = DEVICE_CLASS(klass);
  567. dc->realize = ibex_spi_host_realize;
  568. dc->reset = ibex_spi_host_reset;
  569. dc->vmsd = &vmstate_ibex;
  570. device_class_set_props(dc, ibex_spi_properties);
  571. }
/* QOM type registration data for the Ibex SPI host sysbus device. */
static const TypeInfo ibex_spi_host_info = {
    .name = TYPE_IBEX_SPI_HOST,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IbexSPIHostState),
    .instance_init = ibex_spi_host_init,
    .class_init = ibex_spi_host_class_init,
};
/* Register the device type with QEMU's type system at startup. */
static void ibex_spi_host_register_types(void)
{
    type_register_static(&ibex_spi_host_info);
}

type_init(ibex_spi_host_register_types)