xlnx_csu_dma.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758
  1. /*
  2. * Xilinx Platform CSU Stream DMA emulation
  3. *
  4. * This implementation is based on
  5. * https://github.com/Xilinx/qemu/blob/master/hw/dma/csu_stream_dma.c
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License as
  9. * published by the Free Software Foundation; either version 2 or
  10. * (at your option) version 3 of the License.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License along
  18. * with this program; if not, see <http://www.gnu.org/licenses/>.
  19. */
  20. #include "qemu/osdep.h"
  21. #include "qemu/log.h"
  22. #include "qapi/error.h"
  23. #include "hw/irq.h"
  24. #include "hw/qdev-properties.h"
  25. #include "hw/sysbus.h"
  26. #include "migration/vmstate.h"
  27. #include "system/dma.h"
  28. #include "hw/ptimer.h"
  29. #include "hw/stream.h"
  30. #include "hw/register.h"
  31. #include "hw/dma/xlnx_csu_dma.h"
/*
 * Register and field layout, shared by the SRC and DST channels.
 *
 * Ref: UG1087 (v1.7) February 8, 2019
 * https://www.xilinx.com/html_docs/registers/ug1087/ug1087-zynq-ultrascale-registers
 * CSUDMA Module section
 */
REG32(ADDR, 0x0)
    FIELD(ADDR, ADDR, 2, 30) /* wo */
REG32(SIZE, 0x4)
    FIELD(SIZE, SIZE, 2, 27)
    FIELD(SIZE, LAST_WORD, 0, 1) /* rw, only exists in SRC */
REG32(STATUS, 0x8)
    FIELD(STATUS, DONE_CNT, 13, 3) /* wtc */
    FIELD(STATUS, FIFO_LEVEL, 5, 8) /* ro */
    FIELD(STATUS, OUTSTANDING, 1, 4) /* ro */
    FIELD(STATUS, BUSY, 0, 1) /* ro */
REG32(CTRL, 0xc)
    FIELD(CTRL, FIFOTHRESH, 25, 7) /* rw, only exists in DST, reset 0x40 */
    FIELD(CTRL, APB_ERR_RESP, 24, 1) /* rw */
    FIELD(CTRL, ENDIANNESS, 23, 1) /* rw */
    FIELD(CTRL, AXI_BRST_TYPE, 22, 1) /* rw */
    FIELD(CTRL, TIMEOUT_VAL, 10, 12) /* rw, reset: 0xFFE */
    FIELD(CTRL, FIFO_THRESH, 2, 8) /* rw, reset: 0x80 */
    FIELD(CTRL, PAUSE_STRM, 1, 1) /* rw */
    FIELD(CTRL, PAUSE_MEM, 0, 1) /* rw */
REG32(CRC, 0x10)
REG32(INT_STATUS, 0x14)
    FIELD(INT_STATUS, FIFO_OVERFLOW, 7, 1) /* wtc */
    FIELD(INT_STATUS, INVALID_APB, 6, 1) /* wtc */
    FIELD(INT_STATUS, THRESH_HIT, 5, 1) /* wtc */
    FIELD(INT_STATUS, TIMEOUT_MEM, 4, 1) /* wtc */
    FIELD(INT_STATUS, TIMEOUT_STRM, 3, 1) /* wtc */
    FIELD(INT_STATUS, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
    FIELD(INT_STATUS, DONE, 1, 1) /* wtc */
    FIELD(INT_STATUS, MEM_DONE, 0, 1) /* wtc */
REG32(INT_ENABLE, 0x18)
    FIELD(INT_ENABLE, FIFO_OVERFLOW, 7, 1) /* wtc */
    FIELD(INT_ENABLE, INVALID_APB, 6, 1) /* wtc */
    FIELD(INT_ENABLE, THRESH_HIT, 5, 1) /* wtc */
    FIELD(INT_ENABLE, TIMEOUT_MEM, 4, 1) /* wtc */
    FIELD(INT_ENABLE, TIMEOUT_STRM, 3, 1) /* wtc */
    FIELD(INT_ENABLE, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
    FIELD(INT_ENABLE, DONE, 1, 1) /* wtc */
    FIELD(INT_ENABLE, MEM_DONE, 0, 1) /* wtc */
REG32(INT_DISABLE, 0x1c)
    FIELD(INT_DISABLE, FIFO_OVERFLOW, 7, 1) /* wtc */
    FIELD(INT_DISABLE, INVALID_APB, 6, 1) /* wtc */
    FIELD(INT_DISABLE, THRESH_HIT, 5, 1) /* wtc */
    FIELD(INT_DISABLE, TIMEOUT_MEM, 4, 1) /* wtc */
    FIELD(INT_DISABLE, TIMEOUT_STRM, 3, 1) /* wtc */
    FIELD(INT_DISABLE, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
    FIELD(INT_DISABLE, DONE, 1, 1) /* wtc */
    FIELD(INT_DISABLE, MEM_DONE, 0, 1) /* wtc */
REG32(INT_MASK, 0x20)
    FIELD(INT_MASK, FIFO_OVERFLOW, 7, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, INVALID_APB, 6, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, THRESH_HIT, 5, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, TIMEOUT_MEM, 4, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, TIMEOUT_STRM, 3, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, AXI_BRESP_ERR, 2, 1) /* ro, reset: 0x1, SRC: AXI_RDERR */
    FIELD(INT_MASK, DONE, 1, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, MEM_DONE, 0, 1) /* ro, reset: 0x1 */
REG32(CTRL2, 0x24)
    FIELD(CTRL2, ARCACHE, 24, 3) /* rw */
    FIELD(CTRL2, ROUTE_BIT, 23, 1) /* rw */
    FIELD(CTRL2, TIMEOUT_EN, 22, 1) /* rw */
    FIELD(CTRL2, TIMEOUT_PRE, 4, 12) /* rw, reset: 0xFFF */
    FIELD(CTRL2, MAX_OUTS_CMDS, 0, 4) /* rw, reset: 0x8 */
REG32(ADDR_MSB, 0x28)
    FIELD(ADDR_MSB, ADDR_MSB, 0, 17) /* wo */

#define R_CTRL_TIMEOUT_VAL_RESET    (0xFFE)
#define R_CTRL_FIFO_THRESH_RESET    (0x80)
#define R_CTRL_FIFOTHRESH_RESET     (0x40)
#define R_CTRL2_TIMEOUT_PRE_RESET   (0xFFF)
#define R_CTRL2_MAX_OUTS_CMDS_RESET (0x8)

#define XLNX_CSU_DMA_ERR_DEBUG  (0)
#define XLNX_CSU_DMA_INT_R_MASK (0xff)

/* UG1807: Set the prescaler value for the timeout in clk (~2.5ns) cycles */
#define XLNX_CSU_DMA_TIMER_FREQ (400 * 1000 * 1000)
  110. static bool xlnx_csu_dma_is_paused(XlnxCSUDMA *s)
  111. {
  112. bool paused;
  113. paused = !!(s->regs[R_CTRL] & R_CTRL_PAUSE_STRM_MASK);
  114. paused |= !!(s->regs[R_CTRL] & R_CTRL_PAUSE_MEM_MASK);
  115. return paused;
  116. }
/* SRC channel only: was SIZE last written with LAST_WORD (end-of-packet)? */
static bool xlnx_csu_dma_get_eop(XlnxCSUDMA *s)
{
    return s->r_size_last_word;
}
  121. static bool xlnx_csu_dma_burst_is_fixed(XlnxCSUDMA *s)
  122. {
  123. return !!(s->regs[R_CTRL] & R_CTRL_AXI_BRST_TYPE_MASK);
  124. }
  125. static bool xlnx_csu_dma_timeout_enabled(XlnxCSUDMA *s)
  126. {
  127. return !!(s->regs[R_CTRL2] & R_CTRL2_TIMEOUT_EN_MASK);
  128. }
  129. static void xlnx_csu_dma_update_done_cnt(XlnxCSUDMA *s, int a)
  130. {
  131. int cnt;
  132. /* Increase DONE_CNT */
  133. cnt = ARRAY_FIELD_EX32(s->regs, STATUS, DONE_CNT) + a;
  134. ARRAY_FIELD_DP32(s->regs, STATUS, DONE_CNT, cnt);
  135. }
/*
 * Post-process 'len' bytes of DMA data in place, one 32-bit word at a time
 * (callers pass a word-aligned 'len'):
 *  - SRC channel: accumulate each data word into R_CRC (a running sum).
 *  - Either channel: byte-swap every word when CTRL.ENDIANNESS is set.
 * DST data with ENDIANNESS clear needs neither, hence the early return.
 */
static void xlnx_csu_dma_data_process(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
{
    uint32_t bswap;
    uint32_t i;

    bswap = s->regs[R_CTRL] & R_CTRL_ENDIANNESS_MASK;
    if (s->is_dst && !bswap) {
        /* Fast when ENDIANNESS cleared */
        return;
    }

    for (i = 0; i < len; i += 4) {
        uint8_t *b = &buf[i];
        /* Gather the word through a union so unaligned 'buf' is safe */
        union {
            uint8_t u8[4];
            uint32_t u32;
        } v = {
            .u8 = { b[0], b[1], b[2], b[3] }
        };

        if (!s->is_dst) {
            s->regs[R_CRC] += v.u32;
        }
        if (bswap) {
            /*
             * No point using bswap, we need to writeback
             * into a potentially unaligned pointer.
             */
            b[0] = v.u8[3];
            b[1] = v.u8[2];
            b[2] = v.u8[1];
            b[3] = v.u8[0];
        }
    }
}
  168. static void xlnx_csu_dma_update_irq(XlnxCSUDMA *s)
  169. {
  170. qemu_set_irq(s->irq, !!(s->regs[R_INT_STATUS] & ~s->regs[R_INT_MASK]));
  171. }
/*
 * Read 'len' bytes (word aligned) of guest memory at ADDR_MSB:ADDR into
 * 'buf', then post-process them (CRC accumulation / optional byte swap).
 *
 * With a fixed burst type every beat re-reads the same address, 'width'
 * bytes at a time; otherwise a single incrementing read is issued.
 * A bus error raises AXI_BRESP_ERR (named AXI_RDERR on the SRC channel).
 * Returns 'len' regardless of the transfer result.
 */
static uint32_t xlnx_csu_dma_read(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
{
    hwaddr addr = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
    MemTxResult result = MEMTX_OK;

    if (xlnx_csu_dma_burst_is_fixed(s)) {
        uint32_t i;

        for (i = 0; i < len && (result == MEMTX_OK); i += s->width) {
            uint32_t mlen = MIN(len - i, s->width);

            result = address_space_rw(&s->dma_as, addr, s->attr,
                                      buf + i, mlen, false);
        }
    } else {
        result = address_space_rw(&s->dma_as, addr, s->attr, buf, len, false);
    }

    if (result == MEMTX_OK) {
        xlnx_csu_dma_data_process(s, buf, len);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " HWADDR_FMT_plx
                      " for mem read", __func__, addr);
        s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
        xlnx_csu_dma_update_irq(s);
    }
    return len;
}
/*
 * Write 'len' bytes (word aligned) from 'buf' to guest memory at
 * ADDR_MSB:ADDR, after CRC/endianness post-processing of the buffer.
 *
 * With a fixed burst type every 'width'-byte beat targets the same
 * address while 'buf' advances; otherwise a single incrementing write
 * is issued. A bus error raises AXI_BRESP_ERR.
 * Returns 'len' regardless of the transfer result.
 */
static uint32_t xlnx_csu_dma_write(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
{
    hwaddr addr = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
    MemTxResult result = MEMTX_OK;

    xlnx_csu_dma_data_process(s, buf, len);
    if (xlnx_csu_dma_burst_is_fixed(s)) {
        uint32_t i;

        for (i = 0; i < len && (result == MEMTX_OK); i += s->width) {
            uint32_t mlen = MIN(len - i, s->width);

            result = address_space_rw(&s->dma_as, addr, s->attr,
                                      buf, mlen, true);
            buf += mlen;
        }
    } else {
        result = address_space_rw(&s->dma_as, addr, s->attr, buf, len, true);
    }

    if (result != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " HWADDR_FMT_plx
                      " for mem write", __func__, addr);
        s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
        xlnx_csu_dma_update_irq(s);
    }
    return len;
}
  222. static void xlnx_csu_dma_done(XlnxCSUDMA *s)
  223. {
  224. s->regs[R_STATUS] &= ~R_STATUS_BUSY_MASK;
  225. s->regs[R_INT_STATUS] |= R_INT_STATUS_DONE_MASK;
  226. if (!s->is_dst) {
  227. s->regs[R_INT_STATUS] |= R_INT_STATUS_MEM_DONE_MASK;
  228. }
  229. xlnx_csu_dma_update_done_cnt(s, 1);
  230. }
/*
 * Account for 'len' bytes just transferred: decrement SIZE, advance the
 * 48-bit DMA address (unless the burst type is fixed) and, once SIZE
 * drains to zero, complete the transfer.
 * Returns the number of bytes still outstanding.
 */
static uint32_t xlnx_csu_dma_advance(XlnxCSUDMA *s, uint32_t len)
{
    uint32_t size = s->regs[R_SIZE];
    hwaddr dst = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];

    /* Callers never transfer more than what is programmed in SIZE */
    assert(len <= size);

    size -= len;
    s->regs[R_SIZE] = size;

    if (!xlnx_csu_dma_burst_is_fixed(s)) {
        dst += len;
        s->regs[R_ADDR] = (uint32_t) dst;
        s->regs[R_ADDR_MSB] = dst >> 32;
    }

    if (size == 0) {
        xlnx_csu_dma_done(s);
    }

    return size;
}
/*
 * Pump the SRC (read) channel: while a transfer is programmed, the channel
 * is not paused, and the downstream sink accepts data, read up to 4 KiB
 * from memory per iteration and push it into the stream. If the sink
 * applies backpressure while data remains and timeouts are enabled, arm
 * the timeout timer; it is (re)stopped on every entry.
 * Also used as the sink's can-push notifier callback.
 */
static void xlnx_csu_dma_src_notify(void *opaque)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(opaque);
    unsigned char buf[4 * 1024];
    size_t rlen = 0;

    ptimer_transaction_begin(s->src_timer);
    /* Stop the backpressure timer */
    ptimer_stop(s->src_timer);

    while (s->regs[R_SIZE] && !xlnx_csu_dma_is_paused(s) &&
           stream_can_push(s->tx_dev, xlnx_csu_dma_src_notify, s)) {
        uint32_t plen = MIN(s->regs[R_SIZE], sizeof buf);
        bool eop = false;

        /* Did we fit it all? */
        if (s->regs[R_SIZE] == plen && xlnx_csu_dma_get_eop(s)) {
            eop = true;
        }

        /* DMA transfer */
        xlnx_csu_dma_read(s, buf, plen);
        rlen = stream_push(s->tx_dev, buf, plen, eop);
        xlnx_csu_dma_advance(s, rlen);
    }

    if (xlnx_csu_dma_timeout_enabled(s) && s->regs[R_SIZE] &&
        !stream_can_push(s->tx_dev, xlnx_csu_dma_src_notify, s)) {
        /* TIMEOUT_PRE prescales the 400 MHz reference clock */
        uint32_t timeout = ARRAY_FIELD_EX32(s->regs, CTRL, TIMEOUT_VAL);
        uint32_t div = ARRAY_FIELD_EX32(s->regs, CTRL2, TIMEOUT_PRE) + 1;
        uint32_t freq = XLNX_CSU_DMA_TIMER_FREQ;

        freq /= div;
        ptimer_set_freq(s->src_timer, freq);
        ptimer_set_count(s->src_timer, timeout);
        ptimer_run(s->src_timer, 1);
    }

    ptimer_transaction_commit(s->src_timer);

    xlnx_csu_dma_update_irq(s);
}
/* ADDR pre-write: keep only the word-aligned address bits [31:2] */
static uint64_t addr_pre_write(RegisterInfo *reg, uint64_t val)
{
    /* Address is word aligned */
    return val & R_ADDR_ADDR_MASK;
}
/*
 * SIZE pre-write: warn if the guest reprograms SIZE while a transfer is
 * still in flight, latch the LAST_WORD bit on the SRC channel, and store
 * only the word-aligned size bits.
 */
static uint64_t size_pre_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
    uint64_t size = val & R_SIZE_SIZE_MASK;

    if (s->regs[R_SIZE] != 0) {
        /* A zero write to a SRC channel merely updates LAST_WORD */
        if (size || s->is_dst) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Starting DMA while already running.\n",
                          __func__);
        }
    }

    if (!s->is_dst) {
        s->r_size_last_word = !!(val & R_SIZE_LAST_WORD_MASK);
    }

    /* Size is word aligned */
    return size;
}
/* SIZE post-read: reflect the latched LAST_WORD flag in bit 0 */
static uint64_t size_post_read(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    return val | s->r_size_last_word;
}
/*
 * SIZE post-write: writing SIZE is what starts a transfer. Mark the
 * channel busy, then either complete immediately (SIZE == 0), start
 * pumping the SRC channel, or wake a waiting DST-side stream source.
 */
static void size_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    s->regs[R_STATUS] |= R_STATUS_BUSY_MASK;

    /*
     * Note that if SIZE is programmed to 0, and the DMA is started,
     * the interrupts DONE and MEM_DONE will be asserted.
     */
    if (s->regs[R_SIZE] == 0) {
        xlnx_csu_dma_done(s);
        xlnx_csu_dma_update_irq(s);
        return;
    }

    /* Set SIZE is considered the last step in transfer configuration */
    if (!s->is_dst) {
        xlnx_csu_dma_src_notify(s);
    } else {
        if (s->notify) {
            s->notify(s->notify_opaque);
        }
    }
}
  331. static uint64_t status_pre_write(RegisterInfo *reg, uint64_t val)
  332. {
  333. return val & (R_STATUS_DONE_CNT_MASK | R_STATUS_BUSY_MASK);
  334. }
  335. static void ctrl_post_write(RegisterInfo *reg, uint64_t val)
  336. {
  337. XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
  338. if (!s->is_dst) {
  339. if (!xlnx_csu_dma_is_paused(s)) {
  340. xlnx_csu_dma_src_notify(s);
  341. }
  342. } else {
  343. if (!xlnx_csu_dma_is_paused(s) && s->notify) {
  344. s->notify(s->notify_opaque);
  345. }
  346. }
  347. }
  348. static uint64_t int_status_pre_write(RegisterInfo *reg, uint64_t val)
  349. {
  350. XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
  351. /* DMA counter decrements when flag 'DONE' is cleared */
  352. if ((val & s->regs[R_INT_STATUS] & R_INT_STATUS_DONE_MASK)) {
  353. xlnx_csu_dma_update_done_cnt(s, -1);
  354. }
  355. return s->regs[R_INT_STATUS] & ~val;
  356. }
  357. static void int_status_post_write(RegisterInfo *reg, uint64_t val)
  358. {
  359. XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
  360. xlnx_csu_dma_update_irq(s);
  361. }
  362. static uint64_t int_enable_pre_write(RegisterInfo *reg, uint64_t val)
  363. {
  364. XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
  365. uint32_t v32 = val;
  366. /*
  367. * R_INT_ENABLE doesn't have its own state.
  368. * It is used to indirectly modify R_INT_MASK.
  369. *
  370. * 1: Enable this interrupt field (the mask bit will be cleared to 0)
  371. * 0: No effect
  372. */
  373. s->regs[R_INT_MASK] &= ~v32;
  374. return 0;
  375. }
  376. static void int_enable_post_write(RegisterInfo *reg, uint64_t val)
  377. {
  378. XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
  379. xlnx_csu_dma_update_irq(s);
  380. }
  381. static uint64_t int_disable_pre_write(RegisterInfo *reg, uint64_t val)
  382. {
  383. XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
  384. uint32_t v32 = val;
  385. /*
  386. * R_INT_DISABLE doesn't have its own state.
  387. * It is used to indirectly modify R_INT_MASK.
  388. *
  389. * 1: Disable this interrupt field (the mask bit will be set to 1)
  390. * 0: No effect
  391. */
  392. s->regs[R_INT_MASK] |= v32;
  393. return 0;
  394. }
  395. static void int_disable_post_write(RegisterInfo *reg, uint64_t val)
  396. {
  397. XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
  398. xlnx_csu_dma_update_irq(s);
  399. }
/* ADDR_MSB pre-write: only the low 17 address-extension bits exist */
static uint64_t addr_msb_pre_write(RegisterInfo *reg, uint64_t val)
{
    return val & R_ADDR_MSB_ADDR_MSB_MASK;
}
/*
 * XlnxCSUDMAClass 'read' hook: program a transfer of 'len' bytes from
 * 'addr' by writing the address registers directly and then pushing
 * 'len' through the SIZE register write path, which starts the DMA
 * (and, on a SRC channel, pumps it synchronously).
 * The transfer ran to completion iff SIZE drained to zero.
 */
static MemTxResult xlnx_csu_dma_class_read(XlnxCSUDMA *s, hwaddr addr,
                                           uint32_t len)
{
    RegisterInfo *reg = &s->regs_info[R_SIZE];
    uint64_t we = MAKE_64BIT_MASK(0, 4 * 8);

    s->regs[R_ADDR] = addr;
    s->regs[R_ADDR_MSB] = (uint64_t)addr >> 32;

    register_write(reg, len, we, object_get_typename(OBJECT(s)), false);

    return (s->regs[R_SIZE] == 0) ? MEMTX_OK : MEMTX_ERROR;
}
/*
 * Register access descriptions, instantiated twice: index 0 for the SRC
 * (read) channel, index 1 for the DST (write) channel — matching the
 * xlnx_csu_dma_regs_info[!!s->is_dst] lookup in realize. The 'snd'
 * macro argument marks the SRC flavour, which has no CTRL.FIFOTHRESH
 * reset value.
 */
static const RegisterAccessInfo *xlnx_csu_dma_regs_info[] = {
#define DMACH_REGINFO(NAME, snd) \
    (const RegisterAccessInfo []) { \
        { \
            .name = #NAME "_ADDR", \
            .addr = A_ADDR, \
            .pre_write = addr_pre_write \
        }, { \
            .name = #NAME "_SIZE", \
            .addr = A_SIZE, \
            .pre_write = size_pre_write, \
            .post_write = size_post_write, \
            .post_read = size_post_read \
        }, { \
            .name = #NAME "_STATUS", \
            .addr = A_STATUS, \
            .pre_write = status_pre_write, \
            .w1c = R_STATUS_DONE_CNT_MASK, \
            .ro = (R_STATUS_BUSY_MASK \
                   | R_STATUS_FIFO_LEVEL_MASK \
                   | R_STATUS_OUTSTANDING_MASK) \
        }, { \
            .name = #NAME "_CTRL", \
            .addr = A_CTRL, \
            .post_write = ctrl_post_write, \
            .reset = ((R_CTRL_TIMEOUT_VAL_RESET << R_CTRL_TIMEOUT_VAL_SHIFT) \
                      | (R_CTRL_FIFO_THRESH_RESET << R_CTRL_FIFO_THRESH_SHIFT)\
                      | (snd ? 0 : R_CTRL_FIFOTHRESH_RESET \
                         << R_CTRL_FIFOTHRESH_SHIFT)) \
        }, { \
            .name = #NAME "_CRC", \
            .addr = A_CRC, \
        }, { \
            .name = #NAME "_INT_STATUS", \
            .addr = A_INT_STATUS, \
            .pre_write = int_status_pre_write, \
            .post_write = int_status_post_write \
        }, { \
            .name = #NAME "_INT_ENABLE", \
            .addr = A_INT_ENABLE, \
            .pre_write = int_enable_pre_write, \
            .post_write = int_enable_post_write \
        }, { \
            .name = #NAME "_INT_DISABLE", \
            .addr = A_INT_DISABLE, \
            .pre_write = int_disable_pre_write, \
            .post_write = int_disable_post_write \
        }, { \
            .name = #NAME "_INT_MASK", \
            .addr = A_INT_MASK, \
            .ro = ~0, \
            .reset = XLNX_CSU_DMA_INT_R_MASK \
        }, { \
            .name = #NAME "_CTRL2", \
            .addr = A_CTRL2, \
            .reset = ((R_CTRL2_TIMEOUT_PRE_RESET \
                       << R_CTRL2_TIMEOUT_PRE_SHIFT) \
                      | (R_CTRL2_MAX_OUTS_CMDS_RESET \
                         << R_CTRL2_MAX_OUTS_CMDS_SHIFT)) \
        }, { \
            .name = #NAME "_ADDR_MSB", \
            .addr = A_ADDR_MSB, \
            .pre_write = addr_msb_pre_write \
        } \
    }

    DMACH_REGINFO(DMA_SRC, true),
    DMACH_REGINFO(DMA_DST, false)
};
/* MMIO ops: all registers are 32-bit and require 32-bit accesses */
static const MemoryRegionOps xlnx_csu_dma_ops = {
    .read = register_read_memory,
    .write = register_write_memory,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    }
};
  491. static void xlnx_csu_dma_src_timeout_hit(void *opaque)
  492. {
  493. XlnxCSUDMA *s = XLNX_CSU_DMA(opaque);
  494. /* Ignore if the timeout is masked */
  495. if (!xlnx_csu_dma_timeout_enabled(s)) {
  496. return;
  497. }
  498. s->regs[R_INT_STATUS] |= R_INT_STATUS_TIMEOUT_STRM_MASK;
  499. xlnx_csu_dma_update_irq(s);
  500. }
/*
 * StreamSink push hook for the DST (write) channel: store up to SIZE
 * bytes of incoming stream data to memory (whole words only). Data
 * arriving while the channel is paused, or amounting to less than one
 * word, is dropped and flagged as a FIFO overflow.
 * Returns the number of bytes consumed from 'buf'.
 */
static size_t xlnx_csu_dma_stream_push(StreamSink *obj, uint8_t *buf,
                                       size_t len, bool eop)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(obj);
    uint32_t size = s->regs[R_SIZE];
    uint32_t mlen = MIN(size, len) & (~3); /* Size is word aligned */

    /* Be called when it's DST */
    assert(s->is_dst);

    if (size == 0 || len <= 0) {
        return 0;
    }

    if (len && (xlnx_csu_dma_is_paused(s) || mlen == 0)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "csu-dma: DST channel dropping %zd b of data.\n", len);
        s->regs[R_INT_STATUS] |= R_INT_STATUS_FIFO_OVERFLOW_MASK;
        return len;
    }

    if (xlnx_csu_dma_write(s, buf, mlen) != mlen) {
        return 0;
    }

    xlnx_csu_dma_advance(s, mlen);
    xlnx_csu_dma_update_irq(s);

    return mlen;
}
  525. static bool xlnx_csu_dma_stream_can_push(StreamSink *obj,
  526. StreamCanPushNotifyFn notify,
  527. void *notify_opaque)
  528. {
  529. XlnxCSUDMA *s = XLNX_CSU_DMA(obj);
  530. if (s->regs[R_SIZE] != 0) {
  531. return true;
  532. } else {
  533. s->notify = notify;
  534. s->notify_opaque = notify_opaque;
  535. return false;
  536. }
  537. }
  538. static void xlnx_csu_dma_reset(DeviceState *dev)
  539. {
  540. XlnxCSUDMA *s = XLNX_CSU_DMA(dev);
  541. unsigned int i;
  542. for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
  543. register_reset(&s->regs_info[i]);
  544. }
  545. }
/*
 * Realize: a SRC channel must have its stream sink connected, and both
 * channel flavours need the 'dma' memory-region link. Sets up the DMA
 * address space, the register-block MMIO (per-channel flavour), the IRQ
 * and the backpressure timer.
 */
static void xlnx_csu_dma_realize(DeviceState *dev, Error **errp)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(dev);
    RegisterInfoArray *reg_array;

    if (!s->is_dst && !s->tx_dev) {
        error_setg(errp, "zynqmp.csu-dma: Stream not connected");
        return;
    }

    if (!s->dma_mr) {
        error_setg(errp, TYPE_XLNX_CSU_DMA " 'dma' link not set");
        return;
    }
    address_space_init(&s->dma_as, s->dma_mr, "csu-dma");

    /* Index 0 is the SRC register flavour, index 1 the DST flavour */
    reg_array =
        register_init_block32(dev, xlnx_csu_dma_regs_info[!!s->is_dst],
                              XLNX_CSU_DMA_R_MAX,
                              s->regs_info, s->regs,
                              &xlnx_csu_dma_ops,
                              XLNX_CSU_DMA_ERR_DEBUG,
                              XLNX_CSU_DMA_R_MAX * 4);
    memory_region_add_subregion(&s->iomem,
                                0x0,
                                &reg_array->mem);
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
    sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);

    s->src_timer = ptimer_init(xlnx_csu_dma_src_timeout_hit,
                               s, PTIMER_POLICY_LEGACY);

    s->attr = MEMTXATTRS_UNSPECIFIED;

    s->r_size_last_word = 0;
}
/* Migration state: configuration, latched EOP flag, registers and timer */
static const VMStateDescription vmstate_xlnx_csu_dma = {
    .name = TYPE_XLNX_CSU_DMA,
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (const VMStateField[]) {
        VMSTATE_PTIMER(src_timer, XlnxCSUDMA),
        VMSTATE_UINT16(width, XlnxCSUDMA),
        VMSTATE_BOOL(is_dst, XlnxCSUDMA),
        VMSTATE_BOOL(r_size_last_word, XlnxCSUDMA),
        VMSTATE_UINT32_ARRAY(regs, XlnxCSUDMA, XLNX_CSU_DMA_R_MAX),
        VMSTATE_END_OF_LIST(),
    }
};
static const Property xlnx_csu_dma_properties[] = {
    /*
     * Ref PG021, Stream Data Width:
     * Data width in bits of the AXI S2MM AXI4-Stream Data bus.
     * This value must be equal or less than the Memory Map Data Width.
     * Valid values are 8, 16, 32, 64, 128, 512 and 1024.
     * "dma-width" is the byte value of the "Stream Data Width".
     */
    DEFINE_PROP_UINT16("dma-width", XlnxCSUDMA, width, 4),
    /*
     * The CSU DMA is a two-channel, simple DMA, allowing separate control of
     * the SRC (read) channel and DST (write) channel. "is-dst" is used to mark
     * which channel the device is connected to.
     */
    DEFINE_PROP_BOOL("is-dst", XlnxCSUDMA, is_dst, true),
    /* SRC channel only: the StreamSink that receives the DMA data */
    DEFINE_PROP_LINK("stream-connected-dma", XlnxCSUDMA, tx_dev,
                     TYPE_STREAM_SINK, StreamSink *),
    /* Memory region the DMA reads from / writes to (required) */
    DEFINE_PROP_LINK("dma", XlnxCSUDMA, dma_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
};
/* Wire up device callbacks, the StreamSink interface and the class hook */
static void xlnx_csu_dma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);
    XlnxCSUDMAClass *xcdc = XLNX_CSU_DMA_CLASS(klass);

    device_class_set_legacy_reset(dc, xlnx_csu_dma_reset);
    dc->realize = xlnx_csu_dma_realize;
    dc->vmsd = &vmstate_xlnx_csu_dma;
    device_class_set_props(dc, xlnx_csu_dma_properties);

    /* The DST channel acts as a stream sink */
    ssc->push = xlnx_csu_dma_stream_push;
    ssc->can_push = xlnx_csu_dma_stream_can_push;

    xcdc->read = xlnx_csu_dma_class_read;
}
/* Instance init: container region; the register block is mapped at realize */
static void xlnx_csu_dma_init(Object *obj)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(obj);

    memory_region_init(&s->iomem, obj, TYPE_XLNX_CSU_DMA,
                       XLNX_CSU_DMA_R_MAX * 4);
}
/* QOM type: a sysbus device that also implements the StreamSink interface */
static const TypeInfo xlnx_csu_dma_info = {
    .name = TYPE_XLNX_CSU_DMA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxCSUDMA),
    .class_init = xlnx_csu_dma_class_init,
    .class_size = sizeof(XlnxCSUDMAClass),
    .instance_init = xlnx_csu_dma_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SINK },
        { }
    }
};
/* Register the type with QOM at module load time */
static void xlnx_csu_dma_register_types(void)
{
    type_register_static(&xlnx_csu_dma_info);
}

type_init(xlnx_csu_dma_register_types)