xlnx_dpdma.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852
  1. /*
  2. * xlnx_dpdma.c
  3. *
  4. * Copyright (C) 2015 : GreenSocs Ltd
  5. * http://www.greensocs.com/ , email: info@greensocs.com
  6. *
  7. * Developed by :
  8. * Frederic Konrad <fred.konrad@greensocs.com>
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License as published by
  12. * the Free Software Foundation, either version 2 of the License, or
  13. * (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  18. * GNU General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License along
  21. * with this program; if not, see <http://www.gnu.org/licenses/>.
  22. *
  23. */
  24. #include "qemu/osdep.h"
  25. #include "qemu/cutils.h"
  26. #include "qemu/log.h"
  27. #include "qemu/module.h"
  28. #include "hw/dma/xlnx_dpdma.h"
  29. #include "hw/irq.h"
  30. #include "migration/vmstate.h"
#ifndef DEBUG_DPDMA
#define DEBUG_DPDMA 0
#endif

/*
 * Debug trace macro.  The "if (DEBUG_DPDMA)" test (rather than #ifdef)
 * keeps the format string and arguments compiled and type-checked even
 * when debugging is disabled; the compiler removes the dead call.
 */
#define DPRINTF(fmt, ...) do { \
    if (DEBUG_DPDMA) { \
        qemu_log("xlnx_dpdma: " fmt , ## __VA_ARGS__); \
    } \
} while (0)
  39. /*
  40. * Registers offset for DPDMA.
  41. */
  42. #define DPDMA_ERR_CTRL (0x0000)
  43. #define DPDMA_ISR (0x0004 >> 2)
  44. #define DPDMA_IMR (0x0008 >> 2)
  45. #define DPDMA_IEN (0x000C >> 2)
  46. #define DPDMA_IDS (0x0010 >> 2)
  47. #define DPDMA_EISR (0x0014 >> 2)
  48. #define DPDMA_EIMR (0x0018 >> 2)
  49. #define DPDMA_EIEN (0x001C >> 2)
  50. #define DPDMA_EIDS (0x0020 >> 2)
  51. #define DPDMA_CNTL (0x0100 >> 2)
  52. #define DPDMA_GBL (0x0104 >> 2)
  53. #define DPDMA_GBL_TRG_CH(n) (1 << n)
  54. #define DPDMA_GBL_RTRG_CH(n) (1 << 6 << n)
  55. #define DPDMA_ALC0_CNTL (0x0108 >> 2)
  56. #define DPDMA_ALC0_STATUS (0x010C >> 2)
  57. #define DPDMA_ALC0_MAX (0x0110 >> 2)
  58. #define DPDMA_ALC0_MIN (0x0114 >> 2)
  59. #define DPDMA_ALC0_ACC (0x0118 >> 2)
  60. #define DPDMA_ALC0_ACC_TRAN (0x011C >> 2)
  61. #define DPDMA_ALC1_CNTL (0x0120 >> 2)
  62. #define DPDMA_ALC1_STATUS (0x0124 >> 2)
  63. #define DPDMA_ALC1_MAX (0x0128 >> 2)
  64. #define DPDMA_ALC1_MIN (0x012C >> 2)
  65. #define DPDMA_ALC1_ACC (0x0130 >> 2)
  66. #define DPDMA_ALC1_ACC_TRAN (0x0134 >> 2)
  67. #define DPDMA_DSCR_STRT_ADDRE_CH(n) ((0x0200 + n * 0x100) >> 2)
  68. #define DPDMA_DSCR_STRT_ADDR_CH(n) ((0x0204 + n * 0x100) >> 2)
  69. #define DPDMA_DSCR_NEXT_ADDRE_CH(n) ((0x0208 + n * 0x100) >> 2)
  70. #define DPDMA_DSCR_NEXT_ADDR_CH(n) ((0x020C + n * 0x100) >> 2)
  71. #define DPDMA_PYLD_CUR_ADDRE_CH(n) ((0x0210 + n * 0x100) >> 2)
  72. #define DPDMA_PYLD_CUR_ADDR_CH(n) ((0x0214 + n * 0x100) >> 2)
  73. #define DPDMA_CNTL_CH(n) ((0x0218 + n * 0x100) >> 2)
  74. #define DPDMA_CNTL_CH_EN (1)
  75. #define DPDMA_CNTL_CH_PAUSED (1 << 1)
  76. #define DPDMA_STATUS_CH(n) ((0x021C + n * 0x100) >> 2)
  77. #define DPDMA_STATUS_BURST_TYPE (1 << 4)
  78. #define DPDMA_STATUS_MODE (1 << 5)
  79. #define DPDMA_STATUS_EN_CRC (1 << 6)
  80. #define DPDMA_STATUS_LAST_DSCR (1 << 7)
  81. #define DPDMA_STATUS_LDSCR_FRAME (1 << 8)
  82. #define DPDMA_STATUS_IGNR_DONE (1 << 9)
  83. #define DPDMA_STATUS_DSCR_DONE (1 << 10)
  84. #define DPDMA_STATUS_EN_DSCR_UP (1 << 11)
  85. #define DPDMA_STATUS_EN_DSCR_INTR (1 << 12)
  86. #define DPDMA_STATUS_PREAMBLE_OFF (13)
  87. #define DPDMA_VDO_CH(n) ((0x0220 + n * 0x100) >> 2)
  88. #define DPDMA_PYLD_SZ_CH(n) ((0x0224 + n * 0x100) >> 2)
  89. #define DPDMA_DSCR_ID_CH(n) ((0x0228 + n * 0x100) >> 2)
  90. /*
  91. * Descriptor control field.
  92. */
  93. #define CONTROL_PREAMBLE_VALUE 0xA5
  94. #define DSCR_CTRL_PREAMBLE 0xFF
  95. #define DSCR_CTRL_EN_DSCR_DONE_INTR (1 << 8)
  96. #define DSCR_CTRL_EN_DSCR_UPDATE (1 << 9)
  97. #define DSCR_CTRL_IGNORE_DONE (1 << 10)
  98. #define DSCR_CTRL_AXI_BURST_TYPE (1 << 11)
  99. #define DSCR_CTRL_AXCACHE (0x0F << 12)
  100. #define DSCR_CTRL_AXPROT (0x2 << 16)
  101. #define DSCR_CTRL_DESCRIPTOR_MODE (1 << 18)
  102. #define DSCR_CTRL_LAST_DESCRIPTOR (1 << 19)
  103. #define DSCR_CTRL_ENABLE_CRC (1 << 20)
  104. #define DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME (1 << 21)
  105. /*
  106. * Descriptor timestamp field.
  107. */
  108. #define STATUS_DONE (1 << 31)
  109. #define DPDMA_FRAG_MAX_SZ (4096)
/* AXI burst type used for the payload transfer. */
enum DPDMABurstType {
    DPDMA_INCR = 0,   /* incrementing-address burst */
    DPDMA_FIXED = 1   /* fixed-address burst */
};

/* Payload layout selected by DSCR_CTRL_DESCRIPTOR_MODE. */
enum DPDMAMode {
    DPDMA_CONTIGOUS = 0,   /* one contiguous source buffer */
    DPDMA_FRAGMENTED = 1   /* up to five source fragments */
};

/*
 * In-guest-memory layout of one DPDMA descriptor: sixteen little-endian
 * 32-bit words (64 bytes).  The crc word is a 32-bit sum of the first
 * fifteen words (see xlnx_dpdma_desc_check_crc()).  Field order is the
 * hardware layout and must not be changed.
 */
struct DPDMADescriptor {
    uint32_t control;
    uint32_t descriptor_id;
    /* transfer size in byte. */
    uint32_t xfer_size;
    uint32_t line_size_stride;
    uint32_t timestamp_lsb;
    uint32_t timestamp_msb;
    /* contains extension for both descriptor and source. */
    uint32_t address_extension;
    uint32_t next_descriptor;
    uint32_t source_address;
    uint32_t address_extension_23;   /* extensions for source addresses 2 and 3 */
    uint32_t address_extension_45;   /* extensions for source addresses 4 and 5 */
    uint32_t source_address2;
    uint32_t source_address3;
    uint32_t source_address4;
    uint32_t source_address5;
    uint32_t crc;
};

typedef enum DPDMABurstType DPDMABurstType;
typedef enum DPDMAMode DPDMAMode;
typedef struct DPDMADescriptor DPDMADescriptor;
  141. static bool xlnx_dpdma_desc_is_last(DPDMADescriptor *desc)
  142. {
  143. return ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR) != 0);
  144. }
  145. static bool xlnx_dpdma_desc_is_last_of_frame(DPDMADescriptor *desc)
  146. {
  147. return ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME) != 0);
  148. }
  149. static uint64_t xlnx_dpdma_desc_get_source_address(DPDMADescriptor *desc,
  150. uint8_t frag)
  151. {
  152. uint64_t addr = 0;
  153. assert(frag < 5);
  154. switch (frag) {
  155. case 0:
  156. addr = (uint64_t)desc->source_address
  157. + (extract64(desc->address_extension, 16, 16) << 32);
  158. break;
  159. case 1:
  160. addr = (uint64_t)desc->source_address2
  161. + (extract64(desc->address_extension_23, 0, 16) << 32);
  162. break;
  163. case 2:
  164. addr = (uint64_t)desc->source_address3
  165. + (extract64(desc->address_extension_23, 16, 16) << 32);
  166. break;
  167. case 3:
  168. addr = (uint64_t)desc->source_address4
  169. + (extract64(desc->address_extension_45, 0, 16) << 32);
  170. break;
  171. case 4:
  172. addr = (uint64_t)desc->source_address5
  173. + (extract64(desc->address_extension_45, 16, 16) << 32);
  174. break;
  175. default:
  176. addr = 0;
  177. break;
  178. }
  179. return addr;
  180. }
  181. static uint32_t xlnx_dpdma_desc_get_transfer_size(DPDMADescriptor *desc)
  182. {
  183. return desc->xfer_size;
  184. }
  185. static uint32_t xlnx_dpdma_desc_get_line_size(DPDMADescriptor *desc)
  186. {
  187. return extract32(desc->line_size_stride, 0, 18);
  188. }
  189. static uint32_t xlnx_dpdma_desc_get_line_stride(DPDMADescriptor *desc)
  190. {
  191. return extract32(desc->line_size_stride, 18, 14) * 16;
  192. }
  193. static inline bool xlnx_dpdma_desc_crc_enabled(DPDMADescriptor *desc)
  194. {
  195. return (desc->control & DSCR_CTRL_ENABLE_CRC) != 0;
  196. }
  197. static inline bool xlnx_dpdma_desc_check_crc(DPDMADescriptor *desc)
  198. {
  199. uint32_t *p = (uint32_t *)desc;
  200. uint32_t crc = 0;
  201. uint8_t i;
  202. /*
  203. * CRC is calculated on the whole descriptor except the last 32bits word
  204. * using 32bits addition.
  205. */
  206. for (i = 0; i < 15; i++) {
  207. crc += p[i];
  208. }
  209. return crc == desc->crc;
  210. }
  211. static inline bool xlnx_dpdma_desc_completion_interrupt(DPDMADescriptor *desc)
  212. {
  213. return (desc->control & DSCR_CTRL_EN_DSCR_DONE_INTR) != 0;
  214. }
  215. static inline bool xlnx_dpdma_desc_is_valid(DPDMADescriptor *desc)
  216. {
  217. return (desc->control & DSCR_CTRL_PREAMBLE) == CONTROL_PREAMBLE_VALUE;
  218. }
  219. static inline bool xlnx_dpdma_desc_is_contiguous(DPDMADescriptor *desc)
  220. {
  221. return (desc->control & DSCR_CTRL_DESCRIPTOR_MODE) == 0;
  222. }
  223. static inline bool xlnx_dpdma_desc_update_enabled(DPDMADescriptor *desc)
  224. {
  225. return (desc->control & DSCR_CTRL_EN_DSCR_UPDATE) != 0;
  226. }
  227. static inline void xlnx_dpdma_desc_set_done(DPDMADescriptor *desc)
  228. {
  229. desc->timestamp_msb |= STATUS_DONE;
  230. }
  231. static inline bool xlnx_dpdma_desc_is_already_done(DPDMADescriptor *desc)
  232. {
  233. return (desc->timestamp_msb & STATUS_DONE) != 0;
  234. }
  235. static inline bool xlnx_dpdma_desc_ignore_done_bit(DPDMADescriptor *desc)
  236. {
  237. return (desc->control & DSCR_CTRL_IGNORE_DONE) != 0;
  238. }
/*
 * Migration state: the raw register file plus the per-channel
 * "operation finished" flags (six channels).
 */
static const VMStateDescription vmstate_xlnx_dpdma = {
    .name = TYPE_XLNX_DPDMA,
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(registers, XlnxDPDMAState,
                             XLNX_DPDMA_REG_ARRAY_SIZE),
        VMSTATE_BOOL_ARRAY(operation_finished, XlnxDPDMAState, 6),
        VMSTATE_END_OF_LIST()
    }
};
  249. static void xlnx_dpdma_update_irq(XlnxDPDMAState *s)
  250. {
  251. bool flags;
  252. flags = ((s->registers[DPDMA_ISR] & (~s->registers[DPDMA_IMR]))
  253. || (s->registers[DPDMA_EISR] & (~s->registers[DPDMA_EIMR])));
  254. qemu_set_irq(s->irq, flags);
  255. }
  256. static uint64_t xlnx_dpdma_descriptor_start_address(XlnxDPDMAState *s,
  257. uint8_t channel)
  258. {
  259. return (s->registers[DPDMA_DSCR_STRT_ADDRE_CH(channel)] << 16)
  260. + s->registers[DPDMA_DSCR_STRT_ADDR_CH(channel)];
  261. }
  262. static uint64_t xlnx_dpdma_descriptor_next_address(XlnxDPDMAState *s,
  263. uint8_t channel)
  264. {
  265. return ((uint64_t)s->registers[DPDMA_DSCR_NEXT_ADDRE_CH(channel)] << 32)
  266. + s->registers[DPDMA_DSCR_NEXT_ADDR_CH(channel)];
  267. }
  268. static bool xlnx_dpdma_is_channel_enabled(XlnxDPDMAState *s,
  269. uint8_t channel)
  270. {
  271. return (s->registers[DPDMA_CNTL_CH(channel)] & DPDMA_CNTL_CH_EN) != 0;
  272. }
  273. static bool xlnx_dpdma_is_channel_paused(XlnxDPDMAState *s,
  274. uint8_t channel)
  275. {
  276. return (s->registers[DPDMA_CNTL_CH(channel)] & DPDMA_CNTL_CH_PAUSED) != 0;
  277. }
  278. static inline bool xlnx_dpdma_is_channel_retriggered(XlnxDPDMAState *s,
  279. uint8_t channel)
  280. {
  281. /* Clear the retriggered bit after reading it. */
  282. bool channel_is_retriggered = s->registers[DPDMA_GBL]
  283. & DPDMA_GBL_RTRG_CH(channel);
  284. s->registers[DPDMA_GBL] &= ~DPDMA_GBL_RTRG_CH(channel);
  285. return channel_is_retriggered;
  286. }
  287. static inline bool xlnx_dpdma_is_channel_triggered(XlnxDPDMAState *s,
  288. uint8_t channel)
  289. {
  290. return s->registers[DPDMA_GBL] & DPDMA_GBL_TRG_CH(channel);
  291. }
  292. static void xlnx_dpdma_update_desc_info(XlnxDPDMAState *s, uint8_t channel,
  293. DPDMADescriptor *desc)
  294. {
  295. s->registers[DPDMA_DSCR_NEXT_ADDRE_CH(channel)] =
  296. extract32(desc->address_extension, 0, 16);
  297. s->registers[DPDMA_DSCR_NEXT_ADDR_CH(channel)] = desc->next_descriptor;
  298. s->registers[DPDMA_PYLD_CUR_ADDRE_CH(channel)] =
  299. extract32(desc->address_extension, 16, 16);
  300. s->registers[DPDMA_PYLD_CUR_ADDR_CH(channel)] = desc->source_address;
  301. s->registers[DPDMA_VDO_CH(channel)] =
  302. extract32(desc->line_size_stride, 18, 14)
  303. + (extract32(desc->line_size_stride, 0, 18)
  304. << 14);
  305. s->registers[DPDMA_PYLD_SZ_CH(channel)] = desc->xfer_size;
  306. s->registers[DPDMA_DSCR_ID_CH(channel)] = desc->descriptor_id;
  307. /* Compute the status register with the descriptor information. */
  308. s->registers[DPDMA_STATUS_CH(channel)] =
  309. extract32(desc->control, 0, 8) << 13;
  310. if ((desc->control & DSCR_CTRL_EN_DSCR_DONE_INTR) != 0) {
  311. s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_DSCR_INTR;
  312. }
  313. if ((desc->control & DSCR_CTRL_EN_DSCR_UPDATE) != 0) {
  314. s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_DSCR_UP;
  315. }
  316. if ((desc->timestamp_msb & STATUS_DONE) != 0) {
  317. s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_DSCR_DONE;
  318. }
  319. if ((desc->control & DSCR_CTRL_IGNORE_DONE) != 0) {
  320. s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_IGNR_DONE;
  321. }
  322. if ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR_OF_FRAME) != 0) {
  323. s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_LDSCR_FRAME;
  324. }
  325. if ((desc->control & DSCR_CTRL_LAST_DESCRIPTOR) != 0) {
  326. s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_LAST_DSCR;
  327. }
  328. if ((desc->control & DSCR_CTRL_ENABLE_CRC) != 0) {
  329. s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_EN_CRC;
  330. }
  331. if ((desc->control & DSCR_CTRL_DESCRIPTOR_MODE) != 0) {
  332. s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_MODE;
  333. }
  334. if ((desc->control & DSCR_CTRL_AXI_BURST_TYPE) != 0) {
  335. s->registers[DPDMA_STATUS_CH(channel)] |= DPDMA_STATUS_BURST_TYPE;
  336. }
  337. }
  338. static void xlnx_dpdma_dump_descriptor(DPDMADescriptor *desc)
  339. {
  340. if (DEBUG_DPDMA) {
  341. qemu_log("DUMP DESCRIPTOR:\n");
  342. qemu_hexdump(stdout, "", desc, sizeof(DPDMADescriptor));
  343. }
  344. }
  345. static uint64_t xlnx_dpdma_read(void *opaque, hwaddr offset,
  346. unsigned size)
  347. {
  348. XlnxDPDMAState *s = XLNX_DPDMA(opaque);
  349. DPRINTF("read @%" HWADDR_PRIx "\n", offset);
  350. offset = offset >> 2;
  351. switch (offset) {
  352. /*
  353. * Trying to read a write only register.
  354. */
  355. case DPDMA_GBL:
  356. return 0;
  357. default:
  358. assert(offset <= (0xFFC >> 2));
  359. return s->registers[offset];
  360. }
  361. return 0;
  362. }
  363. static void xlnx_dpdma_write(void *opaque, hwaddr offset,
  364. uint64_t value, unsigned size)
  365. {
  366. XlnxDPDMAState *s = XLNX_DPDMA(opaque);
  367. DPRINTF("write @%" HWADDR_PRIx " = %" PRIx64 "\n", offset, value);
  368. offset = offset >> 2;
  369. switch (offset) {
  370. case DPDMA_ISR:
  371. s->registers[DPDMA_ISR] &= ~value;
  372. xlnx_dpdma_update_irq(s);
  373. break;
  374. case DPDMA_IEN:
  375. s->registers[DPDMA_IMR] &= ~value;
  376. break;
  377. case DPDMA_IDS:
  378. s->registers[DPDMA_IMR] |= value;
  379. break;
  380. case DPDMA_EISR:
  381. s->registers[DPDMA_EISR] &= ~value;
  382. xlnx_dpdma_update_irq(s);
  383. break;
  384. case DPDMA_EIEN:
  385. s->registers[DPDMA_EIMR] &= ~value;
  386. break;
  387. case DPDMA_EIDS:
  388. s->registers[DPDMA_EIMR] |= value;
  389. break;
  390. case DPDMA_IMR:
  391. case DPDMA_EIMR:
  392. case DPDMA_DSCR_NEXT_ADDRE_CH(0):
  393. case DPDMA_DSCR_NEXT_ADDRE_CH(1):
  394. case DPDMA_DSCR_NEXT_ADDRE_CH(2):
  395. case DPDMA_DSCR_NEXT_ADDRE_CH(3):
  396. case DPDMA_DSCR_NEXT_ADDRE_CH(4):
  397. case DPDMA_DSCR_NEXT_ADDRE_CH(5):
  398. case DPDMA_DSCR_NEXT_ADDR_CH(0):
  399. case DPDMA_DSCR_NEXT_ADDR_CH(1):
  400. case DPDMA_DSCR_NEXT_ADDR_CH(2):
  401. case DPDMA_DSCR_NEXT_ADDR_CH(3):
  402. case DPDMA_DSCR_NEXT_ADDR_CH(4):
  403. case DPDMA_DSCR_NEXT_ADDR_CH(5):
  404. case DPDMA_PYLD_CUR_ADDRE_CH(0):
  405. case DPDMA_PYLD_CUR_ADDRE_CH(1):
  406. case DPDMA_PYLD_CUR_ADDRE_CH(2):
  407. case DPDMA_PYLD_CUR_ADDRE_CH(3):
  408. case DPDMA_PYLD_CUR_ADDRE_CH(4):
  409. case DPDMA_PYLD_CUR_ADDRE_CH(5):
  410. case DPDMA_PYLD_CUR_ADDR_CH(0):
  411. case DPDMA_PYLD_CUR_ADDR_CH(1):
  412. case DPDMA_PYLD_CUR_ADDR_CH(2):
  413. case DPDMA_PYLD_CUR_ADDR_CH(3):
  414. case DPDMA_PYLD_CUR_ADDR_CH(4):
  415. case DPDMA_PYLD_CUR_ADDR_CH(5):
  416. case DPDMA_STATUS_CH(0):
  417. case DPDMA_STATUS_CH(1):
  418. case DPDMA_STATUS_CH(2):
  419. case DPDMA_STATUS_CH(3):
  420. case DPDMA_STATUS_CH(4):
  421. case DPDMA_STATUS_CH(5):
  422. case DPDMA_VDO_CH(0):
  423. case DPDMA_VDO_CH(1):
  424. case DPDMA_VDO_CH(2):
  425. case DPDMA_VDO_CH(3):
  426. case DPDMA_VDO_CH(4):
  427. case DPDMA_VDO_CH(5):
  428. case DPDMA_PYLD_SZ_CH(0):
  429. case DPDMA_PYLD_SZ_CH(1):
  430. case DPDMA_PYLD_SZ_CH(2):
  431. case DPDMA_PYLD_SZ_CH(3):
  432. case DPDMA_PYLD_SZ_CH(4):
  433. case DPDMA_PYLD_SZ_CH(5):
  434. case DPDMA_DSCR_ID_CH(0):
  435. case DPDMA_DSCR_ID_CH(1):
  436. case DPDMA_DSCR_ID_CH(2):
  437. case DPDMA_DSCR_ID_CH(3):
  438. case DPDMA_DSCR_ID_CH(4):
  439. case DPDMA_DSCR_ID_CH(5):
  440. /*
  441. * Trying to write to a read only register..
  442. */
  443. break;
  444. case DPDMA_GBL:
  445. /*
  446. * This is a write only register so it's read as zero in the read
  447. * callback.
  448. * We store the value anyway so we can know if the channel is
  449. * enabled.
  450. */
  451. s->registers[offset] |= value & 0x00000FFF;
  452. break;
  453. case DPDMA_DSCR_STRT_ADDRE_CH(0):
  454. case DPDMA_DSCR_STRT_ADDRE_CH(1):
  455. case DPDMA_DSCR_STRT_ADDRE_CH(2):
  456. case DPDMA_DSCR_STRT_ADDRE_CH(3):
  457. case DPDMA_DSCR_STRT_ADDRE_CH(4):
  458. case DPDMA_DSCR_STRT_ADDRE_CH(5):
  459. value &= 0x0000FFFF;
  460. s->registers[offset] = value;
  461. break;
  462. case DPDMA_CNTL_CH(0):
  463. s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(0);
  464. value &= 0x3FFFFFFF;
  465. s->registers[offset] = value;
  466. break;
  467. case DPDMA_CNTL_CH(1):
  468. s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(1);
  469. value &= 0x3FFFFFFF;
  470. s->registers[offset] = value;
  471. break;
  472. case DPDMA_CNTL_CH(2):
  473. s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(2);
  474. value &= 0x3FFFFFFF;
  475. s->registers[offset] = value;
  476. break;
  477. case DPDMA_CNTL_CH(3):
  478. s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(3);
  479. value &= 0x3FFFFFFF;
  480. s->registers[offset] = value;
  481. break;
  482. case DPDMA_CNTL_CH(4):
  483. s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(4);
  484. value &= 0x3FFFFFFF;
  485. s->registers[offset] = value;
  486. break;
  487. case DPDMA_CNTL_CH(5):
  488. s->registers[DPDMA_GBL] &= ~DPDMA_GBL_TRG_CH(5);
  489. value &= 0x3FFFFFFF;
  490. s->registers[offset] = value;
  491. break;
  492. default:
  493. assert(offset <= (0xFFC >> 2));
  494. s->registers[offset] = value;
  495. break;
  496. }
  497. }
/* MMIO ops for the register file: strictly 32-bit, aligned accesses. */
static const MemoryRegionOps dma_ops = {
    .read = xlnx_dpdma_read,
    .write = xlnx_dpdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  511. static void xlnx_dpdma_init(Object *obj)
  512. {
  513. SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
  514. XlnxDPDMAState *s = XLNX_DPDMA(obj);
  515. memory_region_init_io(&s->iomem, obj, &dma_ops, s,
  516. TYPE_XLNX_DPDMA, 0x1000);
  517. sysbus_init_mmio(sbd, &s->iomem);
  518. sysbus_init_irq(sbd, &s->irq);
  519. }
  520. static void xlnx_dpdma_reset(DeviceState *dev)
  521. {
  522. XlnxDPDMAState *s = XLNX_DPDMA(dev);
  523. size_t i;
  524. memset(s->registers, 0, sizeof(s->registers));
  525. s->registers[DPDMA_IMR] = 0x07FFFFFF;
  526. s->registers[DPDMA_EIMR] = 0xFFFFFFFF;
  527. s->registers[DPDMA_ALC0_MIN] = 0x0000FFFF;
  528. s->registers[DPDMA_ALC1_MIN] = 0x0000FFFF;
  529. for (i = 0; i < 6; i++) {
  530. s->data[i] = NULL;
  531. s->operation_finished[i] = true;
  532. }
  533. }
static void xlnx_dpdma_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Wire up migration state and the legacy reset handler. */
    dc->vmsd = &vmstate_xlnx_dpdma;
    device_class_set_legacy_reset(dc, xlnx_dpdma_reset);
}
/* QOM type description: a sysbus device holding the DPDMA state. */
static const TypeInfo xlnx_dpdma_info = {
    .name = TYPE_XLNX_DPDMA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxDPDMAState),
    .instance_init = xlnx_dpdma_init,
    .class_init = xlnx_dpdma_class_init,
};
/* Registered with QOM via type_init() at the bottom of this file. */
static void xlnx_dpdma_register_types(void)
{
    type_register_static(&xlnx_dpdma_info);
}
  551. static MemTxResult xlnx_dpdma_read_descriptor(XlnxDPDMAState *s,
  552. uint64_t desc_addr,
  553. DPDMADescriptor *desc)
  554. {
  555. MemTxResult res = dma_memory_read(&address_space_memory, desc_addr,
  556. desc, sizeof(DPDMADescriptor),
  557. MEMTXATTRS_UNSPECIFIED);
  558. if (res) {
  559. return res;
  560. }
  561. /* Convert from LE into host endianness. */
  562. desc->control = le32_to_cpu(desc->control);
  563. desc->descriptor_id = le32_to_cpu(desc->descriptor_id);
  564. desc->xfer_size = le32_to_cpu(desc->xfer_size);
  565. desc->line_size_stride = le32_to_cpu(desc->line_size_stride);
  566. desc->timestamp_lsb = le32_to_cpu(desc->timestamp_lsb);
  567. desc->timestamp_msb = le32_to_cpu(desc->timestamp_msb);
  568. desc->address_extension = le32_to_cpu(desc->address_extension);
  569. desc->next_descriptor = le32_to_cpu(desc->next_descriptor);
  570. desc->source_address = le32_to_cpu(desc->source_address);
  571. desc->address_extension_23 = le32_to_cpu(desc->address_extension_23);
  572. desc->address_extension_45 = le32_to_cpu(desc->address_extension_45);
  573. desc->source_address2 = le32_to_cpu(desc->source_address2);
  574. desc->source_address3 = le32_to_cpu(desc->source_address3);
  575. desc->source_address4 = le32_to_cpu(desc->source_address4);
  576. desc->source_address5 = le32_to_cpu(desc->source_address5);
  577. desc->crc = le32_to_cpu(desc->crc);
  578. return res;
  579. }
  580. static MemTxResult xlnx_dpdma_write_descriptor(uint64_t desc_addr,
  581. DPDMADescriptor *desc)
  582. {
  583. DPDMADescriptor tmp_desc = *desc;
  584. /* Convert from host endianness into LE. */
  585. tmp_desc.control = cpu_to_le32(tmp_desc.control);
  586. tmp_desc.descriptor_id = cpu_to_le32(tmp_desc.descriptor_id);
  587. tmp_desc.xfer_size = cpu_to_le32(tmp_desc.xfer_size);
  588. tmp_desc.line_size_stride = cpu_to_le32(tmp_desc.line_size_stride);
  589. tmp_desc.timestamp_lsb = cpu_to_le32(tmp_desc.timestamp_lsb);
  590. tmp_desc.timestamp_msb = cpu_to_le32(tmp_desc.timestamp_msb);
  591. tmp_desc.address_extension = cpu_to_le32(tmp_desc.address_extension);
  592. tmp_desc.next_descriptor = cpu_to_le32(tmp_desc.next_descriptor);
  593. tmp_desc.source_address = cpu_to_le32(tmp_desc.source_address);
  594. tmp_desc.address_extension_23 = cpu_to_le32(tmp_desc.address_extension_23);
  595. tmp_desc.address_extension_45 = cpu_to_le32(tmp_desc.address_extension_45);
  596. tmp_desc.source_address2 = cpu_to_le32(tmp_desc.source_address2);
  597. tmp_desc.source_address3 = cpu_to_le32(tmp_desc.source_address3);
  598. tmp_desc.source_address4 = cpu_to_le32(tmp_desc.source_address4);
  599. tmp_desc.source_address5 = cpu_to_le32(tmp_desc.source_address5);
  600. tmp_desc.crc = cpu_to_le32(tmp_desc.crc);
  601. return dma_memory_write(&address_space_memory, desc_addr, &tmp_desc,
  602. sizeof(DPDMADescriptor), MEMTXATTRS_UNSPECIFIED);
  603. }
  604. size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel,
  605. bool one_desc)
  606. {
  607. uint64_t desc_addr;
  608. uint64_t source_addr[6];
  609. DPDMADescriptor desc;
  610. bool done = false;
  611. size_t ptr = 0;
  612. assert(channel <= 5);
  613. DPRINTF("start dpdma channel 0x%" PRIX8 "\n", channel);
  614. if (!xlnx_dpdma_is_channel_triggered(s, channel)) {
  615. DPRINTF("Channel isn't triggered..\n");
  616. return 0;
  617. }
  618. if (!xlnx_dpdma_is_channel_enabled(s, channel)) {
  619. DPRINTF("Channel isn't enabled..\n");
  620. return 0;
  621. }
  622. if (xlnx_dpdma_is_channel_paused(s, channel)) {
  623. DPRINTF("Channel is paused..\n");
  624. return 0;
  625. }
  626. do {
  627. if ((s->operation_finished[channel])
  628. || xlnx_dpdma_is_channel_retriggered(s, channel)) {
  629. desc_addr = xlnx_dpdma_descriptor_start_address(s, channel);
  630. s->operation_finished[channel] = false;
  631. } else {
  632. desc_addr = xlnx_dpdma_descriptor_next_address(s, channel);
  633. }
  634. if (xlnx_dpdma_read_descriptor(s, desc_addr, &desc)) {
  635. s->registers[DPDMA_EISR] |= ((1 << 1) << channel);
  636. xlnx_dpdma_update_irq(s);
  637. s->operation_finished[channel] = true;
  638. DPRINTF("Can't get the descriptor.\n");
  639. break;
  640. }
  641. xlnx_dpdma_update_desc_info(s, channel, &desc);
  642. #ifdef DEBUG_DPDMA
  643. xlnx_dpdma_dump_descriptor(&desc);
  644. #endif
  645. DPRINTF("location of the descriptor: %" PRIx64 "\n", desc_addr);
  646. if (!xlnx_dpdma_desc_is_valid(&desc)) {
  647. s->registers[DPDMA_EISR] |= ((1 << 7) << channel);
  648. xlnx_dpdma_update_irq(s);
  649. s->operation_finished[channel] = true;
  650. DPRINTF("Invalid descriptor..\n");
  651. break;
  652. }
  653. if (xlnx_dpdma_desc_crc_enabled(&desc)
  654. && !xlnx_dpdma_desc_check_crc(&desc)) {
  655. s->registers[DPDMA_EISR] |= ((1 << 13) << channel);
  656. xlnx_dpdma_update_irq(s);
  657. s->operation_finished[channel] = true;
  658. DPRINTF("Bad CRC for descriptor..\n");
  659. break;
  660. }
  661. if (xlnx_dpdma_desc_is_already_done(&desc)
  662. && !xlnx_dpdma_desc_ignore_done_bit(&desc)) {
  663. /* We are trying to process an already processed descriptor. */
  664. s->registers[DPDMA_EISR] |= ((1 << 25) << channel);
  665. xlnx_dpdma_update_irq(s);
  666. s->operation_finished[channel] = true;
  667. DPRINTF("Already processed descriptor..\n");
  668. break;
  669. }
  670. done = xlnx_dpdma_desc_is_last(&desc)
  671. || xlnx_dpdma_desc_is_last_of_frame(&desc);
  672. s->operation_finished[channel] = done;
  673. if (s->data[channel]) {
  674. int64_t transfer_len = xlnx_dpdma_desc_get_transfer_size(&desc);
  675. uint32_t line_size = xlnx_dpdma_desc_get_line_size(&desc);
  676. uint32_t line_stride = xlnx_dpdma_desc_get_line_stride(&desc);
  677. if (xlnx_dpdma_desc_is_contiguous(&desc)) {
  678. source_addr[0] = xlnx_dpdma_desc_get_source_address(&desc, 0);
  679. while (transfer_len != 0) {
  680. if (dma_memory_read(&address_space_memory,
  681. source_addr[0],
  682. &s->data[channel][ptr],
  683. line_size,
  684. MEMTXATTRS_UNSPECIFIED)) {
  685. s->registers[DPDMA_ISR] |= ((1 << 12) << channel);
  686. xlnx_dpdma_update_irq(s);
  687. DPRINTF("Can't get data.\n");
  688. break;
  689. }
  690. ptr += line_size;
  691. transfer_len -= line_size;
  692. source_addr[0] += line_stride;
  693. }
  694. } else {
  695. DPRINTF("Source address:\n");
  696. int frag;
  697. for (frag = 0; frag < 5; frag++) {
  698. source_addr[frag] =
  699. xlnx_dpdma_desc_get_source_address(&desc, frag);
  700. DPRINTF("Fragment %u: %" PRIx64 "\n", frag + 1,
  701. source_addr[frag]);
  702. }
  703. frag = 0;
  704. while ((transfer_len < 0) && (frag < 5)) {
  705. size_t fragment_len = DPDMA_FRAG_MAX_SZ
  706. - (source_addr[frag] % DPDMA_FRAG_MAX_SZ);
  707. if (dma_memory_read(&address_space_memory,
  708. source_addr[frag],
  709. &(s->data[channel][ptr]),
  710. fragment_len,
  711. MEMTXATTRS_UNSPECIFIED)) {
  712. s->registers[DPDMA_ISR] |= ((1 << 12) << channel);
  713. xlnx_dpdma_update_irq(s);
  714. DPRINTF("Can't get data.\n");
  715. break;
  716. }
  717. ptr += fragment_len;
  718. transfer_len -= fragment_len;
  719. frag += 1;
  720. }
  721. }
  722. }
  723. if (xlnx_dpdma_desc_update_enabled(&desc)) {
  724. /* The descriptor need to be updated when it's completed. */
  725. DPRINTF("update the descriptor with the done flag set.\n");
  726. xlnx_dpdma_desc_set_done(&desc);
  727. if (xlnx_dpdma_write_descriptor(desc_addr, &desc)) {
  728. DPRINTF("Can't write the descriptor.\n");
  729. /* TODO: check hardware behaviour for memory write failure */
  730. }
  731. }
  732. if (xlnx_dpdma_desc_completion_interrupt(&desc)) {
  733. DPRINTF("completion interrupt enabled!\n");
  734. s->registers[DPDMA_ISR] |= (1 << channel);
  735. xlnx_dpdma_update_irq(s);
  736. }
  737. } while (!done && !one_desc);
  738. return ptr;
  739. }
/*
 * Register the host-side buffer that xlnx_dpdma_start_operation() will
 * copy channel payload data into.  @p may be NULL to detach the buffer.
 * Ownership of @p stays with the caller.
 */
void xlnx_dpdma_set_host_data_location(XlnxDPDMAState *s, uint8_t channel,
                                       void *p)
{
    if (!s) {
        qemu_log_mask(LOG_UNIMP, "DPDMA client not attached to valid DPDMA"
                      " instance\n");
        return;
    }

    assert(channel <= 5);
    s->data[channel] = p;
}
/* Raise the VSYNC interrupt (ISR bit 27) towards the guest. */
void xlnx_dpdma_trigger_vsync_irq(XlnxDPDMAState *s)
{
    s->registers[DPDMA_ISR] |= (1 << 27);
    xlnx_dpdma_update_irq(s);
}
  756. type_init(xlnx_dpdma_register_types)