xilinx_axidma.c

/*
 * QEMU model of Xilinx AXI-DMA block.
 *
 * Copyright (c) 2011 Edgar E. Iglesias.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "sysbus.h"
#include "qemu-char.h"
#include "qemu-timer.h"
#include "qemu-log.h"
#include "qdev-addr.h"

#include "xilinx_axidma.h"

#define D(x)

#define R_DMACR     (0x00 / 4)
#define R_DMASR     (0x04 / 4)
#define R_CURDESC   (0x08 / 4)
#define R_TAILDESC  (0x10 / 4)
#define R_MAX       (0x30 / 4)

enum {
    DMACR_RUNSTOP = 1,
    DMACR_TAILPTR_MODE = 2,
    DMACR_RESET = 4
};

enum {
    DMASR_HALTED = 1,
    DMASR_IDLE = 2,
    DMASR_IOC_IRQ = 1 << 12,
    DMASR_DLY_IRQ = 1 << 13,
    DMASR_IRQ_MASK = 7 << 12
};

struct SDesc {
    uint64_t nxtdesc;
    uint64_t buffer_address;
    uint64_t reserved;
    uint32_t control;
    uint32_t status;
    uint32_t app[6];
};

enum {
    SDESC_CTRL_EOF = (1 << 26),
    SDESC_CTRL_SOF = (1 << 27),
    SDESC_CTRL_LEN_MASK = (1 << 23) - 1
};

enum {
    SDESC_STATUS_EOF = (1 << 26),
    SDESC_STATUS_SOF_BIT = 27,
    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
    SDESC_STATUS_COMPLETE = (1 << 31)
};

struct AXIStream {
    QEMUBH *bh;
    ptimer_state *ptimer;
    qemu_irq irq;

    int nr;

    struct SDesc desc;
    int pos;
    unsigned int complete_cnt;
    uint32_t regs[R_MAX];
};

struct XilinxAXIDMA {
    SysBusDevice busdev;
    uint32_t freqhz;
    void *dmach;

    struct AXIStream streams[2];
};

/*
 * Helper calls to extract info from descriptors and other trivial
 * state from regs.
 */
static inline int stream_desc_sof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_SOF;
}

static inline int stream_desc_eof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_EOF;
}

static inline int stream_resetting(struct AXIStream *s)
{
    return !!(s->regs[R_DMACR] & DMACR_RESET);
}

static inline int stream_running(struct AXIStream *s)
{
    return s->regs[R_DMACR] & DMACR_RUNSTOP;
}

static inline int stream_halted(struct AXIStream *s)
{
    return s->regs[R_DMASR] & DMASR_HALTED;
}

static inline int stream_idle(struct AXIStream *s)
{
    return !!(s->regs[R_DMASR] & DMASR_IDLE);
}

static void stream_reset(struct AXIStream *s)
{
    s->regs[R_DMASR] = DMASR_HALTED;  /* Starts up halted. */
    s->regs[R_DMACR] = 1 << 16;       /* Starts with one in compl threshold. */
}

/* Map an offset addr into a channel index. */
static inline int streamid_from_addr(target_phys_addr_t addr)
{
    int sid;

    sid = addr / (0x30);
    sid &= 1;
    return sid;
}

#ifdef DEBUG_ENET
static void stream_desc_show(struct SDesc *d)
{
    qemu_log("buffer_addr = %" PRIx64 "\n", d->buffer_address);
    qemu_log("nxtdesc     = %" PRIx64 "\n", d->nxtdesc);
    qemu_log("control     = %x\n", d->control);
    qemu_log("status      = %x\n", d->status);
}
#endif

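/*
 * Fetch a stream descriptor from guest memory at @addr into s->desc,
 * converting each field from little-endian to host byte order.
 */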
static void stream_desc_load(struct AXIStream *s, target_phys_addr_t addr)
{
    struct SDesc *d = &s->desc;
    int i;

    cpu_physical_memory_read(addr, (void *) d, sizeof *d);

    /* Convert from LE into host endianness. */
    d->buffer_address = le64_to_cpu(d->buffer_address);
    d->nxtdesc = le64_to_cpu(d->nxtdesc);
    d->control = le32_to_cpu(d->control);
    d->status = le32_to_cpu(d->status);
    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
        d->app[i] = le32_to_cpu(d->app[i]);
    }
}

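/*
 * Write s->desc back to guest memory at @addr. The fields are converted
 * in place from host byte order to little-endian before the write.
 */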
static void stream_desc_store(struct AXIStream *s, target_phys_addr_t addr)
{
    struct SDesc *d = &s->desc;
    int i;

    /* Convert from host endianness into LE. */
    d->buffer_address = cpu_to_le64(d->buffer_address);
    d->nxtdesc = cpu_to_le64(d->nxtdesc);
    d->control = cpu_to_le32(d->control);
    d->status = cpu_to_le32(d->status);
    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
        d->app[i] = cpu_to_le32(d->app[i]);
    }
    cpu_physical_memory_write(addr, (void *) d, sizeof *d);
}

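/*
 * Recompute the channel's interrupt line: pending bits in DMASR gated by
 * the corresponding enable bits in DMACR.
 */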
static void stream_update_irq(struct AXIStream *s)
{
    unsigned int pending, mask, irq;

    pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
    mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;
    irq = pending & mask;

    qemu_set_irq(s->irq, !!irq);
}

static void stream_reload_complete_cnt(struct AXIStream *s)
{
    unsigned int comp_th;

    comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
    s->complete_cnt = comp_th;
}

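/* Delay timer expiry: reload the completion counter and raise the
 * delay interrupt. */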
static void timer_hit(void *opaque)
{
    struct AXIStream *s = opaque;

    stream_reload_complete_cnt(s);
    s->regs[R_DMASR] |= DMASR_DLY_IRQ;
    stream_update_irq(s);
}

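/*
 * Called when a packet has been fully transferred on a channel. Restarts
 * the delay timer if a completion delay is configured, and raises the IOC
 * interrupt once the completion-count threshold has been consumed.
 */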
static void stream_complete(struct AXIStream *s)
{
    unsigned int comp_delay;

    /* Start the delayed timer. */
    comp_delay = s->regs[R_DMACR] >> 24;
    if (comp_delay) {
        ptimer_stop(s->ptimer);
        ptimer_set_count(s->ptimer, comp_delay);
        ptimer_run(s->ptimer, 1);
    }

    s->complete_cnt--;
    if (s->complete_cnt == 0) {
        /* Raise the IOC irq. */
        s->regs[R_DMASR] |= DMASR_IOC_IRQ;
        stream_reload_complete_cnt(s);
    }
}

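/*
 * MM2S path: walk the descriptor ring starting at CURDESC, gather the
 * buffers into txbuf and, on EOF, push the assembled packet to the
 * connected DMA client. Stops at a completed descriptor or at TAILDESC.
 */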
static void stream_process_mem2s(struct AXIStream *s,
                                 struct XilinxDMAConnection *dmach)
{
    uint32_t prev_d;
    unsigned char txbuf[16 * 1024];
    unsigned int txlen;
    uint32_t app[6];

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (1) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        if (stream_desc_sof(&s->desc)) {
            s->pos = 0;
            memcpy(app, s->desc.app, sizeof app);
        }

        txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if ((txlen + s->pos) > sizeof txbuf) {
            hw_error("%s: too small internal txbuf! %d\n", __func__,
                     txlen + s->pos);
        }

        cpu_physical_memory_read(s->desc.buffer_address,
                                 txbuf + s->pos, txlen);
        s->pos += txlen;

        if (stream_desc_eof(&s->desc)) {
            xlx_dma_push_to_client(dmach, txbuf, s->pos, app);
            s->pos = 0;
            stream_complete(s);
        }

        /* Update the descriptor. */
        s->desc.status = txlen | SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);

        /* Advance. */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}

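/*
 * S2MM path: scatter an incoming packet of @len bytes into the buffers of
 * the descriptor ring, marking SOF on the first descriptor and EOF (plus
 * the app words) on the last one.
 */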
static void stream_process_s2mem(struct AXIStream *s,
                                 unsigned char *buf, size_t len, uint32_t *app)
{
    uint32_t prev_d;
    unsigned int rxlen;
    int pos = 0;
    int sof = 1;

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (len) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if (rxlen > len) {
            /* The remaining data fits into this descriptor's buffer. */
            rxlen = len;
        }

        cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
        len -= rxlen;
        pos += rxlen;

        /* Update the descriptor. */
        if (!len) {
            int i;

            stream_complete(s);
            for (i = 0; i < 5; i++) {
                s->desc.app[i] = app[i];
            }
            s->desc.status |= SDESC_STATUS_EOF;
        }

        s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
        s->desc.status |= SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);
        sof = 0;

        /* Advance. */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}

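/* Entry point used by the connected client to hand over a received packet;
 * it is processed on the S2MM channel (streams[1]). */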
static
void axidma_push(void *opaque, unsigned char *buf, size_t len, uint32_t *app)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s = &d->streams[1];

    if (!app) {
        hw_error("No stream app data!\n");
    }
    stream_process_s2mem(s, buf, len, app);
    stream_update_irq(s);
}

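/* MMIO read. The two channels' register banks are 0x30 bytes apart;
 * streamid_from_addr() picks the channel, the low bits pick the register. */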
static uint32_t axidma_readl(void *opaque, target_phys_addr_t addr)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s;
    uint32_t r = 0;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
        case R_DMACR:
            /* Simulate one cycle's reset delay. */
            s->regs[addr] &= ~DMACR_RESET;
            r = s->regs[addr];
            break;
        case R_DMASR:
            s->regs[addr] &= 0xffff;
            s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
            s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
            r = s->regs[addr];
            break;
        default:
            r = s->regs[addr];
            D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, sid, addr * 4, r));
            break;
    }
    return r;
}

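/* MMIO write. A write to TAILDESC on channel 0 kicks off MM2S processing. */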
static void
axidma_writel(void *opaque, target_phys_addr_t addr, uint32_t value)
{
    struct XilinxAXIDMA *d = opaque;
    struct AXIStream *s;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
        case R_DMACR:
            /* Tailptr mode is always on. */
            value |= DMACR_TAILPTR_MODE;
            /* Remember our previous reset state. */
            value |= (s->regs[addr] & DMACR_RESET);
            s->regs[addr] = value;

            if (value & DMACR_RESET) {
                stream_reset(s);
            }

            if ((value & 1) && !stream_resetting(s)) {
                /* Start processing. */
                s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
            }
            stream_reload_complete_cnt(s);
            break;
        case R_DMASR:
            /* Mask away write to clear irq lines. */
            value &= ~(value & DMASR_IRQ_MASK);
            s->regs[addr] = value;
            break;
        case R_TAILDESC:
            s->regs[addr] = value;
            s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle. */
            if (!sid) {
                stream_process_mem2s(s, d->dmach);
            }
            break;
        default:
            D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, sid, addr * 4, value));
            s->regs[addr] = value;
            break;
    }
    stream_update_irq(s);
}

static CPUReadMemoryFunc * const axidma_read[] = {
    &axidma_readl,
    &axidma_readl,
    &axidma_readl,
};

static CPUWriteMemoryFunc * const axidma_write[] = {
    &axidma_writel,
    &axidma_writel,
    &axidma_writel,
};

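/* Device init: wire up the per-channel IRQs, attach to the DMA connection,
 * register the MMIO region and reset both streams. */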
static int xilinx_axidma_init(SysBusDevice *dev)
{
    struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), dev);
    int axidma_regs;
    int i;

    sysbus_init_irq(dev, &s->streams[1].irq);
    sysbus_init_irq(dev, &s->streams[0].irq);

    if (!s->dmach) {
        hw_error("Unconnected DMA channel.\n");
    }
    xlx_dma_connect_dma(s->dmach, s, axidma_push);

    axidma_regs = cpu_register_io_memory(axidma_read, axidma_write, s,
                                         DEVICE_NATIVE_ENDIAN);
    sysbus_init_mmio(dev, R_MAX * 4 * 2, axidma_regs);

    for (i = 0; i < 2; i++) {
        stream_reset(&s->streams[i]);
        s->streams[i].nr = i;
        s->streams[i].bh = qemu_bh_new(timer_hit, &s->streams[i]);
        s->streams[i].ptimer = ptimer_init(s->streams[i].bh);
        ptimer_set_freq(s->streams[i].ptimer, s->freqhz);
    }
    return 0;
}

static SysBusDeviceInfo axidma_info = {
    .init = xilinx_axidma_init,
    .qdev.name  = "xilinx,axidma",
    .qdev.size  = sizeof(struct XilinxAXIDMA),
    .qdev.props = (Property[]) {
        DEFINE_PROP_UINT32("freqhz", struct XilinxAXIDMA, freqhz, 50000000),
        DEFINE_PROP_PTR("dmach", struct XilinxAXIDMA, dmach),
        DEFINE_PROP_END_OF_LIST(),
    }
};

static void xilinx_axidma_register(void)
{
    sysbus_register_withprop(&axidma_info);
}

device_init(xilinx_axidma_register)
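
/*
 * Example board wiring (a sketch only; the MMIO address, IRQ names and the
 * "dmach" connection object are illustrative and not taken from this file;
 * note that sysbus IRQ 0 is the S2MM channel and IRQ 1 is MM2S, matching
 * the sysbus_init_irq() order above):
 *
 *   DeviceState *dev = qdev_create(NULL, "xilinx,axidma");
 *   qdev_prop_set_uint32(dev, "freqhz", 100 * 1000000);
 *   qdev_prop_set_ptr(dev, "dmach", &dmach);
 *   qdev_init_nofail(dev);
 *   sysbus_mmio_map(sysbus_from_qdev(dev), 0, 0x84600000);
 *   sysbus_connect_irq(sysbus_from_qdev(dev), 0, irq_s2mm);
 *   sysbus_connect_irq(sysbus_from_qdev(dev), 1, irq_mm2s);
 */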