/* xilinx_axidma.c */
/*
 * QEMU model of Xilinx AXI-DMA block.
 *
 * Copyright (c) 2011 Edgar E. Iglesias.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "sysbus.h"
#include "qemu/timer.h"
#include "ptimer.h"
#include "qemu/log.h"
#include "qdev-addr.h"
#include "stream.h"

#define D(x)

#define R_DMACR     (0x00 / 4)
#define R_DMASR     (0x04 / 4)
#define R_CURDESC   (0x08 / 4)
#define R_TAILDESC  (0x10 / 4)
#define R_MAX       (0x30 / 4)

enum {
    DMACR_RUNSTOP = 1,
    DMACR_TAILPTR_MODE = 2,
    DMACR_RESET = 4
};

enum {
    DMASR_HALTED = 1,
    DMASR_IDLE = 2,
    DMASR_IOC_IRQ = 1 << 12,
    DMASR_DLY_IRQ = 1 << 13,
    DMASR_IRQ_MASK = 7 << 12
};
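
/*
 * In-memory layout of a scatter-gather descriptor, as loaded from and
 * stored back to guest memory by stream_desc_load()/stream_desc_store().
 */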
struct SDesc {
    uint64_t nxtdesc;
    uint64_t buffer_address;
    uint64_t reserved;
    uint32_t control;
    uint32_t status;
    uint32_t app[6];
};

enum {
    SDESC_CTRL_EOF = (1 << 26),
    SDESC_CTRL_SOF = (1 << 27),
    SDESC_CTRL_LEN_MASK = (1 << 23) - 1
};

enum {
    SDESC_STATUS_EOF = (1 << 26),
    SDESC_STATUS_SOF_BIT = 27,
    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
    SDESC_STATUS_COMPLETE = (1 << 31)
};
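
/*
 * Per-channel state. streams[0] is the MM2S (transmit) channel and
 * streams[1] is the S2MM (receive) channel; see streamid_from_addr().
 */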
struct Stream {
    QEMUBH *bh;
    ptimer_state *ptimer;
    qemu_irq irq;

    int nr;

    struct SDesc desc;
    int pos;
    unsigned int complete_cnt;
    uint32_t regs[R_MAX];
};

struct XilinxAXIDMA {
    SysBusDevice busdev;
    MemoryRegion iomem;
    uint32_t freqhz;
    StreamSlave *tx_dev;

    struct Stream streams[2];
};

/*
 * Helper calls to extract info from descriptors and other trivial
 * state from regs.
 */
static inline int stream_desc_sof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_SOF;
}

static inline int stream_desc_eof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_EOF;
}

static inline int stream_resetting(struct Stream *s)
{
    return !!(s->regs[R_DMACR] & DMACR_RESET);
}

static inline int stream_running(struct Stream *s)
{
    return s->regs[R_DMACR] & DMACR_RUNSTOP;
}

static inline int stream_halted(struct Stream *s)
{
    return s->regs[R_DMASR] & DMASR_HALTED;
}

static inline int stream_idle(struct Stream *s)
{
    return !!(s->regs[R_DMASR] & DMASR_IDLE);
}

static void stream_reset(struct Stream *s)
{
    s->regs[R_DMASR] = DMASR_HALTED;  /* starts up halted.  */
    s->regs[R_DMACR] = 1 << 16;  /* Starts with one in compl threshold.  */
}

/* Map an offset addr into a channel index.  */
static inline int streamid_from_addr(hwaddr addr)
{
    int sid;

    sid = addr / (0x30);
    sid &= 1;
    return sid;
}

#ifdef DEBUG_ENET
static void stream_desc_show(struct SDesc *d)
{
    qemu_log("buffer_addr = %" PRIx64 "\n", d->buffer_address);
    qemu_log("nxtdesc     = %" PRIx64 "\n", d->nxtdesc);
    qemu_log("control     = %x\n", d->control);
    qemu_log("status      = %x\n", d->status);
}
#endif
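
/*
 * Descriptors are stored little-endian in guest memory; the load/store
 * helpers below byte-swap every field to and from host endianness.
 */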
static void stream_desc_load(struct Stream *s, hwaddr addr)
{
    struct SDesc *d = &s->desc;
    int i;

    cpu_physical_memory_read(addr, (void *) d, sizeof *d);

    /* Convert from LE into host endianness.  */
    d->buffer_address = le64_to_cpu(d->buffer_address);
    d->nxtdesc = le64_to_cpu(d->nxtdesc);
    d->control = le32_to_cpu(d->control);
    d->status = le32_to_cpu(d->status);
    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
        d->app[i] = le32_to_cpu(d->app[i]);
    }
}

static void stream_desc_store(struct Stream *s, hwaddr addr)
{
    struct SDesc *d = &s->desc;
    int i;

    /* Convert from host endianness into LE.  */
    d->buffer_address = cpu_to_le64(d->buffer_address);
    d->nxtdesc = cpu_to_le64(d->nxtdesc);
    d->control = cpu_to_le32(d->control);
    d->status = cpu_to_le32(d->status);
    for (i = 0; i < ARRAY_SIZE(d->app); i++) {
        d->app[i] = cpu_to_le32(d->app[i]);
    }
    cpu_physical_memory_write(addr, (void *) d, sizeof *d);
}
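
/*
 * Interrupt and completion bookkeeping: an IRQ line is raised when a
 * pending bit in DMASR is also enabled by the matching bit in DMACR.
 * complete_cnt counts down from the completion threshold in DMACR[23:16];
 * when it reaches zero the IOC interrupt is flagged. The delay timer,
 * programmed from DMACR[31:24], flags the delay interrupt when it expires.
 */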
static void stream_update_irq(struct Stream *s)
{
    unsigned int pending, mask, irq;

    pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
    mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;
    irq = pending & mask;

    qemu_set_irq(s->irq, !!irq);
}

static void stream_reload_complete_cnt(struct Stream *s)
{
    unsigned int comp_th;

    comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
    s->complete_cnt = comp_th;
}

static void timer_hit(void *opaque)
{
    struct Stream *s = opaque;

    stream_reload_complete_cnt(s);
    s->regs[R_DMASR] |= DMASR_DLY_IRQ;
    stream_update_irq(s);
}

static void stream_complete(struct Stream *s)
{
    unsigned int comp_delay;

    /* Start the delayed timer.  */
    comp_delay = s->regs[R_DMACR] >> 24;
    if (comp_delay) {
        ptimer_stop(s->ptimer);
        ptimer_set_count(s->ptimer, comp_delay);
        ptimer_run(s->ptimer, 1);
    }

    s->complete_cnt--;
    if (s->complete_cnt == 0) {
        /* Raise the IOC irq.  */
        s->regs[R_DMASR] |= DMASR_IOC_IRQ;
        stream_reload_complete_cnt(s);
    }
}
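
/*
 * Transmit path (MM2S): walk the descriptor ring starting at CURDESC,
 * gather each fragment into txbuf and push the assembled packet to the
 * connected stream slave when the EOF descriptor is reached. Processing
 * stops on an already-completed descriptor or after the tail descriptor.
 */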
static void stream_process_mem2s(struct Stream *s,
                                 StreamSlave *tx_dev)
{
    uint32_t prev_d;
    unsigned char txbuf[16 * 1024];
    unsigned int txlen;
    uint32_t app[6];

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (1) {
        stream_desc_load(s, s->regs[R_CURDESC]);
        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        if (stream_desc_sof(&s->desc)) {
            s->pos = 0;
            memcpy(app, s->desc.app, sizeof app);
        }

        txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if ((txlen + s->pos) > sizeof txbuf) {
            hw_error("%s: too small internal txbuf! %d\n", __func__,
                     txlen + s->pos);
        }

        cpu_physical_memory_read(s->desc.buffer_address,
                                 txbuf + s->pos, txlen);
        s->pos += txlen;
        if (stream_desc_eof(&s->desc)) {
            stream_push(tx_dev, txbuf, s->pos, app);
            s->pos = 0;
            stream_complete(s);
        }

        /* Update the descriptor.  */
        s->desc.status = txlen | SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);

        /* Advance.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
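
/*
 * Receive path (S2MM): scatter an incoming packet into the buffers
 * described by the RX descriptor ring. SOF is set on the first
 * descriptor used, EOF (plus the app words) on the last one, and every
 * descriptor written is marked complete.
 */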
static void stream_process_s2mem(struct Stream *s,
                                 unsigned char *buf, size_t len, uint32_t *app)
{
    uint32_t prev_d;
    unsigned int rxlen;
    int pos = 0;
    int sof = 1;

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (len) {
        stream_desc_load(s, s->regs[R_CURDESC]);
        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }

        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if (rxlen > len) {
            /* The remaining data fits within this descriptor's buffer.  */
            rxlen = len;
        }

        cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
        len -= rxlen;
        pos += rxlen;

        /* Update the descriptor.  */
        if (!len) {
            int i;

            stream_complete(s);
            for (i = 0; i < 5; i++) {
                s->desc.app[i] = app[i];
            }
            s->desc.status |= SDESC_STATUS_EOF;
        }

        s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
        s->desc.status |= SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);
        sof = 0;

        /* Advance.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
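
/*
 * StreamSlave push handler: data pushed by the attached stream master
 * is scattered into guest memory via the S2MM channel (streams[1]).
 */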
static void
axidma_push(StreamSlave *obj, unsigned char *buf, size_t len, uint32_t *app)
{
    struct XilinxAXIDMA *d = FROM_SYSBUS(typeof(*d), SYS_BUS_DEVICE(obj));
    struct Stream *s = &d->streams[1];

    if (!app) {
        hw_error("No stream app data!\n");
    }
    stream_process_s2mem(s, buf, len, app);
    stream_update_irq(s);
}
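
/*
 * MMIO register read: each channel occupies a 0x30-byte window. DMACR
 * reads clear the self-clearing reset bit, and DMASR reads fold the
 * live completion count and delay-timer value into bits [31:16].
 */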
static uint64_t axidma_read(void *opaque, hwaddr addr,
                            unsigned size)
{
    struct XilinxAXIDMA *d = opaque;
    struct Stream *s;
    uint32_t r = 0;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
        case R_DMACR:
            /* Simulate one cycle's reset delay.  */
            s->regs[addr] &= ~DMACR_RESET;
            r = s->regs[addr];
            break;
        case R_DMASR:
            s->regs[addr] &= 0xffff;
            s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
            s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
            r = s->regs[addr];
            break;
        default:
            r = s->regs[addr];
            D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, sid, addr * 4, r));
            break;
    }
    return r;
}
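
/*
 * MMIO register write: DMACR writes can reset or start the channel,
 * DMASR writes clear pending interrupt bits (write-to-clear), and a
 * TAILDESC write kicks off MM2S processing on channel 0.
 */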
static void axidma_write(void *opaque, hwaddr addr,
                         uint64_t value, unsigned size)
{
    struct XilinxAXIDMA *d = opaque;
    struct Stream *s;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
        case R_DMACR:
            /* Tailptr mode is always on.  */
            value |= DMACR_TAILPTR_MODE;
            /* Remember our previous reset state.  */
            value |= (s->regs[addr] & DMACR_RESET);
            s->regs[addr] = value;

            if (value & DMACR_RESET) {
                stream_reset(s);
            }

            if ((value & 1) && !stream_resetting(s)) {
                /* Start processing.  */
                s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
            }
            stream_reload_complete_cnt(s);
            break;
        case R_DMASR:
            /* Mask away write to clear irq lines.  */
            value &= ~(value & DMASR_IRQ_MASK);
            s->regs[addr] = value;
            break;
        case R_TAILDESC:
            s->regs[addr] = value;
            s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle.  */
            if (!sid) {
                stream_process_mem2s(s, d->tx_dev);
            }
            break;
        default:
            D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                       __func__, sid, addr * 4, (unsigned)value));
            s->regs[addr] = value;
            break;
    }
    stream_update_irq(s);
}

static const MemoryRegionOps axidma_ops = {
    .read = axidma_read,
    .write = axidma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
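
/*
 * SysBus init: one IRQ per channel, a single MMIO region covering both
 * 0x30-byte register windows, and a ptimer per stream for the delayed
 * interrupt, clocked from the "freqhz" property.
 */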
static int xilinx_axidma_init(SysBusDevice *dev)
{
    struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), dev);
    int i;

    sysbus_init_irq(dev, &s->streams[0].irq);
    sysbus_init_irq(dev, &s->streams[1].irq);

    memory_region_init_io(&s->iomem, &axidma_ops, s,
                          "xlnx.axi-dma", R_MAX * 4 * 2);
    sysbus_init_mmio(dev, &s->iomem);

    for (i = 0; i < 2; i++) {
        stream_reset(&s->streams[i]);
        s->streams[i].nr = i;
        s->streams[i].bh = qemu_bh_new(timer_hit, &s->streams[i]);
        s->streams[i].ptimer = ptimer_init(s->streams[i].bh);
        ptimer_set_freq(s->streams[i].ptimer, s->freqhz);
    }
    return 0;
}
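
/*
 * The "axistream-connected" link property selects the StreamSlave that
 * MM2S data is pushed to (stored in tx_dev).
 */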
static void xilinx_axidma_initfn(Object *obj)
{
    struct XilinxAXIDMA *s = FROM_SYSBUS(typeof(*s), SYS_BUS_DEVICE(obj));

    object_property_add_link(obj, "axistream-connected", TYPE_STREAM_SLAVE,
                             (Object **) &s->tx_dev, NULL);
}

static Property axidma_properties[] = {
    DEFINE_PROP_UINT32("freqhz", struct XilinxAXIDMA, freqhz, 50000000),
    DEFINE_PROP_END_OF_LIST(),
};

static void axidma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
    StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);

    k->init = xilinx_axidma_init;
    dc->props = axidma_properties;
    ssc->push = axidma_push;
}

static const TypeInfo axidma_info = {
    .name          = "xlnx.axi-dma",
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(struct XilinxAXIDMA),
    .class_init    = axidma_class_init,
    .instance_init = xilinx_axidma_initfn,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static void xilinx_axidma_register_types(void)
{
    type_register_static(&axidma_info);
}

type_init(xilinx_axidma_register_types)