/*
 * Intel XScale PXA255/270 DMA controller.
 *
 * Copyright (c) 2006 Openedhand Ltd.
 * Copyright (c) 2006 Thorsten Zitterell
 * Written by Andrzej Zaborowski <balrog@zabor.org>
 *
 * This code is licensed under the GPL.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/arm/pxa.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/module.h"

#define PXA255_DMA_NUM_CHANNELS 16
#define PXA27X_DMA_NUM_CHANNELS 32

#define PXA2XX_DMA_NUM_REQUESTS 75

typedef struct {
    uint32_t descr;
    uint32_t src;
    uint32_t dest;
    uint32_t cmd;
    uint32_t state;
    int request;
} PXA2xxDMAChannel;

#define TYPE_PXA2XX_DMA "pxa2xx-dma"
#define PXA2XX_DMA(obj) OBJECT_CHECK(PXA2xxDMAState, (obj), TYPE_PXA2XX_DMA)

typedef struct PXA2xxDMAState {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    qemu_irq irq;

    uint32_t stopintr;
    uint32_t eorintr;
    uint32_t rasintr;
    uint32_t startintr;
    uint32_t endintr;

    uint32_t align;
    uint32_t pio;

    int channels;
    PXA2xxDMAChannel *chan;

    uint8_t req[PXA2XX_DMA_NUM_REQUESTS];

    /* Flag to avoid recursive DMA invocations. */
    int running;
} PXA2xxDMAState;
#define DCSR0    0x0000 /* DMA Control / Status register for Channel 0 */
#define DCSR31   0x007c /* DMA Control / Status register for Channel 31 */
#define DALGN    0x00a0 /* DMA Alignment register */
#define DPCSR    0x00a4 /* DMA Programmed I/O Control Status register */
#define DRQSR0   0x00e0 /* DMA DREQ<0> Status register */
#define DRQSR1   0x00e4 /* DMA DREQ<1> Status register */
#define DRQSR2   0x00e8 /* DMA DREQ<2> Status register */
#define DINT     0x00f0 /* DMA Interrupt register */
#define DRCMR0   0x0100 /* Request to Channel Map register 0 */
#define DRCMR63  0x01fc /* Request to Channel Map register 63 */
#define D_CH0    0x0200 /* Channel 0 Descriptor start */
#define DRCMR64  0x1100 /* Request to Channel Map register 64 */
#define DRCMR74  0x1128 /* Request to Channel Map register 74 */

/* Per-channel register */
#define DDADR    0x00
#define DSADR    0x01
#define DTADR    0x02
#define DCMD     0x03

/* Bit-field masks */
#define DRCMR_CHLNUM      0x1f
#define DRCMR_MAPVLD      (1 << 7)
#define DDADR_STOP        (1 << 0)
#define DDADR_BREN        (1 << 1)
#define DCMD_LEN          0x1fff
#define DCMD_WIDTH(x)     (1 << ((((x) >> 14) & 3) - 1))
#define DCMD_SIZE(x)      (4 << (((x) >> 16) & 3))
#define DCMD_FLYBYT       (1 << 19)
#define DCMD_FLYBYS       (1 << 20)
#define DCMD_ENDIRQEN     (1 << 21)
#define DCMD_STARTIRQEN   (1 << 22)
#define DCMD_CMPEN        (1 << 25)
#define DCMD_FLOWTRG      (1 << 28)
#define DCMD_FLOWSRC      (1 << 29)
#define DCMD_INCTRGADDR   (1 << 30)
#define DCMD_INCSRCADDR   (1 << 31)
#define DCSR_BUSERRINTR   (1 << 0)
#define DCSR_STARTINTR    (1 << 1)
#define DCSR_ENDINTR      (1 << 2)
#define DCSR_STOPINTR     (1 << 3)
#define DCSR_RASINTR      (1 << 4)
#define DCSR_REQPEND      (1 << 8)
#define DCSR_EORINT       (1 << 9)
#define DCSR_CMPST        (1 << 10)
#define DCSR_MASKRUN      (1 << 22)
#define DCSR_RASIRQEN     (1 << 23)
#define DCSR_CLRCMPST     (1 << 24)
#define DCSR_SETCMPST     (1 << 25)
#define DCSR_EORSTOPEN    (1 << 26)
#define DCSR_EORJMPEN     (1 << 27)
#define DCSR_EORIRQEN     (1 << 28)
#define DCSR_STOPIRQEN    (1 << 29)
#define DCSR_NODESCFETCH  (1 << 30)
#define DCSR_RUN          (1 << 31)
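
/*
 * Recompute the per-channel interrupt bitmaps from channel @ch's state
 * and raise or lower the single outbound IRQ line accordingly.  A
 * negative channel index only re-evaluates the IRQ line.
 */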
static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
{
    if (ch >= 0) {
        if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
                (s->chan[ch].state & DCSR_STOPINTR))
            s->stopintr |= 1 << ch;
        else
            s->stopintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_EORIRQEN) &&
                (s->chan[ch].state & DCSR_EORINT))
            s->eorintr |= 1 << ch;
        else
            s->eorintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_RASIRQEN) &&
                (s->chan[ch].state & DCSR_RASINTR))
            s->rasintr |= 1 << ch;
        else
            s->rasintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_STARTINTR)
            s->startintr |= 1 << ch;
        else
            s->startintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_ENDINTR)
            s->endintr |= 1 << ch;
        else
            s->endintr &= ~(1 << ch);
    }

    if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}
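
/*
 * Load the next 16-byte descriptor (DDADR, SADR, TADR, CMD words) for
 * channel @ch from guest memory.  With branching enabled (DDADR_BREN)
 * and the compare status bit set, the alternate descriptor 32 bytes
 * further on is fetched instead.
 */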
static inline void pxa2xx_dma_descriptor_fetch(
                PXA2xxDMAState *s, int ch)
{
    uint32_t desc[4];
    hwaddr daddr = s->chan[ch].descr & ~0xf;
    if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
        daddr += 32;

    cpu_physical_memory_read(daddr, desc, 16);
    s->chan[ch].descr = desc[DDADR];
    s->chan[ch].src = desc[DSADR];
    s->chan[ch].dest = desc[DTADR];
    s->chan[ch].cmd = desc[DCMD];

    if (s->chan[ch].cmd & DCMD_FLOWSRC)
        s->chan[ch].src &= ~3;
    if (s->chan[ch].cmd & DCMD_FLOWTRG)
        s->chan[ch].dest &= ~3;

    if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
        printf("%s: unsupported mode in channel %i\n", __func__, ch);

    if (s->chan[ch].cmd & DCMD_STARTIRQEN)
        s->chan[ch].state |= DCSR_STARTINTR;
}
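
/*
 * Execute all runnable channels until each completes, stops, or stalls
 * waiting for a peripheral request.  Transfers are performed
 * synchronously through a small bounce buffer; s->running guards
 * against reentry when a transfer itself triggers further DMA.
 */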
static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];
    PXA2xxDMAChannel *ch;

    if (s->running ++)
        return;

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c ++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;

                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                while (length) {
                    size = MIN(length, size);

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                            !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                                !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    if ((ch->state & DCSR_NODESCFETCH) ||
                            (ch->descr & DDADR_STOP) ||
                            (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running --;
    }
}
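
/*
 * MMIO read handler: 32-bit accesses to the request-mapping, request
 * status, channel status, interrupt and per-channel descriptor
 * registers.
 */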
static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
                                unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        hw_error("%s: Bad access width\n", __func__);
        return 5;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;
        return s->req[channel];

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        return 0;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        if (s->chan[channel].request)
            return s->chan[channel].state | DCSR_REQPEND;
        return s->chan[channel].state;

    case DINT:
        return s->stopintr | s->eorintr | s->rasintr |
                s->startintr | s->endintr;

    case DALGN:
        return s->align;

    case DPCSR:
        return s->pio;
    }

    if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
        channel = (offset - D_CH0) >> 4;
        switch ((offset & 0x0f) >> 2) {
        case DDADR:
            return s->chan[channel].descr;
        case DSADR:
            return s->chan[channel].src;
        case DTADR:
            return s->chan[channel].dest;
        case DCMD:
            return s->chan[channel].cmd;
        }
    }

    hw_error("%s: Bad offset 0x" TARGET_FMT_plx "\n", __func__, offset);
    return 7;
}
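
/*
 * MMIO write handler.  Writing a channel's DCSR can clear sticky
 * interrupt status bits, set or clear the compare status, and start
 * the channel in either descriptor-fetch or no-descriptor-fetch mode.
 */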
static void pxa2xx_dma_write(void *opaque, hwaddr offset,
                             uint64_t value, unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        hw_error("%s: Bad access width\n", __func__);
        return;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;

        if (value & DRCMR_MAPVLD)
            if ((value & DRCMR_CHLNUM) > s->channels)
                hw_error("%s: Bad DMA channel %i\n",
                         __func__, (unsigned)value & DRCMR_CHLNUM);

        s->req[channel] = value;
        break;

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        /* Nothing to do */
        break;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        s->chan[channel].state &= 0x0000071f & ~(value &
                        (DCSR_EORINT | DCSR_ENDINTR |
                         DCSR_STARTINTR | DCSR_BUSERRINTR));
        s->chan[channel].state |= value & 0xfc800000;

        if (s->chan[channel].state & DCSR_STOPIRQEN)
            s->chan[channel].state &= ~DCSR_STOPINTR;

        if (value & DCSR_NODESCFETCH) {
            /* No-descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_run(s);
            }
        } else {
            /* Descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_descriptor_fetch(s, channel);
                pxa2xx_dma_run(s);
            }
        }

        /* Shouldn't matter as our DMA is synchronous. */
        if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
            s->chan[channel].state |= DCSR_STOPINTR;

        if (value & DCSR_CLRCMPST)
            s->chan[channel].state &= ~DCSR_CMPST;
        if (value & DCSR_SETCMPST)
            s->chan[channel].state |= DCSR_CMPST;

        pxa2xx_dma_update(s, channel);
        break;

    case DALGN:
        s->align = value;
        break;

    case DPCSR:
        s->pio = value & 0x80000001;
        break;

    default:
        if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
            channel = (offset - D_CH0) >> 4;
            switch ((offset & 0x0f) >> 2) {
            case DDADR:
                s->chan[channel].descr = value;
                break;
            case DSADR:
                s->chan[channel].src = value;
                break;
            case DTADR:
                s->chan[channel].dest = value;
                break;
            case DCMD:
                s->chan[channel].cmd = value;
                break;
            default:
                goto fail;
            }

            break;
        }

    fail:
        hw_error("%s: Bad offset " TARGET_FMT_plx "\n", __func__, offset);
    }
}

static const MemoryRegionOps pxa2xx_dma_ops = {
    .read = pxa2xx_dma_read,
    .write = pxa2xx_dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
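
/*
 * Peripheral DREQ line handler: map the request through DRCMR to its
 * channel, update the request-asserted / end-of-receive status bits,
 * and kick the DMA engine when the line is asserted.
 */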
static void pxa2xx_dma_request(void *opaque, int req_num, int on)
{
    PXA2xxDMAState *s = opaque;
    int ch;
    if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
        hw_error("%s: Bad DMA request %i\n", __func__, req_num);

    if (!(s->req[req_num] & DRCMR_MAPVLD))
        return;
    ch = s->req[req_num] & DRCMR_CHLNUM;

    if (!s->chan[ch].request && on)
        s->chan[ch].state |= DCSR_RASINTR;
    else
        s->chan[ch].state &= ~DCSR_RASINTR;
    if (s->chan[ch].request && !on)
        s->chan[ch].state |= DCSR_EORINT;

    s->chan[ch].request = on;
    if (on) {
        pxa2xx_dma_run(s);
        pxa2xx_dma_update(s, ch);
    }
}
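
/*
 * QOM instance_init: set up the MMIO region, IRQ line and DREQ GPIO
 * inputs.  Channel storage is allocated later, in realize, once the
 * "channels" property has been set.
 */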
static void pxa2xx_dma_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    PXA2xxDMAState *s = PXA2XX_DMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);

    qdev_init_gpio_in(dev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);

    memory_region_init_io(&s->iomem, obj, &pxa2xx_dma_ops, s,
                          "pxa2xx.dma", 0x00010000);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
}

static void pxa2xx_dma_realize(DeviceState *dev, Error **errp)
{
    PXA2xxDMAState *s = PXA2XX_DMA(dev);
    int i;

    if (s->channels <= 0) {
        error_setg(errp, "channels value invalid");
        return;
    }

    s->chan = g_new0(PXA2xxDMAChannel, s->channels);

    for (i = 0; i < s->channels; i ++)
        s->chan[i].state = DCSR_STOPINTR;
}
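
/*
 * Board-level helpers: create a "pxa2xx-dma" device with the channel
 * count of the respective SoC (32 on PXA27x, 16 on PXA255), map it at
 * @base and wire up its interrupt line.
 */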
DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_create(NULL, "pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
    qdev_init_nofail(dev);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}
DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_create(NULL, "pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA255_DMA_NUM_CHANNELS);
    qdev_init_nofail(dev);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}
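
/*
 * Migration state.  Version 0 of the top-level section carried four
 * extra bytes, which VMSTATE_UNUSED_TEST skips when loading a stream
 * of that version.
 */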
static bool is_version_0(void *opaque, int version_id)
{
    return version_id == 0;
}

static VMStateDescription vmstate_pxa2xx_dma_chan = {
    .name = "pxa2xx_dma_chan",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(descr, PXA2xxDMAChannel),
        VMSTATE_UINT32(src, PXA2xxDMAChannel),
        VMSTATE_UINT32(dest, PXA2xxDMAChannel),
        VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
        VMSTATE_UINT32(state, PXA2xxDMAChannel),
        VMSTATE_INT32(request, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

static VMStateDescription vmstate_pxa2xx_dma = {
    .name = "pxa2xx_dma",
    .version_id = 1,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED_TEST(is_version_0, 4),
        VMSTATE_UINT32(stopintr, PXA2xxDMAState),
        VMSTATE_UINT32(eorintr, PXA2xxDMAState),
        VMSTATE_UINT32(rasintr, PXA2xxDMAState),
        VMSTATE_UINT32(startintr, PXA2xxDMAState),
        VMSTATE_UINT32(endintr, PXA2xxDMAState),
        VMSTATE_UINT32(align, PXA2xxDMAState),
        VMSTATE_UINT32(pio, PXA2xxDMAState),
        VMSTATE_BUFFER(req, PXA2xxDMAState),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
                vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};
static Property pxa2xx_dma_properties[] = {
    DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "PXA2xx DMA controller";
    dc->vmsd = &vmstate_pxa2xx_dma;
    dc->props = pxa2xx_dma_properties;
    dc->realize = pxa2xx_dma_realize;
}

static const TypeInfo pxa2xx_dma_info = {
    .name          = TYPE_PXA2XX_DMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PXA2xxDMAState),
    .instance_init = pxa2xx_dma_init,
    .class_init    = pxa2xx_dma_class_init,
};

static void pxa2xx_dma_register_types(void)
{
    type_register_static(&pxa2xx_dma_info);
}

type_init(pxa2xx_dma_register_types)