pxa2xx_dma.c

/*
 * Intel XScale PXA255/270 DMA controller.
 *
 * Copyright (c) 2006 Openedhand Ltd.
 * Copyright (c) 2006 Thorsten Zitterell
 * Written by Andrzej Zaborowski <balrog@zabor.org>
 *
 * This code is licensed under the GPL.
 */

#include "hw.h"
#include "pxa.h"
#include "sysbus.h"

#define PXA255_DMA_NUM_CHANNELS 16
#define PXA27X_DMA_NUM_CHANNELS 32

#define PXA2XX_DMA_NUM_REQUESTS 75
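
/* Per-channel state: the current descriptor pointer, source and target
 * addresses, the command word and the DCSR status bits, plus the level
 * of the peripheral request line currently routed to this channel.  */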
typedef struct {
    target_phys_addr_t descr;
    target_phys_addr_t src;
    target_phys_addr_t dest;
    uint32_t cmd;
    uint32_t state;
    int request;
} PXA2xxDMAChannel;

typedef struct PXA2xxDMAState {
    SysBusDevice busdev;
    qemu_irq irq;

    uint32_t stopintr;
    uint32_t eorintr;
    uint32_t rasintr;
    uint32_t startintr;
    uint32_t endintr;

    uint32_t align;
    uint32_t pio;

    int channels;
    PXA2xxDMAChannel *chan;
    uint8_t req[PXA2XX_DMA_NUM_REQUESTS];

    /* Flag to avoid recursive DMA invocations.  */
    int running;
} PXA2xxDMAState;

#define DCSR0   0x0000  /* DMA Control / Status register for Channel 0 */
#define DCSR31  0x007c  /* DMA Control / Status register for Channel 31 */
#define DALGN   0x00a0  /* DMA Alignment register */
#define DPCSR   0x00a4  /* DMA Programmed I/O Control Status register */
#define DRQSR0  0x00e0  /* DMA DREQ<0> Status register */
#define DRQSR1  0x00e4  /* DMA DREQ<1> Status register */
#define DRQSR2  0x00e8  /* DMA DREQ<2> Status register */
#define DINT    0x00f0  /* DMA Interrupt register */
#define DRCMR0  0x0100  /* Request to Channel Map register 0 */
#define DRCMR63 0x01fc  /* Request to Channel Map register 63 */
#define D_CH0   0x0200  /* Channel 0 Descriptor start */
#define DRCMR64 0x1100  /* Request to Channel Map register 64 */
#define DRCMR74 0x1128  /* Request to Channel Map register 74 */

/* Per-channel registers, as word offsets within a descriptor */
#define DDADR 0x00
#define DSADR 0x01
#define DTADR 0x02
#define DCMD  0x03

/* Bit-field masks */
#define DRCMR_CHLNUM 0x1f
#define DRCMR_MAPVLD (1 << 7)
#define DDADR_STOP (1 << 0)
#define DDADR_BREN (1 << 1)
#define DCMD_LEN 0x1fff
#define DCMD_WIDTH(x) (1 << ((((x) >> 14) & 3) - 1))
#define DCMD_SIZE(x) (4 << (((x) >> 16) & 3))
#define DCMD_FLYBYT (1 << 19)
#define DCMD_FLYBYS (1 << 20)
#define DCMD_ENDIRQEN (1 << 21)
#define DCMD_STARTIRQEN (1 << 22)
#define DCMD_CMPEN (1 << 25)
#define DCMD_FLOWTRG (1 << 28)
#define DCMD_FLOWSRC (1 << 29)
#define DCMD_INCTRGADDR (1 << 30)
#define DCMD_INCSRCADDR (1 << 31)
#define DCSR_BUSERRINTR (1 << 0)
#define DCSR_STARTINTR (1 << 1)
#define DCSR_ENDINTR (1 << 2)
#define DCSR_STOPINTR (1 << 3)
#define DCSR_RASINTR (1 << 4)
#define DCSR_REQPEND (1 << 8)
#define DCSR_EORINT (1 << 9)
#define DCSR_CMPST (1 << 10)
#define DCSR_MASKRUN (1 << 22)
#define DCSR_RASIRQEN (1 << 23)
#define DCSR_CLRCMPST (1 << 24)
#define DCSR_SETCMPST (1 << 25)
#define DCSR_EORSTOPEN (1 << 26)
#define DCSR_EORJMPEN (1 << 27)
#define DCSR_EORIRQEN (1 << 28)
#define DCSR_STOPIRQEN (1 << 29)
#define DCSR_NODESCFETCH (1 << 30)
#define DCSR_RUN (1 << 31)
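
/* Recompute this channel's contribution to the five interrupt summary
 * registers and update the combined interrupt line; a negative ch only
 * re-evaluates the IRQ level.  */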
static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
{
    if (ch >= 0) {
        if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
            (s->chan[ch].state & DCSR_STOPINTR))
            s->stopintr |= 1 << ch;
        else
            s->stopintr &= ~(1 << ch);
        if ((s->chan[ch].state & DCSR_EORIRQEN) &&
            (s->chan[ch].state & DCSR_EORINT))
            s->eorintr |= 1 << ch;
        else
            s->eorintr &= ~(1 << ch);
        if ((s->chan[ch].state & DCSR_RASIRQEN) &&
            (s->chan[ch].state & DCSR_RASINTR))
            s->rasintr |= 1 << ch;
        else
            s->rasintr &= ~(1 << ch);
        if (s->chan[ch].state & DCSR_STARTINTR)
            s->startintr |= 1 << ch;
        else
            s->startintr &= ~(1 << ch);
        if (s->chan[ch].state & DCSR_ENDINTR)
            s->endintr |= 1 << ch;
        else
            s->endintr &= ~(1 << ch);
    }

    if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}
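
/* Load the next 16-byte descriptor (DDADR, DSADR, DTADR, DCMD) for
 * channel ch from guest memory.  When branching is enabled (DDADR_BREN)
 * and the compare status bit is set, the alternate descriptor 32 bytes
 * further on is fetched instead.  */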
static inline void pxa2xx_dma_descriptor_fetch(
                PXA2xxDMAState *s, int ch)
{
    uint32_t desc[4];
    target_phys_addr_t daddr = s->chan[ch].descr & ~0xf;
    if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
        daddr += 32;

    cpu_physical_memory_read(daddr, (uint8_t *) desc, 16);
    s->chan[ch].descr = desc[DDADR];
    s->chan[ch].src = desc[DSADR];
    s->chan[ch].dest = desc[DTADR];
    s->chan[ch].cmd = desc[DCMD];

    if (s->chan[ch].cmd & DCMD_FLOWSRC)
        s->chan[ch].src &= ~3;
    if (s->chan[ch].cmd & DCMD_FLOWTRG)
        s->chan[ch].dest &= ~3;

    if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
        printf("%s: unsupported mode in channel %i\n", __FUNCTION__, ch);

    if (s->chan[ch].cmd & DCMD_STARTIRQEN)
        s->chan[ch].state |= DCSR_STARTINTR;
}
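
/* Service every runnable channel until it completes, stops or runs out
 * of pending requests.  Transfers happen synchronously within this call;
 * the s->running counter prevents recursion when a memory access raises
 * further DMA requests, deferring them to the outer invocation.  */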
static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];
    PXA2xxDMAChannel *ch;

    if (s->running ++)
        return;

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c ++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;

                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;
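
                /* Move the descriptor's payload in burst-sized pieces,
                 * reading and then writing width-sized units through the
                 * bounce buffer.  */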
                while (length) {
                    size = MIN(length, size);

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                        !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                            !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    if ((ch->state & DCSR_NODESCFETCH) ||
                        (ch->descr & DDADR_STOP) ||
                        (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running --;
    }
}
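
/* 32-bit MMIO read: DRCMR, DRQSR, DCSR, DINT, DALGN and DPCSR are
 * decoded explicitly; the per-channel descriptor registers live in
 * 16-byte windows starting at D_CH0.  */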
static uint32_t pxa2xx_dma_read(void *opaque, target_phys_addr_t offset)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;
        return s->req[channel];

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        return 0;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        if (s->chan[channel].request)
            return s->chan[channel].state | DCSR_REQPEND;
        return s->chan[channel].state;

    case DINT:
        return s->stopintr | s->eorintr | s->rasintr |
                s->startintr | s->endintr;

    case DALGN:
        return s->align;

    case DPCSR:
        return s->pio;
    }

    if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
        channel = (offset - D_CH0) >> 4;
        switch ((offset & 0x0f) >> 2) {
        case DDADR:
            return s->chan[channel].descr;
        case DSADR:
            return s->chan[channel].src;
        case DTADR:
            return s->chan[channel].dest;
        case DCMD:
            return s->chan[channel].cmd;
        }
    }

    hw_error("%s: Bad offset 0x" TARGET_FMT_plx "\n", __FUNCTION__, offset);
    return 7;
}
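
/* 32-bit MMIO write.  A DCSR write with DCSR_RUN set starts the channel,
 * fetching its first descriptor from memory beforehand unless
 * no-descriptor-fetch mode is selected.  */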
static void pxa2xx_dma_write(void *opaque,
                 target_phys_addr_t offset, uint32_t value)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;

        if (value & DRCMR_MAPVLD)
            if ((value & DRCMR_CHLNUM) >= s->channels)
                hw_error("%s: Bad DMA channel %i\n",
                         __FUNCTION__, value & DRCMR_CHLNUM);

        s->req[channel] = value;
        break;

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        /* Nothing to do */
        break;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        s->chan[channel].state &= 0x0000071f & ~(value &
                        (DCSR_EORINT | DCSR_ENDINTR |
                         DCSR_STARTINTR | DCSR_BUSERRINTR));
        s->chan[channel].state |= value & 0xfc800000;

        if (s->chan[channel].state & DCSR_STOPIRQEN)
            s->chan[channel].state &= ~DCSR_STOPINTR;

        if (value & DCSR_NODESCFETCH) {
            /* No-descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_run(s);
            }
        } else {
            /* Descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_descriptor_fetch(s, channel);
                pxa2xx_dma_run(s);
            }
        }

        /* Shouldn't matter as our DMA is synchronous.  */
        if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
            s->chan[channel].state |= DCSR_STOPINTR;

        if (value & DCSR_CLRCMPST)
            s->chan[channel].state &= ~DCSR_CMPST;
        if (value & DCSR_SETCMPST)
            s->chan[channel].state |= DCSR_CMPST;

        pxa2xx_dma_update(s, channel);
        break;

    case DALGN:
        s->align = value;
        break;

    case DPCSR:
        s->pio = value & 0x80000001;
        break;

    default:
        if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
            channel = (offset - D_CH0) >> 4;
            switch ((offset & 0x0f) >> 2) {
            case DDADR:
                s->chan[channel].descr = value;
                break;
            case DSADR:
                s->chan[channel].src = value;
                break;
            case DTADR:
                s->chan[channel].dest = value;
                break;
            case DCMD:
                s->chan[channel].cmd = value;
                break;
            default:
                goto fail;
            }
            break;
        }

    fail:
        hw_error("%s: Bad offset " TARGET_FMT_plx "\n", __FUNCTION__, offset);
    }
}

static uint32_t pxa2xx_dma_readbad(void *opaque, target_phys_addr_t offset)
{
    hw_error("%s: Bad access width\n", __FUNCTION__);
    return 5;
}

static void pxa2xx_dma_writebad(void *opaque,
                 target_phys_addr_t offset, uint32_t value)
{
    hw_error("%s: Bad access width\n", __FUNCTION__);
}
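
/* Only 32-bit accesses are valid; the byte and halfword slots trap.  */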
static CPUReadMemoryFunc * const pxa2xx_dma_readfn[] = {
    pxa2xx_dma_readbad,
    pxa2xx_dma_readbad,
    pxa2xx_dma_read
};

static CPUWriteMemoryFunc * const pxa2xx_dma_writefn[] = {
    pxa2xx_dma_writebad,
    pxa2xx_dma_writebad,
    pxa2xx_dma_write
};
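
/* GPIO-in handler for the peripheral DREQ lines: route the request
 * through DRCMR to its channel, update the request-status bits and
 * start servicing the channel when the line is asserted.  */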
static void pxa2xx_dma_request(void *opaque, int req_num, int on)
{
    PXA2xxDMAState *s = opaque;
    int ch;
    if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
        hw_error("%s: Bad DMA request %i\n", __FUNCTION__, req_num);

    if (!(s->req[req_num] & DRCMR_MAPVLD))
        return;
    ch = s->req[req_num] & DRCMR_CHLNUM;

    if (!s->chan[ch].request && on)
        s->chan[ch].state |= DCSR_RASINTR;
    else
        s->chan[ch].state &= ~DCSR_RASINTR;
    if (s->chan[ch].request && !on)
        s->chan[ch].state |= DCSR_EORINT;

    s->chan[ch].request = on;
    if (on) {
        pxa2xx_dma_run(s);
        pxa2xx_dma_update(s, ch);
    }
}
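
/* qdev initialisation: the "channels" property selects between the
 * PXA255 (16-channel) and PXA27x (32-channel) variants.  */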
static int pxa2xx_dma_init(SysBusDevice *dev)
{
    int i, iomemtype;
    PXA2xxDMAState *s;
    s = FROM_SYSBUS(PXA2xxDMAState, dev);

    if (s->channels <= 0) {
        return -1;
    }

    s->chan = qemu_mallocz(sizeof(PXA2xxDMAChannel) * s->channels);

    memset(s->chan, 0, sizeof(PXA2xxDMAChannel) * s->channels);
    for (i = 0; i < s->channels; i ++)
        s->chan[i].state = DCSR_STOPINTR;

    memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);

    qdev_init_gpio_in(&dev->qdev, pxa2xx_dma_request,
            PXA2XX_DMA_NUM_REQUESTS);

    iomemtype = cpu_register_io_memory(pxa2xx_dma_readfn,
                    pxa2xx_dma_writefn, s, DEVICE_NATIVE_ENDIAN);
    sysbus_init_mmio(dev, 0x00010000, iomemtype);
    sysbus_init_irq(dev, &s->irq);

    return 0;
}
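
/* Convenience constructors used by the SoC models: create the device
 * with the right channel count, map its registers and wire the IRQ.  */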
DeviceState *pxa27x_dma_init(target_phys_addr_t base, qemu_irq irq)
{
    DeviceState *dev;
    dev = qdev_create(NULL, "pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
    qdev_init_nofail(dev);

    sysbus_mmio_map(sysbus_from_qdev(dev), 0, base);
    sysbus_connect_irq(sysbus_from_qdev(dev), 0, irq);

    return dev;
}

DeviceState *pxa255_dma_init(target_phys_addr_t base, qemu_irq irq)
{
    DeviceState *dev;
    dev = qdev_create(NULL, "pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA255_DMA_NUM_CHANNELS);
    qdev_init_nofail(dev);

    sysbus_mmio_map(sysbus_from_qdev(dev), 0, base);
    sysbus_connect_irq(sysbus_from_qdev(dev), 0, irq);

    return dev;
}
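
/* Migration state.  Stream version 0 carried four extra bytes that are
 * now unused; the is_version_0 test skips them when loading such a
 * stream.  */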
static bool is_version_0(void *opaque, int version_id)
{
    return version_id == 0;
}

static VMStateDescription vmstate_pxa2xx_dma_chan = {
    .name = "pxa2xx_dma_chan",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(descr, PXA2xxDMAChannel),
        VMSTATE_UINTTL(src, PXA2xxDMAChannel),
        VMSTATE_UINTTL(dest, PXA2xxDMAChannel),
        VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
        VMSTATE_UINT32(state, PXA2xxDMAChannel),
        VMSTATE_INT32(request, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

static VMStateDescription vmstate_pxa2xx_dma = {
    .name = "pxa2xx_dma",
    .version_id = 1,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED_TEST(is_version_0, 4),
        VMSTATE_UINT32(stopintr, PXA2xxDMAState),
        VMSTATE_UINT32(eorintr, PXA2xxDMAState),
        VMSTATE_UINT32(rasintr, PXA2xxDMAState),
        VMSTATE_UINT32(startintr, PXA2xxDMAState),
        VMSTATE_UINT32(endintr, PXA2xxDMAState),
        VMSTATE_UINT32(align, PXA2xxDMAState),
        VMSTATE_UINT32(pio, PXA2xxDMAState),
        VMSTATE_BUFFER(req, PXA2xxDMAState),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
                vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

static SysBusDeviceInfo pxa2xx_dma_info = {
    .init = pxa2xx_dma_init,
    .qdev.name = "pxa2xx-dma",
    .qdev.desc = "PXA2xx DMA controller",
    .qdev.size = sizeof(PXA2xxDMAState),
    .qdev.vmsd = &vmstate_pxa2xx_dma,
    .qdev.props = (Property[]) {
        DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
        DEFINE_PROP_END_OF_LIST(),
    },
};

static void pxa2xx_dma_register(void)
{
    sysbus_register_withprop(&pxa2xx_dma_info);
}

device_init(pxa2xx_dma_register);