pxa2xx_dma.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574
  1. /*
  2. * Intel XScale PXA255/270 DMA controller.
  3. *
  4. * Copyright (c) 2006 Openedhand Ltd.
  5. * Copyright (c) 2006 Thorsten Zitterell
  6. * Written by Andrzej Zaborowski <balrog@zabor.org>
  7. *
  8. * This code is licensed under the GPL.
  9. */
  10. #include "hw.h"
  11. #include "pxa.h"
  12. #include "sysbus.h"
#define PXA255_DMA_NUM_CHANNELS 16
#define PXA27X_DMA_NUM_CHANNELS 32
#define PXA2XX_DMA_NUM_REQUESTS 75

/* Per-channel state: shadow copies of the four channel descriptor
 * registers plus the current level of the mapped request line. */
typedef struct {
    uint32_t descr;     /* DDADR: descriptor address register */
    uint32_t src;       /* DSADR: source address register */
    uint32_t dest;      /* DTADR: target address register */
    uint32_t cmd;       /* DCMD: command register */
    uint32_t state;     /* DCSR: control / status register */
    int request;        /* level of the DREQ line mapped to this channel */
} PXA2xxDMAChannel;

typedef struct PXA2xxDMAState {
    SysBusDevice busdev;
    MemoryRegion iomem;
    qemu_irq irq;

    /* Per-cause interrupt pending bitmaps, one bit per channel;
     * kept in sync by pxa2xx_dma_update(). */
    uint32_t stopintr;
    uint32_t eorintr;
    uint32_t rasintr;
    uint32_t startintr;
    uint32_t endintr;

    uint32_t align;     /* DALGN register value */
    uint32_t pio;       /* DPCSR register value */

    int channels;       /* 16 (PXA255) or 32 (PXA27x), set as a property */
    PXA2xxDMAChannel *chan;
    uint8_t req[PXA2XX_DMA_NUM_REQUESTS];  /* DRCMR request -> channel map */
    /* Flag to avoid recursive DMA invocations. */
    int running;
} PXA2xxDMAState;

#define DCSR0 0x0000 /* DMA Control / Status register for Channel 0 */
#define DCSR31 0x007c /* DMA Control / Status register for Channel 31 */
#define DALGN 0x00a0 /* DMA Alignment register */
#define DPCSR 0x00a4 /* DMA Programmed I/O Control Status register */
#define DRQSR0 0x00e0 /* DMA DREQ<0> Status register */
#define DRQSR1 0x00e4 /* DMA DREQ<1> Status register */
#define DRQSR2 0x00e8 /* DMA DREQ<2> Status register */
#define DINT 0x00f0 /* DMA Interrupt register */
#define DRCMR0 0x0100 /* Request to Channel Map register 0 */
#define DRCMR63 0x01fc /* Request to Channel Map register 63 */
#define D_CH0 0x0200 /* Channel 0 Descriptor start */
#define DRCMR64 0x1100 /* Request to Channel Map register 64 */
#define DRCMR74 0x1128 /* Request to Channel Map register 74 */

/* Per-channel register (word index within a channel's descriptor block) */
#define DDADR 0x00
#define DSADR 0x01
#define DTADR 0x02
#define DCMD 0x03

/* Bit-field masks */
#define DRCMR_CHLNUM 0x1f
#define DRCMR_MAPVLD (1 << 7)
#define DDADR_STOP (1 << 0)
#define DDADR_BREN (1 << 1)
#define DCMD_LEN 0x1fff
/* NOTE(review): if the width field ((x) >> 14) & 3 is 0 this shifts by -1,
 * which is undefined behaviour; callers appear to rely on the field being
 * non-zero — confirm against the PXA27x developer's manual. */
#define DCMD_WIDTH(x) (1 << ((((x) >> 14) & 3) - 1))
#define DCMD_SIZE(x) (4 << (((x) >> 16) & 3))
#define DCMD_FLYBYT (1 << 19)
#define DCMD_FLYBYS (1 << 20)
#define DCMD_ENDIRQEN (1 << 21)
#define DCMD_STARTIRQEN (1 << 22)
#define DCMD_CMPEN (1 << 25)
#define DCMD_FLOWTRG (1 << 28)
#define DCMD_FLOWSRC (1 << 29)
#define DCMD_INCTRGADDR (1 << 30)
#define DCMD_INCSRCADDR (1 << 31)
#define DCSR_BUSERRINTR (1 << 0)
#define DCSR_STARTINTR (1 << 1)
#define DCSR_ENDINTR (1 << 2)
#define DCSR_STOPINTR (1 << 3)
#define DCSR_RASINTR (1 << 4)
#define DCSR_REQPEND (1 << 8)
#define DCSR_EORINT (1 << 9)
#define DCSR_CMPST (1 << 10)
#define DCSR_MASKRUN (1 << 22)
#define DCSR_RASIRQEN (1 << 23)
#define DCSR_CLRCMPST (1 << 24)
#define DCSR_SETCMPST (1 << 25)
#define DCSR_EORSTOPEN (1 << 26)
#define DCSR_EORJMPEN (1 << 27)
#define DCSR_EORIRQEN (1 << 28)
#define DCSR_STOPIRQEN (1 << 29)
#define DCSR_NODESCFETCH (1 << 30)
#define DCSR_RUN (1 << 31)
  94. static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
  95. {
  96. if (ch >= 0) {
  97. if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
  98. (s->chan[ch].state & DCSR_STOPINTR))
  99. s->stopintr |= 1 << ch;
  100. else
  101. s->stopintr &= ~(1 << ch);
  102. if ((s->chan[ch].state & DCSR_EORIRQEN) &&
  103. (s->chan[ch].state & DCSR_EORINT))
  104. s->eorintr |= 1 << ch;
  105. else
  106. s->eorintr &= ~(1 << ch);
  107. if ((s->chan[ch].state & DCSR_RASIRQEN) &&
  108. (s->chan[ch].state & DCSR_RASINTR))
  109. s->rasintr |= 1 << ch;
  110. else
  111. s->rasintr &= ~(1 << ch);
  112. if (s->chan[ch].state & DCSR_STARTINTR)
  113. s->startintr |= 1 << ch;
  114. else
  115. s->startintr &= ~(1 << ch);
  116. if (s->chan[ch].state & DCSR_ENDINTR)
  117. s->endintr |= 1 << ch;
  118. else
  119. s->endintr &= ~(1 << ch);
  120. }
  121. if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
  122. qemu_irq_raise(s->irq);
  123. else
  124. qemu_irq_lower(s->irq);
  125. }
  126. static inline void pxa2xx_dma_descriptor_fetch(
  127. PXA2xxDMAState *s, int ch)
  128. {
  129. uint32_t desc[4];
  130. hwaddr daddr = s->chan[ch].descr & ~0xf;
  131. if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
  132. daddr += 32;
  133. cpu_physical_memory_read(daddr, (uint8_t *) desc, 16);
  134. s->chan[ch].descr = desc[DDADR];
  135. s->chan[ch].src = desc[DSADR];
  136. s->chan[ch].dest = desc[DTADR];
  137. s->chan[ch].cmd = desc[DCMD];
  138. if (s->chan[ch].cmd & DCMD_FLOWSRC)
  139. s->chan[ch].src &= ~3;
  140. if (s->chan[ch].cmd & DCMD_FLOWTRG)
  141. s->chan[ch].dest &= ~3;
  142. if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
  143. printf("%s: unsupported mode in channel %i\n", __FUNCTION__, ch);
  144. if (s->chan[ch].cmd & DCMD_STARTIRQEN)
  145. s->chan[ch].state |= DCSR_STARTINTR;
  146. }
/* Execute pending transfers synchronously on every running channel.
 *
 * The s->running counter serves two purposes: it prevents re-entry
 * (a transfer's memory writes can trigger device callbacks that call
 * back into the DMA core), and any nested call bumps it above 1 so the
 * outer while loop makes another full pass before returning.
 */
static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];   /* DCMD_SIZE() is at most 32 bytes per burst */
    PXA2xxDMAChannel *ch;

    /* Already running: record the nested request and bail out. */
    if (s->running ++)
        return;

    while (s->running) {
        s->running = 1;   /* collapse any nested requests into one pass */
        for (c = 0; c < s->channels; c ++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;
                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                /* Move the data in bursts of up to `size` bytes,
                 * `width` bytes at a time through `buffer`. */
                while (length) {
                    size = MIN(length, size);

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    /* Flow-controlled transfer whose request line dropped
                     * mid-transfer: latch end-of-receive, optionally stop
                     * or jump to the next descriptor. */
                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                            !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                                !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                /* Write the remaining byte count back into DCMD. */
                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    /* Stop the channel if no further descriptor will be
                     * fetched; otherwise latch STOPINTR but keep RUN so a
                     * later descriptor fetch can continue the chain. */
                    if ((ch->state & DCSR_NODESCFETCH) ||
                            (ch->descr & DDADR_STOP) ||
                            (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running --;
    }
}
  212. static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
  213. unsigned size)
  214. {
  215. PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
  216. unsigned int channel;
  217. if (size != 4) {
  218. hw_error("%s: Bad access width\n", __FUNCTION__);
  219. return 5;
  220. }
  221. switch (offset) {
  222. case DRCMR64 ... DRCMR74:
  223. offset -= DRCMR64 - DRCMR0 - (64 << 2);
  224. /* Fall through */
  225. case DRCMR0 ... DRCMR63:
  226. channel = (offset - DRCMR0) >> 2;
  227. return s->req[channel];
  228. case DRQSR0:
  229. case DRQSR1:
  230. case DRQSR2:
  231. return 0;
  232. case DCSR0 ... DCSR31:
  233. channel = offset >> 2;
  234. if (s->chan[channel].request)
  235. return s->chan[channel].state | DCSR_REQPEND;
  236. return s->chan[channel].state;
  237. case DINT:
  238. return s->stopintr | s->eorintr | s->rasintr |
  239. s->startintr | s->endintr;
  240. case DALGN:
  241. return s->align;
  242. case DPCSR:
  243. return s->pio;
  244. }
  245. if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
  246. channel = (offset - D_CH0) >> 4;
  247. switch ((offset & 0x0f) >> 2) {
  248. case DDADR:
  249. return s->chan[channel].descr;
  250. case DSADR:
  251. return s->chan[channel].src;
  252. case DTADR:
  253. return s->chan[channel].dest;
  254. case DCMD:
  255. return s->chan[channel].cmd;
  256. }
  257. }
  258. hw_error("%s: Bad offset 0x" TARGET_FMT_plx "\n", __FUNCTION__, offset);
  259. return 7;
  260. }
  261. static void pxa2xx_dma_write(void *opaque, hwaddr offset,
  262. uint64_t value, unsigned size)
  263. {
  264. PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
  265. unsigned int channel;
  266. if (size != 4) {
  267. hw_error("%s: Bad access width\n", __FUNCTION__);
  268. return;
  269. }
  270. switch (offset) {
  271. case DRCMR64 ... DRCMR74:
  272. offset -= DRCMR64 - DRCMR0 - (64 << 2);
  273. /* Fall through */
  274. case DRCMR0 ... DRCMR63:
  275. channel = (offset - DRCMR0) >> 2;
  276. if (value & DRCMR_MAPVLD)
  277. if ((value & DRCMR_CHLNUM) > s->channels)
  278. hw_error("%s: Bad DMA channel %i\n",
  279. __FUNCTION__, (unsigned)value & DRCMR_CHLNUM);
  280. s->req[channel] = value;
  281. break;
  282. case DRQSR0:
  283. case DRQSR1:
  284. case DRQSR2:
  285. /* Nothing to do */
  286. break;
  287. case DCSR0 ... DCSR31:
  288. channel = offset >> 2;
  289. s->chan[channel].state &= 0x0000071f & ~(value &
  290. (DCSR_EORINT | DCSR_ENDINTR |
  291. DCSR_STARTINTR | DCSR_BUSERRINTR));
  292. s->chan[channel].state |= value & 0xfc800000;
  293. if (s->chan[channel].state & DCSR_STOPIRQEN)
  294. s->chan[channel].state &= ~DCSR_STOPINTR;
  295. if (value & DCSR_NODESCFETCH) {
  296. /* No-descriptor-fetch mode */
  297. if (value & DCSR_RUN) {
  298. s->chan[channel].state &= ~DCSR_STOPINTR;
  299. pxa2xx_dma_run(s);
  300. }
  301. } else {
  302. /* Descriptor-fetch mode */
  303. if (value & DCSR_RUN) {
  304. s->chan[channel].state &= ~DCSR_STOPINTR;
  305. pxa2xx_dma_descriptor_fetch(s, channel);
  306. pxa2xx_dma_run(s);
  307. }
  308. }
  309. /* Shouldn't matter as our DMA is synchronous. */
  310. if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
  311. s->chan[channel].state |= DCSR_STOPINTR;
  312. if (value & DCSR_CLRCMPST)
  313. s->chan[channel].state &= ~DCSR_CMPST;
  314. if (value & DCSR_SETCMPST)
  315. s->chan[channel].state |= DCSR_CMPST;
  316. pxa2xx_dma_update(s, channel);
  317. break;
  318. case DALGN:
  319. s->align = value;
  320. break;
  321. case DPCSR:
  322. s->pio = value & 0x80000001;
  323. break;
  324. default:
  325. if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
  326. channel = (offset - D_CH0) >> 4;
  327. switch ((offset & 0x0f) >> 2) {
  328. case DDADR:
  329. s->chan[channel].descr = value;
  330. break;
  331. case DSADR:
  332. s->chan[channel].src = value;
  333. break;
  334. case DTADR:
  335. s->chan[channel].dest = value;
  336. break;
  337. case DCMD:
  338. s->chan[channel].cmd = value;
  339. break;
  340. default:
  341. goto fail;
  342. }
  343. break;
  344. }
  345. fail:
  346. hw_error("%s: Bad offset " TARGET_FMT_plx "\n", __FUNCTION__, offset);
  347. }
  348. }
/* MMIO dispatch table for the 64 KiB register window registered in
 * pxa2xx_dma_init(); only 32-bit accesses are accepted by the
 * handlers themselves. */
static const MemoryRegionOps pxa2xx_dma_ops = {
    .read = pxa2xx_dma_read,
    .write = pxa2xx_dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
  354. static void pxa2xx_dma_request(void *opaque, int req_num, int on)
  355. {
  356. PXA2xxDMAState *s = opaque;
  357. int ch;
  358. if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
  359. hw_error("%s: Bad DMA request %i\n", __FUNCTION__, req_num);
  360. if (!(s->req[req_num] & DRCMR_MAPVLD))
  361. return;
  362. ch = s->req[req_num] & DRCMR_CHLNUM;
  363. if (!s->chan[ch].request && on)
  364. s->chan[ch].state |= DCSR_RASINTR;
  365. else
  366. s->chan[ch].state &= ~DCSR_RASINTR;
  367. if (s->chan[ch].request && !on)
  368. s->chan[ch].state |= DCSR_EORINT;
  369. s->chan[ch].request = on;
  370. if (on) {
  371. pxa2xx_dma_run(s);
  372. pxa2xx_dma_update(s, ch);
  373. }
  374. }
  375. static int pxa2xx_dma_init(SysBusDevice *dev)
  376. {
  377. int i;
  378. PXA2xxDMAState *s;
  379. s = FROM_SYSBUS(PXA2xxDMAState, dev);
  380. if (s->channels <= 0) {
  381. return -1;
  382. }
  383. s->chan = g_malloc0(sizeof(PXA2xxDMAChannel) * s->channels);
  384. memset(s->chan, 0, sizeof(PXA2xxDMAChannel) * s->channels);
  385. for (i = 0; i < s->channels; i ++)
  386. s->chan[i].state = DCSR_STOPINTR;
  387. memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);
  388. qdev_init_gpio_in(&dev->qdev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);
  389. memory_region_init_io(&s->iomem, &pxa2xx_dma_ops, s,
  390. "pxa2xx.dma", 0x00010000);
  391. sysbus_init_mmio(dev, &s->iomem);
  392. sysbus_init_irq(dev, &s->irq);
  393. return 0;
  394. }
  395. DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
  396. {
  397. DeviceState *dev;
  398. dev = qdev_create(NULL, "pxa2xx-dma");
  399. qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
  400. qdev_init_nofail(dev);
  401. sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
  402. sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
  403. return dev;
  404. }
  405. DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
  406. {
  407. DeviceState *dev;
  408. dev = qdev_create(NULL, "pxa2xx-dma");
  409. qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
  410. qdev_init_nofail(dev);
  411. sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
  412. sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
  413. return dev;
  414. }
  415. static bool is_version_0(void *opaque, int version_id)
  416. {
  417. return version_id == 0;
  418. }
/* Migration description for one DMA channel; mirrors the fields of
 * PXA2xxDMAChannel one-to-one. */
static VMStateDescription vmstate_pxa2xx_dma_chan = {
    .name = "pxa2xx_dma_chan",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(descr, PXA2xxDMAChannel),
        VMSTATE_UINT32(src, PXA2xxDMAChannel),
        VMSTATE_UINT32(dest, PXA2xxDMAChannel),
        VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
        VMSTATE_UINT32(state, PXA2xxDMAChannel),
        VMSTATE_INT32(request, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};
/* Migration description for the whole controller.  Version-0 streams
 * carried 4 extra bytes at the front, skipped via is_version_0(); the
 * channel array length follows the "channels" property. */
static VMStateDescription vmstate_pxa2xx_dma = {
    .name = "pxa2xx_dma",
    .version_id = 1,
    .minimum_version_id = 0,
    .minimum_version_id_old = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED_TEST(is_version_0, 4),
        VMSTATE_UINT32(stopintr, PXA2xxDMAState),
        VMSTATE_UINT32(eorintr, PXA2xxDMAState),
        VMSTATE_UINT32(rasintr, PXA2xxDMAState),
        VMSTATE_UINT32(startintr, PXA2xxDMAState),
        VMSTATE_UINT32(endintr, PXA2xxDMAState),
        VMSTATE_UINT32(align, PXA2xxDMAState),
        VMSTATE_UINT32(pio, PXA2xxDMAState),
        VMSTATE_BUFFER(req, PXA2xxDMAState),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
                vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};
/* qdev properties: "channels" must be set by the board code (16 for
 * PXA255, 32 for PXA27x); the -1 default makes init fail if unset. */
static Property pxa2xx_dma_properties[] = {
    DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
    DEFINE_PROP_END_OF_LIST(),
};
  458. static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
  459. {
  460. DeviceClass *dc = DEVICE_CLASS(klass);
  461. SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
  462. k->init = pxa2xx_dma_init;
  463. dc->desc = "PXA2xx DMA controller";
  464. dc->vmsd = &vmstate_pxa2xx_dma;
  465. dc->props = pxa2xx_dma_properties;
  466. }
/* QOM type registration for the "pxa2xx-dma" sysbus device. */
static const TypeInfo pxa2xx_dma_info = {
    .name = "pxa2xx-dma",
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PXA2xxDMAState),
    .class_init = pxa2xx_dma_class_init,
};

static void pxa2xx_dma_register_types(void)
{
    type_register_static(&pxa2xx_dma_info);
}

type_init(pxa2xx_dma_register_types)