pl080.c 9.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338
  1. /*
  2. * Arm PrimeCell PL080/PL081 DMA controller
  3. *
  4. * Copyright (c) 2006 CodeSourcery.
  5. * Written by Paul Brook
  6. *
  7. * This code is licenced under the GPL.
  8. */
  9. #include "hw.h"
  10. #include "primecell.h"
#define PL080_MAX_CHANNELS 8

/* DMACConfiguration register bits.  */
#define PL080_CONF_E 0x1        /* Controller enable */
#define PL080_CONF_M1 0x2       /* AHB master 1 endianness (big-endian unimplemented) */
#define PL080_CONF_M2 0x4       /* AHB master 2 endianness (big-endian unimplemented) */

/* Per-channel DMACCxConfiguration register bits.  */
#define PL080_CCONF_H 0x40000   /* Halt: channel ignored while set */
#define PL080_CCONF_A 0x20000   /* Active (per PL080 TRM -- not used in this file) */
#define PL080_CCONF_L 0x10000   /* Lock (per PL080 TRM -- not used in this file) */
#define PL080_CCONF_ITC 0x08000 /* Terminal count interrupt unmask */
#define PL080_CCONF_IE 0x04000  /* Error interrupt unmask */
#define PL080_CCONF_E 0x00001   /* Channel enable */

/* Per-channel DMACCxControl register bits.  */
#define PL080_CCTRL_I 0x80000000  /* Raise terminal count interrupt on completion */
#define PL080_CCTRL_DI 0x08000000 /* Increment destination address after each element */
#define PL080_CCTRL_SI 0x04000000 /* Increment source address after each element */
#define PL080_CCTRL_D 0x02000000  /* Destination AHB master select (per TRM -- unused here) */
#define PL080_CCTRL_S 0x01000000  /* Source AHB master select (per TRM -- unused here) */
/* Register state for one DMA channel (a scatter-gather descriptor).  */
typedef struct {
    uint32_t src;   /* DMACCxSrcAddr: current source address */
    uint32_t dest;  /* DMACCxDestAddr: current destination address */
    uint32_t lli;   /* DMACCxLLI: linked-list item pointer; 0 = last descriptor */
    uint32_t ctrl;  /* DMACCxControl: widths, increment flags, transfer count */
    uint32_t conf;  /* DMACCxConfiguration: enable, halt, flow control, IRQ masks */
} pl080_channel;
/* Device state for the whole PL080/PL081 controller.  */
typedef struct {
    uint8_t tc_int;      /* Raw terminal-count interrupt status, one bit per channel */
    uint8_t tc_mask;     /* Terminal-count unmask bits (derived from CCONF_ITC) */
    uint8_t err_int;     /* Raw error interrupt status */
    uint8_t err_mask;    /* Error unmask bits (derived from CCONF_IE) */
    uint32_t conf;       /* DMACConfiguration register */
    uint32_t sync;       /* DMACSync register (stored, otherwise unused here) */
    uint32_t req_single; /* Peripheral single-transfer request lines
                            (presumably set by peripherals; no setter in this file) */
    uint32_t req_burst;  /* Peripheral burst-transfer request lines -- same caveat */
    pl080_channel chan[PL080_MAX_CHANNELS];
    int nchannels;       /* 8 for a PL080, 2 for a PL081 */
    /* Flag to avoid recursive DMA invocations; also counts pending rescans.  */
    int running;
    qemu_irq irq;        /* Combined interrupt output */
} pl080_state;
/* AMBA peripheral/PrimeCell ID register bytes, returned one per word for
   reads in the 0xfe0..0xfff window.  The first byte distinguishes the
   8-channel PL080 (0x80) from the 2-channel PL081 (0x81).  */
static const unsigned char pl080_id[] =
{ 0x80, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
static const unsigned char pl081_id[] =
{ 0x81, 0x10, 0x04, 0x0a, 0x0d, 0xf0, 0x05, 0xb1 };
  52. static void pl080_update(pl080_state *s)
  53. {
  54. if ((s->tc_int & s->tc_mask)
  55. || (s->err_int & s->err_mask))
  56. qemu_irq_raise(s->irq);
  57. else
  58. qemu_irq_lower(s->irq);
  59. }
  60. static void pl080_run(pl080_state *s)
  61. {
  62. int c;
  63. int flow;
  64. pl080_channel *ch;
  65. int swidth;
  66. int dwidth;
  67. int xsize;
  68. int n;
  69. int src_id;
  70. int dest_id;
  71. int size;
  72. uint8_t buff[4];
  73. uint32_t req;
  74. s->tc_mask = 0;
  75. for (c = 0; c < s->nchannels; c++) {
  76. if (s->chan[c].conf & PL080_CCONF_ITC)
  77. s->tc_mask |= 1 << c;
  78. if (s->chan[c].conf & PL080_CCONF_IE)
  79. s->err_mask |= 1 << c;
  80. }
  81. if ((s->conf & PL080_CONF_E) == 0)
  82. return;
  83. cpu_abort(cpu_single_env, "DMA active\n");
  84. /* If we are already in the middle of a DMA operation then indicate that
  85. there may be new DMA requests and return immediately. */
  86. if (s->running) {
  87. s->running++;
  88. return;
  89. }
  90. s->running = 1;
  91. while (s->running) {
  92. for (c = 0; c < s->nchannels; c++) {
  93. ch = &s->chan[c];
  94. again:
  95. /* Test if thiws channel has any pending DMA requests. */
  96. if ((ch->conf & (PL080_CCONF_H | PL080_CCONF_E))
  97. != PL080_CCONF_E)
  98. continue;
  99. flow = (ch->conf >> 11) & 7;
  100. if (flow >= 4) {
  101. cpu_abort(cpu_single_env,
  102. "pl080_run: Peripheral flow control not implemented\n");
  103. }
  104. src_id = (ch->conf >> 1) & 0x1f;
  105. dest_id = (ch->conf >> 6) & 0x1f;
  106. size = ch->ctrl & 0xfff;
  107. req = s->req_single | s->req_burst;
  108. switch (flow) {
  109. case 0:
  110. break;
  111. case 1:
  112. if ((req & (1u << dest_id)) == 0)
  113. size = 0;
  114. break;
  115. case 2:
  116. if ((req & (1u << src_id)) == 0)
  117. size = 0;
  118. break;
  119. case 3:
  120. if ((req & (1u << src_id)) == 0
  121. || (req & (1u << dest_id)) == 0)
  122. size = 0;
  123. break;
  124. }
  125. if (!size)
  126. continue;
  127. /* Transfer one element. */
  128. /* ??? Should transfer multiple elements for a burst request. */
  129. /* ??? Unclear what the proper behavior is when source and
  130. destination widths are different. */
  131. swidth = 1 << ((ch->ctrl >> 18) & 7);
  132. dwidth = 1 << ((ch->ctrl >> 21) & 7);
  133. for (n = 0; n < dwidth; n+= swidth) {
  134. cpu_physical_memory_read(ch->src, buff + n, swidth);
  135. if (ch->ctrl & PL080_CCTRL_SI)
  136. ch->src += swidth;
  137. }
  138. xsize = (dwidth < swidth) ? swidth : dwidth;
  139. /* ??? This may pad the value incorrectly for dwidth < 32. */
  140. for (n = 0; n < xsize; n += dwidth) {
  141. cpu_physical_memory_write(ch->dest + n, buff + n, dwidth);
  142. if (ch->ctrl & PL080_CCTRL_DI)
  143. ch->dest += swidth;
  144. }
  145. size--;
  146. ch->ctrl = (ch->ctrl & 0xfffff000) | size;
  147. if (size == 0) {
  148. /* Transfer complete. */
  149. if (ch->lli) {
  150. ch->src = ldl_phys(ch->lli);
  151. ch->dest = ldl_phys(ch->lli + 4);
  152. ch->ctrl = ldl_phys(ch->lli + 12);
  153. ch->lli = ldl_phys(ch->lli + 8);
  154. } else {
  155. ch->conf &= ~PL080_CCONF_E;
  156. }
  157. if (ch->ctrl & PL080_CCTRL_I) {
  158. s->tc_int |= 1 << c;
  159. }
  160. }
  161. goto again;
  162. }
  163. if (--s->running)
  164. s->running = 1;
  165. }
  166. }
  167. static uint32_t pl080_read(void *opaque, target_phys_addr_t offset)
  168. {
  169. pl080_state *s = (pl080_state *)opaque;
  170. uint32_t i;
  171. uint32_t mask;
  172. if (offset >= 0xfe0 && offset < 0x1000) {
  173. if (s->nchannels == 8) {
  174. return pl080_id[(offset - 0xfe0) >> 2];
  175. } else {
  176. return pl081_id[(offset - 0xfe0) >> 2];
  177. }
  178. }
  179. if (offset >= 0x100 && offset < 0x200) {
  180. i = (offset & 0xe0) >> 5;
  181. if (i >= s->nchannels)
  182. goto bad_offset;
  183. switch (offset >> 2) {
  184. case 0: /* SrcAddr */
  185. return s->chan[i].src;
  186. case 1: /* DestAddr */
  187. return s->chan[i].dest;
  188. case 2: /* LLI */
  189. return s->chan[i].lli;
  190. case 3: /* Control */
  191. return s->chan[i].ctrl;
  192. case 4: /* Configuration */
  193. return s->chan[i].conf;
  194. default:
  195. goto bad_offset;
  196. }
  197. }
  198. switch (offset >> 2) {
  199. case 0: /* IntStatus */
  200. return (s->tc_int & s->tc_mask) | (s->err_int & s->err_mask);
  201. case 1: /* IntTCStatus */
  202. return (s->tc_int & s->tc_mask);
  203. case 3: /* IntErrorStatus */
  204. return (s->err_int & s->err_mask);
  205. case 5: /* RawIntTCStatus */
  206. return s->tc_int;
  207. case 6: /* RawIntErrorStatus */
  208. return s->err_int;
  209. case 7: /* EnbldChns */
  210. mask = 0;
  211. for (i = 0; i < s->nchannels; i++) {
  212. if (s->chan[i].conf & PL080_CCONF_E)
  213. mask |= 1 << i;
  214. }
  215. return mask;
  216. case 8: /* SoftBReq */
  217. case 9: /* SoftSReq */
  218. case 10: /* SoftLBReq */
  219. case 11: /* SoftLSReq */
  220. /* ??? Implement these. */
  221. return 0;
  222. case 12: /* Configuration */
  223. return s->conf;
  224. case 13: /* Sync */
  225. return s->sync;
  226. default:
  227. bad_offset:
  228. cpu_abort(cpu_single_env, "pl080_read: Bad offset %x\n", (int)offset);
  229. return 0;
  230. }
  231. }
  232. static void pl080_write(void *opaque, target_phys_addr_t offset,
  233. uint32_t value)
  234. {
  235. pl080_state *s = (pl080_state *)opaque;
  236. int i;
  237. if (offset >= 0x100 && offset < 0x200) {
  238. i = (offset & 0xe0) >> 5;
  239. if (i >= s->nchannels)
  240. goto bad_offset;
  241. switch (offset >> 2) {
  242. case 0: /* SrcAddr */
  243. s->chan[i].src = value;
  244. break;
  245. case 1: /* DestAddr */
  246. s->chan[i].dest = value;
  247. break;
  248. case 2: /* LLI */
  249. s->chan[i].lli = value;
  250. break;
  251. case 3: /* Control */
  252. s->chan[i].ctrl = value;
  253. break;
  254. case 4: /* Configuration */
  255. s->chan[i].conf = value;
  256. pl080_run(s);
  257. break;
  258. }
  259. }
  260. switch (offset >> 2) {
  261. case 2: /* IntTCClear */
  262. s->tc_int &= ~value;
  263. break;
  264. case 4: /* IntErrorClear */
  265. s->err_int &= ~value;
  266. break;
  267. case 8: /* SoftBReq */
  268. case 9: /* SoftSReq */
  269. case 10: /* SoftLBReq */
  270. case 11: /* SoftLSReq */
  271. /* ??? Implement these. */
  272. cpu_abort(cpu_single_env, "pl080_write: Soft DMA not implemented\n");
  273. break;
  274. case 12: /* Configuration */
  275. s->conf = value;
  276. if (s->conf & (PL080_CONF_M1 | PL080_CONF_M1)) {
  277. cpu_abort(cpu_single_env,
  278. "pl080_write: Big-endian DMA not implemented\n");
  279. }
  280. pl080_run(s);
  281. break;
  282. case 13: /* Sync */
  283. s->sync = value;
  284. break;
  285. default:
  286. bad_offset:
  287. cpu_abort(cpu_single_env, "pl080_write: Bad offset %x\n", (int)offset);
  288. }
  289. pl080_update(s);
  290. }
/* MMIO dispatch tables: the same handler services byte, halfword and
   word accesses.  */
static CPUReadMemoryFunc *pl080_readfn[] = {
    pl080_read,
    pl080_read,
    pl080_read
};

static CPUWriteMemoryFunc *pl080_writefn[] = {
    pl080_write,
    pl080_write,
    pl080_write
};
  301. /* The PL080 and PL081 are the same except for the number of channels
  302. they implement (8 and 2 respectively). */
  303. void *pl080_init(uint32_t base, qemu_irq irq, int nchannels)
  304. {
  305. int iomemtype;
  306. pl080_state *s;
  307. s = (pl080_state *)qemu_mallocz(sizeof(pl080_state));
  308. iomemtype = cpu_register_io_memory(0, pl080_readfn,
  309. pl080_writefn, s);
  310. cpu_register_physical_memory(base, 0x00001000, iomemtype);
  311. s->irq = irq;
  312. s->nchannels = nchannels;
  313. /* ??? Save/restore. */
  314. return s;
  315. }