soc_dma.c

/*
 * On-chip DMA controller framework.
 *
 * Copyright (C) 2008 Nokia Corporation
 * Written by Andrzej Zaborowski <andrew@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/arm/soc_dma.h"

/* Port 0 is the transfer's source, port 1 its destination
 * (cf. soc_dma_ch_update() below).  */
static void transfer_mem2mem(struct soc_dma_ch_s *ch)
{
    memcpy(ch->paddr[1], ch->paddr[0], ch->bytes);
    ch->paddr[0] += ch->bytes;
    ch->paddr[1] += ch->bytes;
}

static void transfer_mem2fifo(struct soc_dma_ch_s *ch)
{
    ch->io_fn[1](ch->io_opaque[1], ch->paddr[0], ch->bytes);
    ch->paddr[0] += ch->bytes;
}

static void transfer_fifo2mem(struct soc_dma_ch_s *ch)
{
    ch->io_fn[0](ch->io_opaque[0], ch->paddr[1], ch->bytes);
    ch->paddr[1] += ch->bytes;
}
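
/* The io_fn calls above follow the soc_dma_io_t convention visible at the
 * call sites: (opaque, byte buffer, length).  A device-side FIFO handler
 * hooked up through soc_dma_port_add_fifo() might look like this sketch
 * (hypothetical UART model, not part of this file):
 *
 *     static void my_uart_tx_fifo(void *opaque, uint8_t *buf, int len)
 *     {
 *         struct my_uart_s *s = opaque;
 *         int i;
 *
 *         for (i = 0; i < len; i ++)       // consume bytes DMAed from RAM
 *             my_uart_put_byte(s, buf[i]);
 *     }
 */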

/* This is further optimisable but isn't very important because often
 * DMA peripherals forbid transfers of this kind and even when they
 * don't, operating systems may not need to use them.  */
static void *fifo_buf;
static int fifo_size;

static void transfer_fifo2fifo(struct soc_dma_ch_s *ch)
{
    if (ch->bytes > fifo_size)
        fifo_buf = g_realloc(fifo_buf, fifo_size = ch->bytes);

    /* Implement as transfer_fifo2linear + transfer_linear2fifo.  */
    ch->io_fn[0](ch->io_opaque[0], fifo_buf, ch->bytes);
    ch->io_fn[1](ch->io_opaque[1], fifo_buf, ch->bytes);
}

struct dma_s {
    struct soc_dma_s soc;
    int chnum;
    uint64_t ch_enable_mask;
    int64_t channel_freq;
    int enabled_count;

    struct memmap_entry_s {
        enum soc_dma_port_type type;
        hwaddr addr;
        union {
            struct {
                void *opaque;
                soc_dma_io_t fn;
                int out;
            } fifo;
            struct {
                void *base;
                size_t size;
            } mem;
        } u;
    } *memmap;
    int memmap_size;

    struct soc_dma_ch_s ch[];
};
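
/* The memmap array is kept sorted by address by soc_dma_port_add_fifo()
 * and soc_dma_port_add_mem() so that soc_dma_lookup() can halve its way
 * through it.  Two FIFOs may share one address as long as they differ in
 * direction.  A possible layout (hypothetical addresses):
 *
 *     index  type               addr        payload
 *       0    soc_dma_port_fifo  0x48002000  rx fifo fn/opaque, out = 0
 *       1    soc_dma_port_fifo  0x48002000  tx fifo fn/opaque, out = 1
 *       2    soc_dma_port_mem   0x80000000  host base pointer, size
 */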

static void soc_dma_ch_schedule(struct soc_dma_ch_s *ch, int delay_bytes)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    struct dma_s *dma = (struct dma_s *) ch->dma;

    timer_mod(ch->timer, now + delay_bytes / dma->channel_freq);
}

static void soc_dma_ch_run(void *opaque)
{
    struct soc_dma_ch_s *ch = (struct soc_dma_ch_s *) opaque;

    ch->running = 1;
    ch->dma->setup_fn(ch);
    ch->transfer_fn(ch);
    ch->running = 0;

    if (ch->enable)
        soc_dma_ch_schedule(ch, ch->bytes);
    ch->bytes = 0;
}
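
/* One timer callback services one burst: the SoC-specific setup_fn hook
 * is expected to refresh ch->bytes (and the channel's addresses) from
 * the guest-programmed registers before transfer_fn moves the data.
 * For an enabled channel the flow per tick is (hypothetical figures):
 *
 *     setup_fn(ch);                    // e.g. sets ch->bytes = 4096
 *     transfer_fn(ch);                 // one of the transfer_* helpers
 *     soc_dma_ch_schedule(ch, 4096);   // next burst 4096 / channel_freq
 *                                      // ticks away
 */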

static inline struct memmap_entry_s *soc_dma_lookup(struct dma_s *dma,
                hwaddr addr)
{
    struct memmap_entry_s *lo;
    int hi;

    lo = dma->memmap;
    hi = dma->memmap_size;
    while (hi > 1) {
        hi /= 2;
        if (lo[hi].addr <= addr)
            lo += hi;
    }

    return lo;
}
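
/* Worked example of the halving search above, on a hypothetical map of
 * four entries at { 0x1000, 0x2000, 0x3000, 0x4000 } with addr = 0x2abc:
 * hi = 4 -> 2, lo[2].addr = 0x3000 > addr, stay; hi = 1, lo[1].addr =
 * 0x2000 <= addr, advance lo to index 1; the loop ends and the 0x2000
 * entry is returned.  The result is only a candidate: callers such as
 * soc_dma_ch_update_type() still verify that addr really falls inside
 * the returned entry.
 */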

static inline enum soc_dma_port_type soc_dma_ch_update_type(
                struct soc_dma_ch_s *ch, int port)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;
    struct memmap_entry_s *entry = soc_dma_lookup(dma, ch->vaddr[port]);

    if (entry->type == soc_dma_port_fifo) {
        while (entry < dma->memmap + dma->memmap_size &&
                        entry->u.fifo.out != port)
            entry ++;
        if (entry->addr != ch->vaddr[port] || entry->u.fifo.out != port)
            return soc_dma_port_other;

        /* FIFOs are only usable with a constant port address.  */
        if (ch->type[port] != soc_dma_access_const)
            return soc_dma_port_other;

        ch->io_fn[port] = entry->u.fifo.fn;
        ch->io_opaque[port] = entry->u.fifo.opaque;
        return soc_dma_port_fifo;
    } else if (entry->type == soc_dma_port_mem) {
        if (entry->addr > ch->vaddr[port] ||
                        entry->addr + entry->u.mem.size <= ch->vaddr[port])
            return soc_dma_port_other;

        /* TODO: support constant memory address for source port as used for
         * drawing solid rectangles by PalmOS(R).  */
        if (ch->type[port] != soc_dma_access_linear)
            return soc_dma_port_other;

        ch->paddr[port] = (uint8_t *) entry->u.mem.base +
                (ch->vaddr[port] - entry->addr);
        /* TODO: save bytes left to the end of the mapping somewhere so we
         * can check we're not reading beyond it.  */
        return soc_dma_port_mem;
    } else
        return soc_dma_port_other;
}

void soc_dma_ch_update(struct soc_dma_ch_s *ch)
{
    enum soc_dma_port_type src, dst;

    src = soc_dma_ch_update_type(ch, 0);
    if (src == soc_dma_port_other) {
        ch->update = 0;
        ch->transfer_fn = ch->dma->transfer_fn;
        return;
    }
    dst = soc_dma_ch_update_type(ch, 1);

    /* TODO: use src and dst as array indices.  */
    if (src == soc_dma_port_mem && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_mem2mem;
    else if (src == soc_dma_port_mem && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_mem2fifo;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_mem)
        ch->transfer_fn = transfer_fifo2mem;
    else if (src == soc_dma_port_fifo && dst == soc_dma_port_fifo)
        ch->transfer_fn = transfer_fifo2fifo;
    else
        ch->transfer_fn = ch->dma->transfer_fn;

    ch->update = (dst != soc_dma_port_other);
}
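
/* A SoC-specific DMA model calls soc_dma_ch_update() whenever the guest
 * reprograms a channel's ports, so the fast transfer_* shortcut can be
 * re-selected.  Sketch of a hypothetical register-write handler:
 *
 *     ch->vaddr[0] = src_addr;              // port 0: source
 *     ch->type[0]  = soc_dma_access_linear; // walks through RAM
 *     ch->vaddr[1] = dst_addr;              // port 1: destination
 *     ch->type[1]  = soc_dma_access_const;  // e.g. a device FIFO register
 *     soc_dma_ch_update(ch);                // picks transfer_mem2fifo here
 */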

static void soc_dma_ch_freq_update(struct dma_s *s)
{
    if (s->enabled_count)
        /* We completely ignore channel priorities and stuff */
        s->channel_freq = s->soc.freq / s->enabled_count;
    else {
        /* TODO: Signal that we want to disable the functional clock and let
         * the platform code decide what to do with it, i.e. check that
         * auto-idle is enabled in the clock controller and if we are stopping
         * the clock, do the same with any parent clocks that had only one
         * user keeping them on and auto-idle enabled.  */
    }
}
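
/* The bandwidth split is a plain equal share.  With a hypothetical
 * s->soc.freq of 100000000 and three enabled channels, each one gets
 * channel_freq = 33333333; enabling a fourth drops every share to
 * 25000000.  The unit is whatever the board stored in soc.freq.  */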

void soc_dma_set_request(struct soc_dma_ch_s *ch, int level)
{
    struct dma_s *dma = (struct dma_s *) ch->dma;

    dma->enabled_count += level - ch->enable;

    if (level)
        dma->ch_enable_mask |= (uint64_t) 1 << ch->num;
    else
        dma->ch_enable_mask &= ~((uint64_t) 1 << ch->num);

    if (level != ch->enable) {
        soc_dma_ch_freq_update(dma);
        ch->enable = level;

        if (!ch->enable)
            timer_del(ch->timer);
        else if (!ch->running)
            soc_dma_ch_run(ch);
        else
            soc_dma_ch_schedule(ch, 1);
    }
}
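
/* Peripherals drive their DMA request lines through this entry point.
 * Sketch (hypothetical device model): assert the request when data is
 * ready, drop it when the guest disables DMA:
 *
 *     soc_dma_set_request(s->dma_ch, 1);   // starts the channel running
 *     ...
 *     soc_dma_set_request(s->dma_ch, 0);   // idles it, deleting the timer
 */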

void soc_dma_reset(struct soc_dma_s *soc)
{
    struct dma_s *s = (struct dma_s *) soc;

    s->soc.drqbmp = 0;
    s->ch_enable_mask = 0;
    s->enabled_count = 0;
    soc_dma_ch_freq_update(s);
}

/* TODO: take a functional-clock argument */
struct soc_dma_s *soc_dma_init(int n)
{
    int i;
    struct dma_s *s = g_malloc0(sizeof(*s) + n * sizeof(*s->ch));

    s->chnum = n;
    s->soc.ch = s->ch;

    for (i = 0; i < n; i ++) {
        s->ch[i].dma = &s->soc;
        s->ch[i].num = i;
        s->ch[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                        soc_dma_ch_run, &s->ch[i]);
    }

    soc_dma_reset(&s->soc);
    fifo_size = 0;

    return &s->soc;
}
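
/* Typical board wiring (a sketch with hypothetical hook names and
 * addresses): allocate the engine, set its functional clock and the
 * SoC-specific callbacks, then describe the DMA-visible address map:
 *
 *     struct soc_dma_s *dma = soc_dma_init(6);
 *     dma->freq = 100000000;
 *     dma->setup_fn = my_soc_setup;         // refreshes ch->bytes etc.
 *     dma->transfer_fn = my_soc_transfer;   // fallback for "other" ports
 *     soc_dma_port_add_mem(dma, host_ram_ptr, 0x80000000, ram_size);
 *     soc_dma_port_add_fifo(dma, 0x48002000, my_uart_rx, uart, 0);
 */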

void soc_dma_port_add_fifo(struct soc_dma_s *soc, hwaddr virt_base,
                soc_dma_io_t fn, void *opaque, int out)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if (entry->addr <= virt_base &&
                    entry->addr + entry->u.mem.size > virt_base) {
                error_report("%s: FIFO at %"PRIx64
                             " collides with RAM region at %"PRIx64
                             "-%"PRIx64, __func__,
                             virt_base, entry->addr,
                             (entry->addr + entry->u.mem.size));
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else
            while (entry < dma->memmap + dma->memmap_size &&
                    entry->addr <= virt_base) {
                if (entry->addr == virt_base && entry->u.fifo.out == out) {
                    error_report("%s: FIFO at %"PRIx64
                                 " collides with FIFO at %"PRIx64,
                                 __func__, virt_base, entry->addr);
                    exit(-1);
                }

                entry ++;
            }

        memmove(entry + 1, entry,
                (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_fifo;
    entry->u.fifo.fn = fn;
    entry->u.fifo.opaque = opaque;
    entry->u.fifo.out = out;
}
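
/* Example registration (hypothetical addresses): a UART exposing receive
 * and transmit FIFOs at the same guest address, distinguished only by
 * direction, which the collision check above permits:
 *
 *     soc_dma_port_add_fifo(dma, 0x48002000, my_uart_rx, uart, 0);  // in
 *     soc_dma_port_add_fifo(dma, 0x48002000, my_uart_tx, uart, 1);  // out
 */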

void soc_dma_port_add_mem(struct soc_dma_s *soc, uint8_t *phys_base,
                hwaddr virt_base, size_t size)
{
    struct memmap_entry_s *entry;
    struct dma_s *dma = (struct dma_s *) soc;

    dma->memmap = g_realloc(dma->memmap, sizeof(*entry) *
                    (dma->memmap_size + 1));
    entry = soc_dma_lookup(dma, virt_base);

    if (dma->memmap_size) {
        if (entry->type == soc_dma_port_mem) {
            if ((entry->addr >= virt_base && entry->addr < virt_base + size) ||
                    (entry->addr <= virt_base &&
                     entry->addr + entry->u.mem.size > virt_base)) {
                error_report("%s: RAM at %"PRIx64 "-%"PRIx64
                             " collides with RAM region at %"PRIx64
                             "-%"PRIx64, __func__,
                             virt_base, virt_base + size,
                             entry->addr, entry->addr + entry->u.mem.size);
                exit(-1);
            }

            if (entry->addr <= virt_base)
                entry ++;
        } else {
            if (entry->addr >= virt_base &&
                    entry->addr < virt_base + size) {
                error_report("%s: RAM at %"PRIx64 "-%"PRIx64
                             " collides with FIFO at %"PRIx64,
                             __func__, virt_base, virt_base + size,
                             entry->addr);
                exit(-1);
            }

            while (entry < dma->memmap + dma->memmap_size &&
                    entry->addr <= virt_base)
                entry ++;
        }

        memmove(entry + 1, entry,
                (uint8_t *) (dma->memmap + dma->memmap_size ++) -
                (uint8_t *) entry);
    } else
        dma->memmap_size ++;

    entry->addr = virt_base;
    entry->type = soc_dma_port_mem;
    entry->u.mem.base = phys_base;
    entry->u.mem.size = size;
}

/* TODO: port removal for ports like PCMCIA memory */