dma-helpers.c

/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "trace.h"

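/*
 * Scatter/gather list helpers.  A QEMUSGList holds an array of
 * (guest physical address, length) pairs plus the running total in
 * qsg->size; qemu_sglist_add() grows the array geometrically (2n + 1)
 * once the initial allocation hint is exhausted.
 */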
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

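/*
 * Per-request state for scatter/gather block I/O: the scatter/gather list
 * being walked (sg_cur_index/sg_cur_byte track the current position), the
 * iovec of currently mapped host buffers, the inner block-layer AIOCB, and
 * a bottom half used to retry after a failed memory mapping.
 */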
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

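/*
 * When no part of the scatter/gather list can currently be mapped (see
 * dma_bdrv_cb below), the request registers itself as a map client.
 * continue_after_map_failure() runs once mapping resources are available
 * again and defers the actual retry to a bottom half, which re-enters
 * dma_bdrv_cb() with ret == 0.
 */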
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

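/*
 * Unmap every host buffer collected in dbs->iov and reset the iovec so the
 * next iteration can refill it.  The third argument tells the memory API
 * whether the buffer was written to, i.e. whether the transfer went towards
 * the guest (any direction other than DMA_DIRECTION_TO_DEVICE).
 */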
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len,
                                  dbs->dir != DMA_DIRECTION_TO_DEVICE,
                                  dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

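/*
 * Finish the request: unmap any remaining buffers, invoke the caller's
 * completion callback, tear down the iovec and any pending bottom half,
 * and release the AIOCB unless dma_aio_cancel() still holds a reference.
 */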
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}

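/*
 * Main state machine, used both to start a request and as the completion
 * callback of each inner bdrv_aio_readv/writev call.  It unmaps the buffers
 * of the previous iteration, then maps as much of the remaining
 * scatter/gather list as possible into dbs->iov and submits the next chunk
 * through dbs->io_func.  If nothing could be mapped it waits for
 * continue_after_map_failure(); if the list is exhausted or an error was
 * reported, it completes the request.
 */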
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len,
                                      dbs->dir != DMA_DIRECTION_TO_DEVICE);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

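/*
 * Cancel callback installed in dma_aio_pool.  Cancelling the inner AIOCB may
 * complete it synchronously and re-enter dma_complete(); in_cancel keeps that
 * path from releasing the AIOCB while it is still referenced here.  The
 * caller's callback is cleared before the final dma_complete(dbs, 0) so it is
 * not invoked for a cancelled request.
 */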
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static AIOPool dma_aio_pool = {
    .aiocb_size = sizeof(DMAAIOCB),
    .cancel     = dma_aio_cancel,
};

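/*
 * Entry point for scatter/gather block I/O: allocate a DMAAIOCB from the
 * pool, record the request parameters and kick off the first iteration of
 * dma_bdrv_cb().  A minimal usage sketch (the descriptor values and the
 * completion callback below are hypothetical, not part of this file):
 *
 *     QEMUSGList qsg;
 *     qemu_sglist_init(&qsg, 2);
 *     qemu_sglist_add(&qsg, desc0_addr, desc0_len);
 *     qemu_sglist_add(&qsg, desc1_addr, desc1_len);
 *     dma_bdrv_read(bs, &qsg, sector, my_completion_cb, my_state);
 *     // qemu_sglist_destroy(&qsg) once my_completion_cb has run
 */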
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}

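/*
 * Synchronous bounce-buffer variant: copy between a linear buffer and the
 * scatter/gather list with cpu_physical_memory_rw(), at most len bytes.
 * Returns the number of bytes of the scatter/gather list that were not
 * transferred (the residual), so callers can detect short transfers.
 */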
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_dev)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        cpu_physical_memory_rw(entry.base, ptr, xfer, !to_dev);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, 0);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, 1);
}

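/*
 * Account a scatter/gather request with the block layer, using the total
 * byte count already accumulated in sg->size.
 */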
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}