dma-helpers.c

/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "block_int.h"
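
/* Initialize a scatter/gather list; alloc_hint is the number of entries to
 * reserve up front, and the list grows on demand in qemu_sglist_add(). */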
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}
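
/* Append one entry to the list, growing the allocation geometrically when
 * it is full. */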
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
}
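
/* Per-request state for an in-flight scatter/gather transfer. */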
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    bool to_dev;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);
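
/* Bottom-half handler: retry the transfer once mapping resources are
 * available again. */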
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}
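
/* Map-client callback: an earlier cpu_physical_memory_map() call failed, so
 * schedule a bottom half to resume the transfer. */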
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
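
/* Unmap everything mapped for the current chunk and reset the I/O vector. */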
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->to_dev,
                                  dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}
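
/* Complete the request: unmap, invoke the caller's callback and release the
 * AIOCB unless a cancellation is still holding a reference to it. */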
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress. In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel. */
        qemu_aio_release(dbs);
    }
}
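
/* Main driver: map as much of the remaining scatter/gather list as possible
 * into an I/O vector and submit the next piece of the transfer.  Called
 * first from dma_bdrv_io() and then as the completion callback of each
 * submitted piece. */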
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->to_dev);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    if (!dbs->acb) {
        dma_complete(dbs, -EIO);
    }
}
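
/* Cancel an in-flight transfer.  Cancelling the underlying request may make
 * it complete synchronously, so in_cancel keeps dma_complete() from
 * releasing the AIOCB out from under us. */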
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static AIOPool dma_aio_pool = {
    .aiocb_size = sizeof(DMAAIOCB),
    .cancel     = dma_aio_cancel,
};
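
/* Start a scatter/gather transfer: build the request state, map the first
 * chunk and submit it through io_func (bdrv_aio_readv or bdrv_aio_writev). */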
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, bool to_dev)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->to_dev = to_dev;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}
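
/* Convenience wrappers for scatter/gather reads and writes. */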
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque, false);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque, true);
}
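
/*
 * Illustrative usage sketch: a device model would typically fill a QEMUSGList
 * from its descriptor ring and start an asynchronous read, freeing the list
 * from the completion callback.  MyDeviceState, my_dma_done and the
 * descriptor fields below are hypothetical names, not part of this API.
 *
 *     static void my_dma_done(void *opaque, int ret)
 *     {
 *         MyDeviceState *s = opaque;
 *
 *         qemu_sglist_destroy(&s->sg);
 *         // check ret (-EIO on error), raise the completion interrupt, ...
 *     }
 *
 *     qemu_sglist_init(&s->sg, 2);
 *     qemu_sglist_add(&s->sg, desc[0].addr, desc[0].len);
 *     qemu_sglist_add(&s->sg, desc[1].addr, desc[1].len);
 *     dma_bdrv_read(s->bs, &s->sg, sector, my_dma_done, s);
 */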