dma-helpers.c

/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "block_int.h"
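
/*
 * Scatter/gather list helpers.  A QEMUSGList collects the guest-physical
 * (base, len) pairs a device wants to DMA to or from; the entry array grows
 * on demand and qsg->size tracks the total byte count of the list.
 */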
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}

void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                     target_phys_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = qemu_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    qemu_free(qsg->sg);
}
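
/*
 * State for one in-flight scatter/gather block request.  The request is
 * driven by dma_bdrv_cb(), which maps as much of the remaining list as it
 * can, submits one block-layer request per mapped run, and resumes from
 * (sg_cur_index, sg_cur_byte) when that run completes.
 */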
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    int is_write;
    int sg_cur_index;
    target_phys_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);
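
/*
 * cpu_physical_memory_map() can fail transiently (e.g. while the bounce
 * buffer for MMIO regions is in use).  When that happens, the transfer is
 * parked and these two helpers restart dma_bdrv_cb() from a bottom half
 * once mappings become available again.
 */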
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(opaque, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
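
/*
 * Release the guest-memory mappings for the chunk that just completed.
 * Note the inverted flag: a disk read (is_write == 0) stores data into
 * guest memory, so the mapping must be flushed as a memory write.
 */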
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
                                  dbs->iov.iov[i].iov_len);
    }
}
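
/*
 * The heart of the state machine.  Called once with ret == 0 to start the
 * transfer, then again as the completion callback of every block-layer
 * request it submits.
 */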
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    /* The previous chunk (if any) is done: unmap it and account for it. */
    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);
    qemu_iovec_reset(&dbs->iov);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        /* Whole list transferred, or the block layer reported an error. */
        dbs->common.cb(dbs->common.opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs);
        return;
    }

    /* Map as much of the remaining scatter/gather list as possible. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped; retry from a bottom half later. */
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    if (!dbs->acb) {
        dma_bdrv_unmap(dbs);
        qemu_iovec_destroy(&dbs->iov);
        return;
    }
}
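
/*
 * Cancel the inner block-layer request currently in flight, if any; the
 * remainder of the scatter/gather list is simply never submitted.
 */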
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    if (dbs->acb) {
        bdrv_aio_cancel(dbs->acb);
    }
}

static AIOPool dma_aio_pool = {
    .aiocb_size = sizeof(DMAAIOCB),
    .cancel = dma_aio_cancel,
};
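
/*
 * Common entry point: set up the per-request state and kick off the first
 * chunk.  io_func is bdrv_aio_readv or bdrv_aio_writev (see the wrappers
 * below); NULL is returned if no request could be issued.
 */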
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, int is_write)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->is_write = is_write;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    if (!dbs->acb) {
        qemu_aio_release(dbs);
        return NULL;
    }
    return &dbs->common;
}
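
/* Convenience wrappers for the two transfer directions. */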
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque, 0);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque, 1);
}
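
/*
 * Usage sketch (hypothetical device code, not part of this file): a device
 * that has decoded a guest request into two DMA segments could drive a disk
 * read roughly like this, assuming guest-physical addresses seg0_addr and
 * seg1_addr and a device-defined completion callback:
 *
 *     static void my_dma_done(void *opaque, int ret)
 *     {
 *         // ret < 0 on error; all guest mappings are already released here
 *     }
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, 2);
 *     qemu_sglist_add(&qsg, seg0_addr, seg0_len);
 *     qemu_sglist_add(&qsg, seg1_addr, seg1_len);
 *     dma_bdrv_read(bs, &qsg, sector, my_dma_done, my_dev);
 *     // call qemu_sglist_destroy(&qsg) only after the request completes;
 *     // the list is referenced for the lifetime of the transfer
 */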