dma-helpers.c

/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "block_int.h"

static AIOPool dma_aio_pool;
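
/*
 * Scatter/gather list construction. A QEMUSGList collects (base, len)
 * pairs of guest physical memory; alloc_hint pre-sizes the entry array
 * to avoid early reallocations.
 */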
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}
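
/*
 * Append one entry, growing the array geometrically (roughly doubling)
 * when it is full so that repeated adds stay amortized O(1).
 */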
void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                     target_phys_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = qemu_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}
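
/* Free the entry array; the QEMUSGList structure itself is caller-owned. */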
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    qemu_free(qsg->sg);
}
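
/*
 * Per-request state for a scatter/gather block I/O operation. The
 * request walks the QEMUSGList in pieces: sg_cur_index/sg_cur_byte
 * track the resume position, iov holds the host mappings for the
 * piece currently in flight, and acb is the underlying block-layer
 * request (NULL while no I/O is pending).
 */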
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    int is_write;
    int sg_cur_index;
    target_phys_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);
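
/*
 * When cpu_physical_memory_map() cannot map any part of the current
 * sg entry (typically because all bounce buffers are in use), the
 * request registers itself as a map client. Once resources free up,
 * continue_after_map_failure() runs and restarts dma_bdrv_cb() from
 * a bottom half rather than calling it directly from the notifier
 * context.
 */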
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(opaque, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
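
/*
 * Unmap the host buffers mapped for the piece that just completed.
 * For a disk read (is_write == 0) the mappings were writable, and the
 * access length tells the memory layer how much bounce-buffered data
 * to copy back into guest memory.
 */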
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
                                  dbs->iov.iov[i].iov_len);
    }
}
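
/*
 * Completion callback and main driver of the request. Each invocation
 * retires the piece that just finished, then maps as many of the
 * remaining sg entries as possible into one QEMUIOVector and submits
 * the next bdrv_aio_readv/writev. The chain ends when the whole list
 * has been transferred or the block layer reports an error.
 */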
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    /* Retire the piece that just completed (512-byte sectors). */
    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);
    qemu_iovec_reset(&dbs->iov);

    /* Whole list transferred, or an error was reported: fire the
     * caller's completion callback and release the request. */
    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dbs->common.cb(dbs->common.opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs);
        return;
    }

    /* Map as many of the remaining entries as possible. A partial
     * mapping of an entry leaves sg_cur_byte pointing at the rest. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    /* Nothing could be mapped: wait until map resources free up. */
    if (dbs->iov.size == 0) {
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->is_write) {
        dbs->acb = bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
                                   dbs->iov.size / 512, dma_bdrv_cb, dbs);
    } else {
        dbs->acb = bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
                                  dbs->iov.size / 512, dma_bdrv_cb, dbs);
    }
    /* Submission failed: unwind the mappings so nothing leaks. */
    if (!dbs->acb) {
        dma_bdrv_unmap(dbs);
        qemu_iovec_destroy(&dbs->iov);
        return;
    }
}
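
/*
 * Common setup for dma_bdrv_read/write: allocate a DMAAIOCB from the
 * pool and start the first piece by invoking dma_bdrv_cb() directly
 * with ret == 0. If no I/O was submitted, release the AIOCB and
 * return NULL so the caller sees the failure immediately.
 */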
static BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    BlockDriverCompletionFunc *cb, void *opaque,
    int is_write)
{
    DMAAIOCB *dbs = qemu_aio_get_pool(&dma_aio_pool, bs, cb, opaque);

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->is_write = is_write;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    if (!dbs->acb) {
        qemu_aio_release(dbs);
        return NULL;
    }
    return &dbs->common;
}
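
/* Public entry points: scatter/gather read and write in 512-byte sectors. */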
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 0);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 1);
}
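
/* Cancel the piece currently in flight, if any. */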
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    if (dbs->acb) {
        bdrv_aio_cancel(dbs->acb);
    }
}
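
/* Register the AIOCB pool with the AIO layer. */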
void dma_helper_init(void)
{
    aio_pool_init(&dma_aio_pool, sizeof(DMAAIOCB), dma_aio_cancel);
}