dma.h
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#ifndef DMA_H
#define DMA_H

#include <stdio.h>
#include "hw/hw.h"
#include "block.h"
#include "kvm.h"

typedef struct DMAContext DMAContext;
typedef struct ScatterGatherEntry ScatterGatherEntry;

typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;

struct QEMUSGList {
    ScatterGatherEntry *sg;
    int nsg;
    int nalloc;
    size_t size;
    DMAContext *dma;
};

#if defined(TARGET_PHYS_ADDR_BITS)

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64
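
/*
 * Usage sketch (illustrative, not part of the original header): DMA_ADDR_FMT
 * is intended for printing dma_addr_t values, e.g. in a device's error path.
 * "addr" here is just a hypothetical local variable.
 *
 *     dma_addr_t addr = guest_supplied_address;
 *     fprintf(stderr, "mydev: bad DMA address 0x" DMA_ADDR_FMT "\n", addr);
 */
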
typedef int DMATranslateFunc(DMAContext *dma,
                             dma_addr_t addr,
                             target_phys_addr_t *paddr,
                             target_phys_addr_t *len,
                             DMADirection dir);
typedef void* DMAMapFunc(DMAContext *dma,
                         dma_addr_t addr,
                         dma_addr_t *len,
                         DMADirection dir);
typedef void DMAUnmapFunc(DMAContext *dma,
                          void *buffer,
                          dma_addr_t len,
                          DMADirection dir,
                          dma_addr_t access_len);

struct DMAContext {
    DMATranslateFunc *translate;
    DMAMapFunc *map;
    DMAUnmapFunc *unmap;
};

static inline void dma_barrier(DMAContext *dma, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    if (kvm_enabled()) {
        smp_mb();
    }
}

static inline bool dma_has_iommu(DMAContext *dma)
{
    return !!dma;
}

/* Checks that the given range of addresses is valid for DMA.  This is
 * useful for certain cases, but usually you should just use
 * dma_memory_{read,write}() and check for errors. */
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir);
static inline bool dma_memory_valid(DMAContext *dma,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        return true;
    } else {
        return iommu_dma_memory_valid(dma, addr, len, dir);
    }
}
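
/*
 * Usage sketch (illustrative, not part of the original header): a device
 * model can pre-flight a guest-supplied buffer before committing to a long
 * transfer.  "s->dma" and the "desc" fields are hypothetical device state.
 *
 *     if (!dma_memory_valid(s->dma, desc.buf_addr, desc.buf_len,
 *                           DMA_DIRECTION_FROM_DEVICE)) {
 *         report an error to the guest instead of starting the transfer
 *     }
 */
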
int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir);
static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
                                        void *buf, dma_addr_t len,
                                        DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        /* Fast-path for no IOMMU */
        cpu_physical_memory_rw(addr, buf, len,
                               dir == DMA_DIRECTION_FROM_DEVICE);
        return 0;
    } else {
        return iommu_dma_memory_rw(dma, addr, buf, len, dir);
    }
}

static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
                                           const void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
                                 DMA_DIRECTION_FROM_DEVICE);
}

static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                                void *buf, dma_addr_t len,
                                DMADirection dir)
{
    dma_barrier(dma, dir);

    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
}

static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
                                   const void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE);
}
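
/*
 * Usage sketch (illustrative, not part of the original header): per the
 * dma_barrier() comment above, dma_memory_read()/dma_memory_write() order
 * each access against running VCPUs, while a device doing a burst of
 * accesses can use the _relaxed variants and provide its own ordering.
 * "s", "ring_base" and the "desc" structure are hypothetical device state.
 *
 *     dma_barrier(s->dma, DMA_DIRECTION_TO_DEVICE);
 *     for (i = 0; i < n; i++) {
 *         dma_memory_read_relaxed(s->dma, ring_base + i * sizeof(desc),
 *                                 &desc, sizeof(desc));
 *         process desc here
 *     }
 */
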
int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len);
int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);

void *iommu_dma_memory_map(DMAContext *dma,
                           dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir);
static inline void *dma_memory_map(DMAContext *dma,
                                   dma_addr_t addr, dma_addr_t *len,
                                   DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        target_phys_addr_t xlen = *len;
        void *p;

        p = cpu_physical_memory_map(addr, &xlen,
                                    dir == DMA_DIRECTION_FROM_DEVICE);
        *len = xlen;
        return p;
    } else {
        return iommu_dma_memory_map(dma, addr, len, dir);
    }
}

void iommu_dma_memory_unmap(DMAContext *dma,
                            void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len);
static inline void dma_memory_unmap(DMAContext *dma,
                                    void *buffer, dma_addr_t len,
                                    DMADirection dir, dma_addr_t access_len)
{
    if (!dma_has_iommu(dma)) {
        cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
                                  dir == DMA_DIRECTION_FROM_DEVICE,
                                  access_len);
    } else {
        iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
    }
}
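
/*
 * Usage sketch (illustrative, not part of the original header): the usual
 * map/access/unmap pattern.  As noted in dma_barrier(), map()/unmap() users
 * provide their own ordering.  The mapping may come back shorter than
 * requested, and callers must cope with a NULL return.
 *
 *     dma_addr_t maplen = len;
 *     void *p = dma_memory_map(dma, addr, &maplen, DMA_DIRECTION_FROM_DEVICE);
 *     if (p) {
 *         memcpy(p, data, maplen);    (device-to-guest copy)
 *         dma_memory_unmap(dma, p, maplen, DMA_DIRECTION_FROM_DEVICE, maplen);
 *     }
 */
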
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end)                   \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \
                                                            dma_addr_t addr) \
    {                                                                   \
        uint##_bits##_t val;                                            \
        dma_memory_read(dma, addr, &val, (_bits) / 8);                  \
        return _end##_bits##_to_cpu(val);                               \
    }                                                                   \
    static inline void st##_sname##_##_end##_dma(DMAContext *dma,      \
                                                 dma_addr_t addr,      \
                                                 uint##_bits##_t val)  \
    {                                                                   \
        val = cpu_to_##_end##_bits(val);                                \
        dma_memory_write(dma, addr, &val, (_bits) / 8);                 \
    }

static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr)
{
    uint8_t val;

    dma_memory_read(dma, addr, &val, 1);
    return val;
}

static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val)
{
    dma_memory_write(dma, addr, &val, 1);
}

DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
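
/*
 * Usage sketch (illustrative, not part of the original header): the macro
 * above expands to lduw_le_dma(), stw_le_dma(), ldl_le_dma(), stl_le_dma(),
 * ldq_le_dma(), stq_le_dma() and the corresponding _be_ variants.  A device
 * with little-endian in-memory structures might read a 32-bit field and
 * write back a 16-bit status like this ("s" and the offsets are hypothetical):
 *
 *     uint32_t buf_lo = ldl_le_dma(s->dma, desc_addr + 4);
 *     stw_le_dma(s->dma, desc_addr + 12, status);
 */
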
void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap);
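
/*
 * Usage sketch (illustrative, not part of the original header): an IOMMU
 * emulation supplies a translate callback matching the DMATranslateFunc
 * typedef above and registers it with dma_context_init(); in this sketch
 * the map/unmap hooks are left NULL.  "my_iommu_translate" and "s" are
 * hypothetical names.
 *
 *     static int my_iommu_translate(DMAContext *dma, dma_addr_t addr,
 *                                   target_phys_addr_t *paddr,
 *                                   target_phys_addr_t *len,
 *                                   DMADirection dir)
 *     {
 *         *paddr = addr;    (identity mapping, bounds checks omitted)
 *         return 0;
 *     }
 *
 *     dma_context_init(&s->dma_ctx, my_iommu_translate, NULL, NULL);
 */
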
struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
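
/*
 * Usage sketch (illustrative, not part of the original header): a device
 * typically builds a QEMUSGList from guest descriptors and hands it to the
 * dma_bdrv_* helpers below.  "s" and the descriptor fields are hypothetical.
 *
 *     qemu_sglist_init(&s->sg, num_descs, s->dma);
 *     for (i = 0; i < num_descs; i++) {
 *         qemu_sglist_add(&s->sg, desc[i].addr, desc[i].len);
 *     }
 *     issue the transfer, then in the completion path:
 *     qemu_sglist_destroy(&s->sg);
 */
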
#endif /* defined(TARGET_PHYS_ADDR_BITS) */

typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                    QEMUIOVector *iov, int nb_sectors,
                                    BlockDriverCompletionFunc *cb, void *opaque);

BlockDriverAIOCB *dma_bdrv_io(BlockDriverState *bs,
                              QEMUSGList *sg, uint64_t sector_num,
                              DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
                              void *opaque, DMADirection dir);
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 BlockDriverCompletionFunc *cb, void *opaque);
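
/*
 * Usage sketch (illustrative, not part of the original header): once a
 * QEMUSGList is populated, a block device can start an asynchronous
 * scatter-gather read with a completion callback.  "s", "my_dma_complete"
 * and "first_sector" are hypothetical.
 *
 *     s->aiocb = dma_bdrv_read(s->bs, &s->sg, first_sector,
 *                              my_dma_complete, s);
 */
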
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);

#endif /* DMA_H */