2
0

vugbm.c 6.3 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325
  1. /*
  2. * Virtio vhost-user GPU Device
  3. *
  4. * DRM helpers
  5. *
  6. * This work is licensed under the terms of the GNU GPL, version 2 or later.
  7. * See the COPYING file in the top-level directory.
  8. */
  9. #include "qemu/osdep.h"
  10. #include "vugbm.h"
  11. static bool
  12. mem_alloc_bo(struct vugbm_buffer *buf)
  13. {
  14. buf->mmap = g_malloc(buf->width * buf->height * 4);
  15. buf->stride = buf->width * 4;
  16. return true;
  17. }
  18. static void
  19. mem_free_bo(struct vugbm_buffer *buf)
  20. {
  21. g_free(buf->mmap);
  22. }
  23. static bool
  24. mem_map_bo(struct vugbm_buffer *buf)
  25. {
  26. return buf->mmap != NULL;
  27. }
/* Nothing to do: the heap store stays valid until mem_free_bo(). */
static void
mem_unmap_bo(struct vugbm_buffer *buf)
{
}
/* The memory backend holds no device-level resources to release. */
static void
mem_device_destroy(struct vugbm_device *dev)
{
}
#ifdef CONFIG_MEMFD
/*
 * Local mirror of the kernel's struct udmabuf_create (linux/udmabuf.h):
 * asks /dev/udmabuf to wrap a range of a memfd in a dma-buf.
 */
struct udmabuf_create {
    uint32_t memfd;
    uint32_t flags;
    uint64_t offset;
    uint64_t size;
};

#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)
  44. static size_t
  45. udmabuf_get_size(struct vugbm_buffer *buf)
  46. {
  47. return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size());
  48. }
  49. static bool
  50. udmabuf_alloc_bo(struct vugbm_buffer *buf)
  51. {
  52. int ret;
  53. buf->memfd = memfd_create("udmabuf-bo", MFD_ALLOW_SEALING);
  54. if (buf->memfd < 0) {
  55. return false;
  56. }
  57. ret = ftruncate(buf->memfd, udmabuf_get_size(buf));
  58. if (ret < 0) {
  59. close(buf->memfd);
  60. return false;
  61. }
  62. ret = fcntl(buf->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
  63. if (ret < 0) {
  64. close(buf->memfd);
  65. return false;
  66. }
  67. buf->stride = buf->width * 4;
  68. return true;
  69. }
/* Close the backing memfd created by udmabuf_alloc_bo(). */
static void
udmabuf_free_bo(struct vugbm_buffer *buf)
{
    close(buf->memfd);
}
  75. static bool
  76. udmabuf_map_bo(struct vugbm_buffer *buf)
  77. {
  78. buf->mmap = mmap(NULL, udmabuf_get_size(buf),
  79. PROT_READ | PROT_WRITE, MAP_SHARED, buf->memfd, 0);
  80. if (buf->mmap == MAP_FAILED) {
  81. return false;
  82. }
  83. return true;
  84. }
  85. static bool
  86. udmabuf_get_fd(struct vugbm_buffer *buf, int *fd)
  87. {
  88. struct udmabuf_create create = {
  89. .memfd = buf->memfd,
  90. .offset = 0,
  91. .size = udmabuf_get_size(buf),
  92. };
  93. *fd = ioctl(buf->dev->fd, UDMABUF_CREATE, &create);
  94. return *fd >= 0;
  95. }
/* Undo udmabuf_map_bo(): unmap the same page-aligned length. */
static void
udmabuf_unmap_bo(struct vugbm_buffer *buf)
{
    munmap(buf->mmap, udmabuf_get_size(buf));
}
/* Close the /dev/udmabuf fd opened in vugbm_device_init(). */
static void
udmabuf_device_destroy(struct vugbm_device *dev)
{
    close(dev->fd);
}
  106. #endif
  107. #ifdef CONFIG_GBM
  108. static bool
  109. alloc_bo(struct vugbm_buffer *buf)
  110. {
  111. struct gbm_device *dev = buf->dev->dev;
  112. assert(!buf->bo);
  113. buf->bo = gbm_bo_create(dev, buf->width, buf->height,
  114. buf->format,
  115. GBM_BO_USE_RENDERING | GBM_BO_USE_LINEAR);
  116. if (buf->bo) {
  117. buf->stride = gbm_bo_get_stride(buf->bo);
  118. return true;
  119. }
  120. return false;
  121. }
  122. static void
  123. free_bo(struct vugbm_buffer *buf)
  124. {
  125. gbm_bo_destroy(buf->bo);
  126. }
  127. static bool
  128. map_bo(struct vugbm_buffer *buf)
  129. {
  130. uint32_t stride;
  131. buf->mmap = gbm_bo_map(buf->bo, 0, 0, buf->width, buf->height,
  132. GBM_BO_TRANSFER_READ_WRITE, &stride,
  133. &buf->mmap_data);
  134. assert(stride == buf->stride);
  135. return buf->mmap != NULL;
  136. }
/* Release the CPU mapping created by map_bo(). */
static void
unmap_bo(struct vugbm_buffer *buf)
{
    gbm_bo_unmap(buf->bo, buf->mmap_data);
}
  142. static bool
  143. get_fd(struct vugbm_buffer *buf, int *fd)
  144. {
  145. *fd = gbm_bo_get_fd(buf->bo);
  146. return *fd >= 0;
  147. }
/* Tear down the GBM device created in vugbm_device_init(). */
static void
device_destroy(struct vugbm_device *dev)
{
    gbm_device_destroy(dev->dev);
}
  153. #endif
  154. void
  155. vugbm_device_destroy(struct vugbm_device *dev)
  156. {
  157. if (!dev->inited) {
  158. return;
  159. }
  160. dev->device_destroy(dev);
  161. }
  162. void
  163. vugbm_device_init(struct vugbm_device *dev, int fd)
  164. {
  165. assert(!dev->inited);
  166. #ifdef CONFIG_GBM
  167. if (fd >= 0) {
  168. dev->dev = gbm_create_device(fd);
  169. }
  170. if (dev->dev != NULL) {
  171. dev->fd = fd;
  172. dev->alloc_bo = alloc_bo;
  173. dev->free_bo = free_bo;
  174. dev->get_fd = get_fd;
  175. dev->map_bo = map_bo;
  176. dev->unmap_bo = unmap_bo;
  177. dev->device_destroy = device_destroy;
  178. dev->inited = true;
  179. }
  180. #endif
  181. #ifdef CONFIG_MEMFD
  182. if (!dev->inited && g_file_test("/dev/udmabuf", G_FILE_TEST_EXISTS)) {
  183. dev->fd = open("/dev/udmabuf", O_RDWR);
  184. if (dev->fd >= 0) {
  185. g_debug("Using experimental udmabuf backend");
  186. dev->alloc_bo = udmabuf_alloc_bo;
  187. dev->free_bo = udmabuf_free_bo;
  188. dev->get_fd = udmabuf_get_fd;
  189. dev->map_bo = udmabuf_map_bo;
  190. dev->unmap_bo = udmabuf_unmap_bo;
  191. dev->device_destroy = udmabuf_device_destroy;
  192. dev->inited = true;
  193. }
  194. }
  195. #endif
  196. if (!dev->inited) {
  197. g_debug("Using mem fallback");
  198. dev->alloc_bo = mem_alloc_bo;
  199. dev->free_bo = mem_free_bo;
  200. dev->map_bo = mem_map_bo;
  201. dev->unmap_bo = mem_unmap_bo;
  202. dev->device_destroy = mem_device_destroy;
  203. dev->inited = true;
  204. }
  205. assert(dev->inited);
  206. }
  207. static bool
  208. vugbm_buffer_map(struct vugbm_buffer *buf)
  209. {
  210. struct vugbm_device *dev = buf->dev;
  211. return dev->map_bo(buf);
  212. }
  213. static void
  214. vugbm_buffer_unmap(struct vugbm_buffer *buf)
  215. {
  216. struct vugbm_device *dev = buf->dev;
  217. dev->unmap_bo(buf);
  218. }
  219. bool
  220. vugbm_buffer_can_get_dmabuf_fd(struct vugbm_buffer *buffer)
  221. {
  222. if (!buffer->dev->get_fd) {
  223. return false;
  224. }
  225. return true;
  226. }
  227. bool
  228. vugbm_buffer_get_dmabuf_fd(struct vugbm_buffer *buffer, int *fd)
  229. {
  230. if (!vugbm_buffer_can_get_dmabuf_fd(buffer) ||
  231. !buffer->dev->get_fd(buffer, fd)) {
  232. g_warning("Failed to get dmabuf");
  233. return false;
  234. }
  235. if (*fd < 0) {
  236. g_warning("error: dmabuf_fd < 0");
  237. return false;
  238. }
  239. return true;
  240. }
  241. bool
  242. vugbm_buffer_create(struct vugbm_buffer *buffer, struct vugbm_device *dev,
  243. uint32_t width, uint32_t height)
  244. {
  245. buffer->dev = dev;
  246. buffer->width = width;
  247. buffer->height = height;
  248. buffer->format = GBM_FORMAT_XRGB8888;
  249. buffer->stride = 0; /* modified during alloc */
  250. if (!dev->alloc_bo(buffer)) {
  251. g_warning("alloc_bo failed");
  252. return false;
  253. }
  254. if (!vugbm_buffer_map(buffer)) {
  255. g_warning("map_bo failed");
  256. goto err;
  257. }
  258. return true;
  259. err:
  260. dev->free_bo(buffer);
  261. return false;
  262. }
  263. void
  264. vugbm_buffer_destroy(struct vugbm_buffer *buffer)
  265. {
  266. struct vugbm_device *dev = buffer->dev;
  267. vugbm_buffer_unmap(buffer);
  268. dev->free_bo(buffer);
  269. }