vugbm.c

/*
 * Virtio vhost-user GPU Device
 *
 * DRM helpers
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "vugbm.h"
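
/*
 * Three buffer backends are provided and probed in order by
 * vugbm_device_init(): GBM (a real DRM device), the Linux udmabuf
 * driver, and a plain malloc fallback that cannot export dmabufs.
 * Each backend fills in the vugbm_device function pointers.
 */
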
static bool
mem_alloc_bo(struct vugbm_buffer *buf)
{
    buf->mmap = g_malloc(buf->width * buf->height * 4);
    buf->stride = buf->width * 4;
    return true;
}

static void
mem_free_bo(struct vugbm_buffer *buf)
{
    g_free(buf->mmap);
}

static bool
mem_map_bo(struct vugbm_buffer *buf)
{
    return buf->mmap != NULL;
}

static void
mem_unmap_bo(struct vugbm_buffer *buf)
{
}

static void
mem_device_destroy(struct vugbm_device *dev)
{
}
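
/*
 * The udmabuf backend allocates buffer memory from a sealed memfd and
 * turns it into a dmabuf with the UDMABUF_CREATE ioctl on
 * /dev/udmabuf, so buffers can be shared without a DRM device.
 */
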
#ifdef CONFIG_MEMFD
struct udmabuf_create {
    uint32_t memfd;
    uint32_t flags;
    uint64_t offset;
    uint64_t size;
};

#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)

static size_t
udmabuf_get_size(struct vugbm_buffer *buf)
{
    return ROUND_UP(buf->width * buf->height * 4, qemu_real_host_page_size);
}
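
/*
 * The udmabuf driver requires a page-aligned size (hence the ROUND_UP
 * above) and a memfd sealed with F_SEAL_SHRINK, so the allocation
 * below applies that seal before the fd is handed to the driver.
 */
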
static bool
udmabuf_alloc_bo(struct vugbm_buffer *buf)
{
    int ret;

    buf->memfd = memfd_create("udmabuf-bo", MFD_ALLOW_SEALING);
    if (buf->memfd < 0) {
        return false;
    }

    ret = ftruncate(buf->memfd, udmabuf_get_size(buf));
    if (ret < 0) {
        close(buf->memfd);
        return false;
    }

    ret = fcntl(buf->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
    if (ret < 0) {
        close(buf->memfd);
        return false;
    }

    buf->stride = buf->width * 4;
    return true;
}

static void
udmabuf_free_bo(struct vugbm_buffer *buf)
{
    close(buf->memfd);
}

static bool
udmabuf_map_bo(struct vugbm_buffer *buf)
{
    buf->mmap = mmap(NULL, udmabuf_get_size(buf),
                     PROT_READ | PROT_WRITE, MAP_SHARED, buf->memfd, 0);
    if (buf->mmap == MAP_FAILED) {
        return false;
    }

    return true;
}
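
/*
 * UDMABUF_CREATE returns a new dmabuf fd backed by the memfd pages;
 * dev->fd here is the /dev/udmabuf fd opened in vugbm_device_init().
 */
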
static bool
udmabuf_get_fd(struct vugbm_buffer *buf, int *fd)
{
    struct udmabuf_create create = {
        .memfd = buf->memfd,
        .offset = 0,
        .size = udmabuf_get_size(buf),
    };

    *fd = ioctl(buf->dev->fd, UDMABUF_CREATE, &create);

    return *fd >= 0;
}

static void
udmabuf_unmap_bo(struct vugbm_buffer *buf)
{
    munmap(buf->mmap, udmabuf_get_size(buf));
}

static void
udmabuf_device_destroy(struct vugbm_device *dev)
{
    close(dev->fd);
}
#endif
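
/*
 * The GBM backend allocates linear buffer objects through Mesa's GBM
 * on a real DRM device and exports them with gbm_bo_get_fd().
 */
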
#ifdef CONFIG_GBM
static bool
alloc_bo(struct vugbm_buffer *buf)
{
    struct gbm_device *dev = buf->dev->dev;

    assert(!buf->bo);

    buf->bo = gbm_bo_create(dev, buf->width, buf->height,
                            buf->format,
                            GBM_BO_USE_RENDERING | GBM_BO_USE_LINEAR);

    if (buf->bo) {
        buf->stride = gbm_bo_get_stride(buf->bo);
        return true;
    }

    return false;
}

static void
free_bo(struct vugbm_buffer *buf)
{
    gbm_bo_destroy(buf->bo);
}

static bool
map_bo(struct vugbm_buffer *buf)
{
    uint32_t stride;

    buf->mmap = gbm_bo_map(buf->bo, 0, 0, buf->width, buf->height,
                           GBM_BO_TRANSFER_READ_WRITE, &stride,
                           &buf->mmap_data);

    assert(stride == buf->stride);

    return buf->mmap != NULL;
}

static void
unmap_bo(struct vugbm_buffer *buf)
{
    gbm_bo_unmap(buf->bo, buf->mmap_data);
}

static bool
get_fd(struct vugbm_buffer *buf, int *fd)
{
    *fd = gbm_bo_get_fd(buf->bo);

    return *fd >= 0;
}

static void
device_destroy(struct vugbm_device *dev)
{
    gbm_device_destroy(dev->dev);
}
#endif

void
vugbm_device_destroy(struct vugbm_device *dev)
{
    if (!dev->inited) {
        return;
    }

    dev->device_destroy(dev);
}
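
/*
 * Backends are probed in order: GBM if a gbm_device could be created
 * on the given fd, then udmabuf if /dev/udmabuf exists, else the
 * malloc fallback. The fallback path returns false to signal that no
 * dmabuf-capable device is available; the mem_* handlers are still
 * installed, so allocation and mapping keep working.
 */
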
bool
vugbm_device_init(struct vugbm_device *dev, int fd)
{
    dev->fd = fd;

#ifdef CONFIG_GBM
    dev->dev = gbm_create_device(fd);
#endif

    if (0) {
        /* nothing */
    }
#ifdef CONFIG_GBM
    else if (dev->dev != NULL) {
        dev->alloc_bo = alloc_bo;
        dev->free_bo = free_bo;
        dev->get_fd = get_fd;
        dev->map_bo = map_bo;
        dev->unmap_bo = unmap_bo;
        dev->device_destroy = device_destroy;
    }
#endif
#ifdef CONFIG_MEMFD
    else if (g_file_test("/dev/udmabuf", G_FILE_TEST_EXISTS)) {
        dev->fd = open("/dev/udmabuf", O_RDWR);
        if (dev->fd < 0) {
            return false;
        }
        g_debug("Using experimental udmabuf backend");
        dev->alloc_bo = udmabuf_alloc_bo;
        dev->free_bo = udmabuf_free_bo;
        dev->get_fd = udmabuf_get_fd;
        dev->map_bo = udmabuf_map_bo;
        dev->unmap_bo = udmabuf_unmap_bo;
        dev->device_destroy = udmabuf_device_destroy;
    }
#endif
    else {
        g_debug("Using mem fallback");
        dev->alloc_bo = mem_alloc_bo;
        dev->free_bo = mem_free_bo;
        dev->map_bo = mem_map_bo;
        dev->unmap_bo = mem_unmap_bo;
        dev->device_destroy = mem_device_destroy;
        return false;
    }

    dev->inited = true;
    return true;
}

static bool
vugbm_buffer_map(struct vugbm_buffer *buf)
{
    struct vugbm_device *dev = buf->dev;

    return dev->map_bo(buf);
}

static void
vugbm_buffer_unmap(struct vugbm_buffer *buf)
{
    struct vugbm_device *dev = buf->dev;

    dev->unmap_bo(buf);
}

bool
vugbm_buffer_can_get_dmabuf_fd(struct vugbm_buffer *buffer)
{
    if (!buffer->dev->get_fd) {
        return false;
    }

    return true;
}

bool
vugbm_buffer_get_dmabuf_fd(struct vugbm_buffer *buffer, int *fd)
{
    if (!vugbm_buffer_can_get_dmabuf_fd(buffer) ||
        !buffer->dev->get_fd(buffer, fd)) {
        g_warning("Failed to get dmabuf");
        return false;
    }

    if (*fd < 0) {
        g_warning("error: dmabuf_fd < 0");
        return false;
    }

    return true;
}
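
/*
 * Buffers are always allocated as GBM_FORMAT_XRGB8888 (4 bytes per
 * pixel); the backend's alloc_bo sets the actual stride.
 */
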
bool
vugbm_buffer_create(struct vugbm_buffer *buffer, struct vugbm_device *dev,
                    uint32_t width, uint32_t height)
{
    buffer->dev = dev;
    buffer->width = width;
    buffer->height = height;
    buffer->format = GBM_FORMAT_XRGB8888;
    buffer->stride = 0; /* modified during alloc */

    if (!dev->alloc_bo(buffer)) {
        g_warning("alloc_bo failed");
        return false;
    }

    if (!vugbm_buffer_map(buffer)) {
        g_warning("map_bo failed");
        goto err;
    }

    return true;

err:
    dev->free_bo(buffer);
    return false;
}

void
vugbm_buffer_destroy(struct vugbm_buffer *buffer)
{
    struct vugbm_device *dev = buffer->dev;

    vugbm_buffer_unmap(buffer);
    dev->free_bo(buffer);
}
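
/*
 * Typical usage, sketched (the real caller lives in vhost-user-gpu.c;
 * the variable names and dimensions below are illustrative only):
 *
 *     struct vugbm_device gdev = { 0 };
 *     struct vugbm_buffer buf;
 *     int dmabuf_fd;
 *
 *     vugbm_device_init(&gdev, drm_fd);
 *     if (vugbm_buffer_create(&buf, &gdev, 256, 256)) {
 *         // buffer is mapped on success; fill it via buf.mmap
 *         memset(buf.mmap, 0, buf.stride * buf.height);
 *         if (vugbm_buffer_can_get_dmabuf_fd(&buf)) {
 *             vugbm_buffer_get_dmabuf_fd(&buf, &dmabuf_fd);
 *         }
 *         vugbm_buffer_destroy(&buf);
 *     }
 *     vugbm_device_destroy(&gdev);
 */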