/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "trace.h"
#include "exec/ramblock.h"
#include "sysemu/hostmem.h"
#include <sys/ioctl.h>
#include <fcntl.h>
#include <linux/memfd.h>
#include "qemu/memfd.h"
#include "standard-headers/linux/udmabuf.h"
  27. static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res)
  28. {
  29. struct udmabuf_create_list *list;
  30. RAMBlock *rb;
  31. ram_addr_t offset;
  32. int udmabuf, i;
  33. udmabuf = udmabuf_fd();
  34. if (udmabuf < 0) {
  35. return;
  36. }
  37. list = g_malloc0(sizeof(struct udmabuf_create_list) +
  38. sizeof(struct udmabuf_create_item) * res->iov_cnt);
  39. for (i = 0; i < res->iov_cnt; i++) {
  40. rcu_read_lock();
  41. rb = qemu_ram_block_from_host(res->iov[i].iov_base, false, &offset);
  42. rcu_read_unlock();
  43. if (!rb || rb->fd < 0) {
  44. g_free(list);
  45. return;
  46. }
  47. list->list[i].memfd = rb->fd;
  48. list->list[i].offset = offset;
  49. list->list[i].size = res->iov[i].iov_len;
  50. }
  51. list->count = res->iov_cnt;
  52. list->flags = UDMABUF_FLAGS_CLOEXEC;
  53. res->dmabuf_fd = ioctl(udmabuf, UDMABUF_CREATE_LIST, list);
  54. if (res->dmabuf_fd < 0) {
  55. warn_report("%s: UDMABUF_CREATE_LIST: %s", __func__,
  56. strerror(errno));
  57. }
  58. g_free(list);
  59. }
  60. static void virtio_gpu_remap_udmabuf(struct virtio_gpu_simple_resource *res)
  61. {
  62. res->remapped = mmap(NULL, res->blob_size, PROT_READ,
  63. MAP_SHARED, res->dmabuf_fd, 0);
  64. if (res->remapped == MAP_FAILED) {
  65. warn_report("%s: dmabuf mmap failed: %s", __func__,
  66. strerror(errno));
  67. res->remapped = NULL;
  68. }
  69. }
  70. static void virtio_gpu_destroy_udmabuf(struct virtio_gpu_simple_resource *res)
  71. {
  72. if (res->remapped) {
  73. munmap(res->remapped, res->blob_size);
  74. res->remapped = NULL;
  75. }
  76. if (res->dmabuf_fd >= 0) {
  77. close(res->dmabuf_fd);
  78. res->dmabuf_fd = -1;
  79. }
  80. }
  81. static int find_memory_backend_type(Object *obj, void *opaque)
  82. {
  83. bool *memfd_backend = opaque;
  84. int ret;
  85. if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
  86. HostMemoryBackend *backend = MEMORY_BACKEND(obj);
  87. RAMBlock *rb = backend->mr.ram_block;
  88. if (rb && rb->fd > 0) {
  89. ret = fcntl(rb->fd, F_GET_SEALS);
  90. if (ret > 0) {
  91. *memfd_backend = true;
  92. }
  93. }
  94. }
  95. return 0;
  96. }
  97. bool virtio_gpu_have_udmabuf(void)
  98. {
  99. Object *memdev_root;
  100. int udmabuf;
  101. bool memfd_backend = false;
  102. udmabuf = udmabuf_fd();
  103. if (udmabuf < 0) {
  104. return false;
  105. }
  106. memdev_root = object_resolve_path("/objects", NULL);
  107. object_child_foreach(memdev_root, find_memory_backend_type, &memfd_backend);
  108. return memfd_backend;
  109. }
  110. void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res)
  111. {
  112. void *pdata = NULL;
  113. res->dmabuf_fd = -1;
  114. if (res->iov_cnt == 1) {
  115. pdata = res->iov[0].iov_base;
  116. } else {
  117. virtio_gpu_create_udmabuf(res);
  118. if (res->dmabuf_fd < 0) {
  119. return;
  120. }
  121. virtio_gpu_remap_udmabuf(res);
  122. if (!res->remapped) {
  123. return;
  124. }
  125. pdata = res->remapped;
  126. }
  127. res->blob = pdata;
  128. }
/*
 * Per-resource teardown counterpart of virtio_gpu_init_udmabuf():
 * unmaps and closes the udmabuf when the multi-iov path fully
 * succeeded (the single-iov path never sets res->remapped, so this is
 * a no-op for it).
 *
 * NOTE(review): if virtio_gpu_create_udmabuf() succeeded but the
 * subsequent mmap failed, res->dmabuf_fd is still open here yet
 * res->remapped is NULL, so the fd would not be closed — possible fd
 * leak; confirm against the callers whether that partial state can
 * reach this point.
 */
void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res)
{
    if (res->remapped) {
        virtio_gpu_destroy_udmabuf(res);
    }
}
  135. static void virtio_gpu_free_dmabuf(VirtIOGPU *g, VGPUDMABuf *dmabuf)
  136. {
  137. struct virtio_gpu_scanout *scanout;
  138. scanout = &g->parent_obj.scanout[dmabuf->scanout_id];
  139. dpy_gl_release_dmabuf(scanout->con, &dmabuf->buf);
  140. QTAILQ_REMOVE(&g->dmabuf.bufs, dmabuf, next);
  141. g_free(dmabuf);
  142. }
  143. static VGPUDMABuf
  144. *virtio_gpu_create_dmabuf(VirtIOGPU *g,
  145. uint32_t scanout_id,
  146. struct virtio_gpu_simple_resource *res,
  147. struct virtio_gpu_framebuffer *fb,
  148. struct virtio_gpu_rect *r)
  149. {
  150. VGPUDMABuf *dmabuf;
  151. if (res->dmabuf_fd < 0) {
  152. return NULL;
  153. }
  154. dmabuf = g_new0(VGPUDMABuf, 1);
  155. dmabuf->buf.width = fb->width;
  156. dmabuf->buf.height = fb->height;
  157. dmabuf->buf.stride = fb->stride;
  158. dmabuf->buf.x = r->x;
  159. dmabuf->buf.y = r->y;
  160. dmabuf->buf.scanout_width = r->width;
  161. dmabuf->buf.scanout_height = r->height;
  162. dmabuf->buf.fourcc = qemu_pixman_to_drm_format(fb->format);
  163. dmabuf->buf.fd = res->dmabuf_fd;
  164. dmabuf->buf.allow_fences = true;
  165. dmabuf->buf.draw_submitted = false;
  166. dmabuf->scanout_id = scanout_id;
  167. QTAILQ_INSERT_HEAD(&g->dmabuf.bufs, dmabuf, next);
  168. return dmabuf;
  169. }
  170. int virtio_gpu_update_dmabuf(VirtIOGPU *g,
  171. uint32_t scanout_id,
  172. struct virtio_gpu_simple_resource *res,
  173. struct virtio_gpu_framebuffer *fb,
  174. struct virtio_gpu_rect *r)
  175. {
  176. struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
  177. VGPUDMABuf *new_primary, *old_primary = NULL;
  178. new_primary = virtio_gpu_create_dmabuf(g, scanout_id, res, fb, r);
  179. if (!new_primary) {
  180. return -EINVAL;
  181. }
  182. if (g->dmabuf.primary[scanout_id]) {
  183. old_primary = g->dmabuf.primary[scanout_id];
  184. }
  185. g->dmabuf.primary[scanout_id] = new_primary;
  186. qemu_console_resize(scanout->con,
  187. new_primary->buf.scanout_width,
  188. new_primary->buf.scanout_height);
  189. dpy_gl_scanout_dmabuf(scanout->con, &new_primary->buf);
  190. if (old_primary) {
  191. virtio_gpu_free_dmabuf(g, old_primary);
  192. }
  193. return 0;
  194. }