/* hw/display/virtio-gpu-base.c */
  1. /*
  2. * Virtio GPU Device
  3. *
  4. * Copyright Red Hat, Inc. 2013-2014
  5. *
  6. * Authors:
  7. * Dave Airlie <airlied@redhat.com>
  8. * Gerd Hoffmann <kraxel@redhat.com>
  9. *
  10. * This work is licensed under the terms of the GNU GPL, version 2 or later.
  11. * See the COPYING file in the top-level directory.
  12. */
  13. #include "qemu/osdep.h"
  14. #include "hw/virtio/virtio-gpu.h"
  15. #include "migration/blocker.h"
  16. #include "qapi/error.h"
  17. #include "qemu/error-report.h"
  18. #include "trace.h"
  19. void
  20. virtio_gpu_base_reset(VirtIOGPUBase *g)
  21. {
  22. int i;
  23. g->enable = 0;
  24. for (i = 0; i < g->conf.max_outputs; i++) {
  25. g->scanout[i].resource_id = 0;
  26. g->scanout[i].width = 0;
  27. g->scanout[i].height = 0;
  28. g->scanout[i].x = 0;
  29. g->scanout[i].y = 0;
  30. g->scanout[i].ds = NULL;
  31. }
  32. }
  33. void
  34. virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
  35. struct virtio_gpu_resp_display_info *dpy_info)
  36. {
  37. int i;
  38. for (i = 0; i < g->conf.max_outputs; i++) {
  39. if (g->enabled_output_bitmask & (1 << i)) {
  40. dpy_info->pmodes[i].enabled = 1;
  41. dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
  42. dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
  43. }
  44. }
  45. }
/* No-op; installed as GraphicHwOps.invalidate in virtio_gpu_ops. */
static void virtio_gpu_invalidate_display(void *opaque)
{
}
/* No-op; installed as GraphicHwOps.gfx_update in virtio_gpu_ops. */
static void virtio_gpu_update_display(void *opaque)
{
}
/* No-op; installed as GraphicHwOps.text_update in virtio_gpu_ops. */
static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}
/*
 * Latch @event_type into the events_read field of the virtio config
 * space, then raise a config-changed notification so the guest driver
 * re-reads the config and sees the pending event bits.
 */
static void virtio_gpu_notify_event(VirtIOGPUBase *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}
  60. static void virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
  61. {
  62. VirtIOGPUBase *g = opaque;
  63. if (idx >= g->conf.max_outputs) {
  64. return;
  65. }
  66. g->req_state[idx].x = info->xoff;
  67. g->req_state[idx].y = info->yoff;
  68. g->req_state[idx].refresh_rate = info->refresh_rate;
  69. g->req_state[idx].width = info->width;
  70. g->req_state[idx].height = info->height;
  71. g->req_state[idx].width_mm = info->width_mm;
  72. g->req_state[idx].height_mm = info->height_mm;
  73. if (info->width && info->height) {
  74. g->enabled_output_bitmask |= (1 << idx);
  75. } else {
  76. g->enabled_output_bitmask &= ~(1 << idx);
  77. }
  78. /* send event to guest */
  79. virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
  80. return;
  81. }
  82. static void
  83. virtio_gpu_gl_flushed(void *opaque)
  84. {
  85. VirtIOGPUBase *g = opaque;
  86. VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_GET_CLASS(g);
  87. if (vgc->gl_flushed) {
  88. vgc->gl_flushed(g);
  89. }
  90. }
  91. static void
  92. virtio_gpu_gl_block(void *opaque, bool block)
  93. {
  94. VirtIOGPUBase *g = opaque;
  95. if (block) {
  96. g->renderer_blocked++;
  97. } else {
  98. g->renderer_blocked--;
  99. }
  100. assert(g->renderer_blocked >= 0);
  101. if (!block && g->renderer_blocked == 0) {
  102. virtio_gpu_gl_flushed(g);
  103. }
  104. }
  105. static int
  106. virtio_gpu_get_flags(void *opaque)
  107. {
  108. VirtIOGPUBase *g = opaque;
  109. int flags = GRAPHIC_FLAGS_NONE;
  110. if (virtio_gpu_virgl_enabled(g->conf)) {
  111. flags |= GRAPHIC_FLAGS_GL;
  112. }
  113. if (virtio_gpu_dmabuf_enabled(g->conf)) {
  114. flags |= GRAPHIC_FLAGS_DMABUF;
  115. }
  116. return flags;
  117. }
/* Console callbacks shared by every scanout (passed to graphic_console_init). */
static const GraphicHwOps virtio_gpu_ops = {
    .get_flags = virtio_gpu_get_flags,
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};
/*
 * Common realize step for all virtio-gpu variants.
 *
 * Validates the configuration, registers a migration blocker when virgl
 * is enabled, initializes the virtio transport and its control/cursor
 * queues, and creates one QEMU console per configured output.
 *
 * @ctrl_cb / @cursor_cb: virtqueue handlers supplied by the subclass.
 * Returns true on success; false with @errp set on failure.
 */
bool
virtio_gpu_base_device_realize(DeviceState *qdev,
                               VirtIOHandleOutput ctrl_cb,
                               VirtIOHandleOutput cursor_cb,
                               Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return false;
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* virgl renderer state cannot be saved/restored yet. */
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        if (migrate_add_blocker(g->migration_blocker, errp) < 0) {
            /*
             * NOTE(review): g->migration_blocker is left dangling after
             * error_free(); presumably harmless because unrealize only
             * runs after a successful realize — confirm.
             */
            error_free(g->migration_blocker);
            return false;
        }
    }

    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    /* Transport init must precede virtio_add_queue(). */
    virtio_init(VIRTIO_DEVICE(g), VIRTIO_ID_GPU,
                sizeof(struct virtio_gpu_config));

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        virtio_add_queue(vdev, 256, ctrl_cb);
        virtio_add_queue(vdev, 16, cursor_cb);
    } else {
        virtio_add_queue(vdev, 64, ctrl_cb);
        virtio_add_queue(vdev, 16, cursor_cb);
    }

    /* Output 0 starts enabled at the configured resolution. */
    g->enabled_output_bitmask = 1;
    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    g->hw_ops = &virtio_gpu_ops;
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
    }

    return true;
}
  167. static uint64_t
  168. virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features,
  169. Error **errp)
  170. {
  171. VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
  172. if (virtio_gpu_virgl_enabled(g->conf)) {
  173. features |= (1 << VIRTIO_GPU_F_VIRGL);
  174. }
  175. if (virtio_gpu_edid_enabled(g->conf)) {
  176. features |= (1 << VIRTIO_GPU_F_EDID);
  177. }
  178. if (virtio_gpu_blob_enabled(g->conf)) {
  179. features |= (1 << VIRTIO_GPU_F_RESOURCE_BLOB);
  180. }
  181. return features;
  182. }
/*
 * VirtioDeviceClass.set_features callback: only traces whether the
 * guest negotiated VIRTIO_GPU_F_VIRGL; no state is stored here.
 */
static void
virtio_gpu_base_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);

    trace_virtio_gpu_features(((features & virgl) == virgl));
}
  189. static void
  190. virtio_gpu_base_device_unrealize(DeviceState *qdev)
  191. {
  192. VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
  193. if (g->migration_blocker) {
  194. migrate_del_blocker(g->migration_blocker);
  195. error_free(g->migration_blocker);
  196. }
  197. }
/*
 * Class initializer for the abstract virtio-gpu base type: install the
 * common virtio callbacks and device-class properties shared by all
 * concrete virtio-gpu variants.
 */
static void
virtio_gpu_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->unrealize = virtio_gpu_base_device_unrealize;
    vdc->get_features = virtio_gpu_base_get_features;
    vdc->set_features = virtio_gpu_base_set_features;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    /* The device is not hot-pluggable. */
    dc->hotpluggable = false;
}
/* QOM type description: abstract base, subclassed by concrete GPUs. */
static const TypeInfo virtio_gpu_base_info = {
    .name = TYPE_VIRTIO_GPU_BASE,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPUBase),
    .class_size = sizeof(VirtIOGPUBaseClass),
    .class_init = virtio_gpu_base_class_init,
    .abstract = true
};
module_obj(TYPE_VIRTIO_GPU_BASE);
module_kconfig(VIRTIO_GPU);

/* Register the base type with QOM at startup. */
static void
virtio_register_types(void)
{
    type_register_static(&virtio_gpu_base_info);
}

type_init(virtio_register_types)
/*
 * Compile-time checks that the guest-visible command/response structs
 * have the exact sizes expected on the virtio-gpu wire format.
 */
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);