/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "hw/virtio/virtio-gpu.h"
#include "migration/blocker.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/display/edid.h"
#include "trace.h"
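
/* Reset handler: disable the device and clear all per-scanout state. */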
void
virtio_gpu_base_reset(VirtIOGPUBase *g)
{
    int i;

    g->enable = 0;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }
}
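
/*
 * Fill a VIRTIO_GPU_CMD_GET_DISPLAY_INFO response: report each enabled
 * output with its currently requested geometry (fields are little-endian
 * on the wire).
 */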
void
virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
                                  struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}
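
/*
 * Build an EDID blob for one scanout from the geometry requested by the UI,
 * used to answer VIRTIO_GPU_CMD_GET_EDID.
 */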
void
virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout,
                              struct virtio_gpu_resp_edid *edid)
{
    qemu_edid_info info = {
        .width_mm = g->req_state[scanout].width_mm,
        .height_mm = g->req_state[scanout].height_mm,
        .prefx = g->req_state[scanout].width,
        .prefy = g->req_state[scanout].height,
        .refresh_rate = g->req_state[scanout].refresh_rate,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}
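
/*
 * These GraphicHwOps callbacks are intentionally empty: the guest drives
 * all display updates through the virtio control queue.
 */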
static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}
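
/* Latch an event in the device config space and notify the guest. */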
static void virtio_gpu_notify_event(VirtIOGPUBase *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}
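
/*
 * UI callback: the display frontend reports new window geometry for output
 * 'idx'.  Record it as the requested state and tell the guest to re-query
 * the display info.
 */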
static void virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPUBase *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].refresh_rate = info->refresh_rate;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;
    g->req_state[idx].width_mm = info->width_mm;
    g->req_state[idx].height_mm = info->height_mm;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
}
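
/* Invoke the subclass gl_flushed hook, if one is provided. */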
static void
virtio_gpu_gl_flushed(void *opaque)
{
    VirtIOGPUBase *g = opaque;
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_GET_CLASS(g);

    if (vgc->gl_flushed) {
        vgc->gl_flushed(g);
    }
}
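
/*
 * Display backends call this to block/unblock the renderer around GL
 * operations; block requests may nest.  When the last block is released,
 * notify the subclass via gl_flushed.
 */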
static void
virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPUBase *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (!block && g->renderer_blocked == 0) {
        virtio_gpu_gl_flushed(g);
    }
}
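
/* Report GL/DMABUF capabilities to the console layer, based on the config. */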
static int
virtio_gpu_get_flags(void *opaque)
{
    VirtIOGPUBase *g = opaque;
    int flags = GRAPHIC_FLAGS_NONE;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        flags |= GRAPHIC_FLAGS_GL;
    }
    if (virtio_gpu_dmabuf_enabled(g->conf)) {
        flags |= GRAPHIC_FLAGS_DMABUF;
    }

    return flags;
}
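
/* Console callbacks registered for every scanout in realize(). */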
static const GraphicHwOps virtio_gpu_ops = {
    .get_flags = virtio_gpu_get_flags,
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};
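
/*
 * Common realize step for all virtio-gpu variants: validate the
 * configuration, block migration when virgl is enabled, create the control
 * and cursor virtqueues (a larger control queue is used in 3D mode) and one
 * QEMU console per scanout.
 */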
bool
virtio_gpu_base_device_realize(DeviceState *qdev,
                               VirtIOHandleOutput ctrl_cb,
                               VirtIOHandleOutput cursor_cb,
                               Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return false;
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        if (migrate_add_blocker(&g->migration_blocker, errp) < 0) {
            return false;
        }
    }

    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), VIRTIO_ID_GPU,
                sizeof(struct virtio_gpu_config));

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        virtio_add_queue(vdev, 256, ctrl_cb);
        virtio_add_queue(vdev, 16, cursor_cb);
    } else {
        virtio_add_queue(vdev, 64, ctrl_cb);
        virtio_add_queue(vdev, 16, cursor_cb);
    }

    g->enabled_output_bitmask = 1;

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    g->hw_ops = &virtio_gpu_ops;
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
    }

    return true;
}
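
/* Advertise optional virtio-gpu feature bits according to the device config. */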
static uint64_t
virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features,
                             Error **errp)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    if (virtio_gpu_virgl_enabled(g->conf) ||
        virtio_gpu_rutabaga_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    if (virtio_gpu_edid_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_EDID);
    }
    if (virtio_gpu_blob_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_RESOURCE_BLOB);
    }
    if (virtio_gpu_context_init_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_CONTEXT_INIT);
    }
    if (virtio_gpu_resource_uuid_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_RESOURCE_UUID);
    }

    return features;
}

static void
virtio_gpu_base_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);

    trace_virtio_gpu_features(((features & virgl) == virgl));
}
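
/* Tear down the virtqueues and drop the migration blocker, if any. */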
void
virtio_gpu_base_device_unrealize(DeviceState *qdev)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);

    virtio_del_queue(vdev, 0);
    virtio_del_queue(vdev, 1);
    virtio_cleanup(vdev);
    migrate_del_blocker(&g->migration_blocker);
}

static void
virtio_gpu_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->unrealize = virtio_gpu_base_device_unrealize;
    vdc->get_features = virtio_gpu_base_get_features;
    vdc->set_features = virtio_gpu_base_set_features;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_base_info = {
    .name = TYPE_VIRTIO_GPU_BASE,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPUBase),
    .class_size = sizeof(VirtIOGPUBaseClass),
    .class_init = virtio_gpu_base_class_init,
    .abstract = true
};
module_obj(TYPE_VIRTIO_GPU_BASE);
module_kconfig(VIRTIO_GPU);

static void
virtio_register_types(void)
{
    type_register_static(&virtio_gpu_base_info);
}

type_init(virtio_register_types)
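
/*
 * Compile-time checks that the command and response structures match the
 * sizes fixed by the virtio-gpu wire protocol.
 */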
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);