virtio-gpu-virgl.c

/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
#include "trace.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"

#include "ui/egl-helpers.h"

#include <virglrenderer.h>

struct virtio_gpu_virgl_resource {
    struct virtio_gpu_simple_resource base;
    MemoryRegion *mr;
};

static struct virtio_gpu_virgl_resource *
virtio_gpu_virgl_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return NULL;
    }

    return container_of(res, struct virtio_gpu_virgl_resource, base);
}

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
static void *
virgl_get_egl_display(G_GNUC_UNUSED void *cookie)
{
    return qemu_egl_display;
}
#endif

#if VIRGL_VERSION_MAJOR >= 1
struct virtio_gpu_virgl_hostmem_region {
    MemoryRegion mr;
    struct VirtIOGPU *g;
    bool finish_unmapping;
};

static struct virtio_gpu_virgl_hostmem_region *
to_hostmem_region(MemoryRegion *mr)
{
    return container_of(mr, struct virtio_gpu_virgl_hostmem_region, mr);
}

static void virtio_gpu_virgl_resume_cmdq_bh(void *opaque)
{
    VirtIOGPU *g = opaque;

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_virgl_hostmem_region_free(void *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b;
    VirtIOGPUGL *gl;

    vmr = to_hostmem_region(mr);
    vmr->finish_unmapping = true;

    b = VIRTIO_GPU_BASE(vmr->g);
    b->renderer_blocked--;

    /*
     * memory_region_unref() is executed from RCU thread context, while
     * virglrenderer works only on the main-loop thread that's holding GL
     * context.
     */
    gl = VIRTIO_GPU_GL(vmr->g);
    qemu_bh_schedule(gl->cmdq_resume_bh);
}
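
/*
 * Hostmem blob mapping: a virgl-exported blob is exposed to the guest as a
 * RAM-backed subregion of the device's hostmem memory region, at the
 * guest-chosen offset.  Because the data pointer belongs to virglrenderer,
 * the resource must not be unmapped while the MemoryRegion is still
 * referenced anywhere in QEMU.  Unmapping is therefore asynchronous: block
 * the renderer, delete the subregion, wait for the MR free callback above,
 * and resume the command queue from a bottom half on the main loop.
 */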
static int
virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g,
                                   struct virtio_gpu_virgl_resource *res,
                                   uint64_t offset)
{
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr;
    uint64_t size;
    void *data;
    int ret;

    if (!virtio_gpu_hostmem_enabled(b->conf)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: hostmem disabled\n", __func__);
        return -EOPNOTSUPP;
    }

    ret = virgl_renderer_resource_map(res->base.resource_id, &data, &size);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map virgl resource: %s\n",
                      __func__, strerror(-ret));
        return ret;
    }

    vmr = g_new0(struct virtio_gpu_virgl_hostmem_region, 1);
    vmr->g = g;

    mr = &vmr->mr;
    memory_region_init_ram_ptr(mr, OBJECT(mr), "blob", size, data);
    memory_region_add_subregion(&b->hostmem, offset, mr);
    memory_region_set_enabled(mr, true);

    /*
     * The MR could outlive the resource if a reference to the MR is held
     * outside of virtio-gpu. To prevent the resource from being unmapped
     * while the MR is alive, which would invalidate the data pointer, block
     * virtio-gpu command processing until the MR is fully unreferenced and
     * freed.
     */
    OBJECT(mr)->free = virtio_gpu_virgl_hostmem_region_free;

    res->mr = mr;

    return 0;
}

static int
virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g,
                                     struct virtio_gpu_virgl_resource *res,
                                     bool *cmd_suspended)
{
    struct virtio_gpu_virgl_hostmem_region *vmr;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    MemoryRegion *mr = res->mr;
    int ret;

    if (!mr) {
        return 0;
    }

    vmr = to_hostmem_region(res->mr);

    /*
     * Perform async unmapping in 3 steps:
     *
     * 1. Begin async unmapping with memory_region_del_subregion()
     *    and suspend/block cmd processing.
     * 2. Wait for res->mr to be freed and cmd processing resumed
     *    asynchronously by virtio_gpu_virgl_hostmem_region_free().
     * 3. Finish the unmapping with final virgl_renderer_resource_unmap().
     */
    if (vmr->finish_unmapping) {
        res->mr = NULL;
        g_free(vmr);

        ret = virgl_renderer_resource_unmap(res->base.resource_id);
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: failed to unmap virgl resource: %s\n",
                          __func__, strerror(-ret));
            return ret;
        }
    } else {
        *cmd_suspended = true;

        /* the renderer will be unblocked once the MR is freed */
        b->renderer_blocked++;

        /* the memory region owns res->mr and frees it by itself */
        memory_region_set_enabled(mr, false);
        memory_region_del_subregion(&b->hostmem, mr);
        object_unparent(OBJECT(mr));
    }

    return 0;
}
#endif
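
/*
 * 2D resources are created as ordinary virgl textures.  The hard-coded
 * target and bind values below correspond, in virgl protocol terms, to
 * Gallium's PIPE_TEXTURE_2D target and a render-target binding, and
 * VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP marks the resource as having a top-left
 * origin so the scanout is not displayed upside down.
 */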
static void virgl_cmd_create_resource_2d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;
    struct virtio_gpu_virgl_resource *res;

    VIRTIO_GPU_FILL_CMD(c2d);
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.width = c2d.width;
    res->base.height = c2d.height;
    res->base.format = c2d.format;
    res->base.resource_id = c2d.resource_id;
    res->base.dmabuf_fd = -1;
    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void virgl_cmd_create_resource_3d(VirtIOGPU *g,
                                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;
    struct virtio_gpu_virgl_resource *res;

    VIRTIO_GPU_FILL_CMD(c3d);
    trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
                                       c3d.width, c3d.height, c3d.depth);

    if (c3d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, c3d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c3d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.width = c3d.width;
    res->base.height = c3d.height;
    res->base.format = c3d.format;
    res->base.resource_id = c3d.resource_id;
    res->base.dmabuf_fd = -1;
    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}
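
/*
 * Resource teardown: guest backing pages are detached from virglrenderer
 * and their iovec mapping is released before the renderer reference is
 * dropped.  For blob resources that are still mapped into hostmem the unmap
 * is asynchronous; in that case *cmd_suspended is set and the unref is
 * completed later, once the memory region has been freed and command
 * processing resumes.
 */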
static void virgl_cmd_resource_unref(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     bool *cmd_suspended)
{
    struct virtio_gpu_resource_unref unref;
    struct virtio_gpu_virgl_resource *res;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(unref);
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_virgl_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

#if VIRGL_VERSION_MAJOR >= 1
    if (virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
    if (*cmd_suspended) {
        return;
    }
#endif

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);

    QTAILQ_REMOVE(&g->reslist, &res->base, next);

    g_free(res);
}

static void virgl_cmd_context_create(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VIRTIO_GPU_FILL_CMD(cc);
    trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
                                    cc.debug_name);

    if (cc.context_init) {
        if (!virtio_gpu_context_init_enabled(g->parent_obj.conf)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: context_init disabled",
                          __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

#if VIRGL_VERSION_MAJOR >= 1
        virgl_renderer_context_create_with_flags(cc.hdr.ctx_id,
                                                 cc.context_init,
                                                 cc.nlen,
                                                 cc.debug_name);
        return;
#endif
    }

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen, cc.debug_name);
}

static void virgl_cmd_context_destroy(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VIRTIO_GPU_FILL_CMD(cd);
    trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}

static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
                                   int width, int height)
{
    if (!g->parent_obj.scanout[idx].con) {
        return;
    }

    dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
}

static void virgl_cmd_resource_flush(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    }
}
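
/*
 * VIRTIO_GPU_CMD_SET_SCANOUT with a virgl-backed resource: the host texture
 * (or the D3D texture on the ANGLE/D3D11 path) is queried from
 * virglrenderer and handed directly to the display backend via
 * dpy_gl_scanout_texture(), so no pixel copy is involved.  A resource id of
 * 0, or an empty rectangle, turns the scanout off again.
 */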
static void virgl_cmd_set_scanout(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    int ret;

    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }
    g->parent_obj.enable = 1;

    if (ss.resource_id && ss.r.width && ss.r.height) {
        struct virgl_renderer_resource_info info;
        void *d3d_tex2d = NULL;

#if VIRGL_VERSION_MAJOR >= 1
        struct virgl_renderer_resource_info_ext ext;
        memset(&ext, 0, sizeof(ext));
        ret = virgl_renderer_resource_get_info_ext(ss.resource_id, &ext);
        info = ext.base;
        d3d_tex2d = ext.d3d_tex2d;
#else
        memset(&info, 0, sizeof(info));
        ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
#endif
        if (ret) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: illegal resource specified %d\n",
                          __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
                            ss.r.width, ss.r.height);
        virgl_renderer_force_ctx_0();
        dpy_gl_scanout_texture(
            g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
            info.flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
            info.width, info.height,
            ss.r.x, ss.r.y, ss.r.width, ss.r.height,
            d3d_tex2d);
    } else {
        dpy_gfx_replace_surface(
            g->parent_obj.scanout[ss.scanout_id].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
    }
    g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}
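
/*
 * VIRTIO_GPU_CMD_SUBMIT_3D: the context's command stream is copied out of
 * the request's scatter-gather list into a temporary buffer and passed to
 * virglrenderer, which expects the size in dwords (hence cs.size / 4).
 */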
static void virgl_cmd_submit_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VIRTIO_GPU_FILL_CMD(cs);
    trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)",
                      __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        g->stats.req_3d++;
        g->stats.bytes_3d += cs.size;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}

static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VIRTIO_GPU_FILL_CMD(t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VIRTIO_GPU_FILL_CMD(t3d);
    trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VIRTIO_GPU_FILL_CMD(tf3d);
    trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}

static void virgl_resource_attach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    uint32_t res_niov;
    int ret;

    VIRTIO_GPU_FILL_CMD(att_rb);
    trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);

    ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
                                        cmd, NULL, &res_iovs, &res_niov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, res_niov);
    if (ret != 0) {
        virtio_gpu_cleanup_mapping_iov(g, res_iovs, res_niov);
    }
}

static void virgl_resource_detach_backing(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VIRTIO_GPU_FILL_CMD(detach_rb);
    trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs);
}

static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VIRTIO_GPU_FILL_CMD(att_res);
    trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
                                        att_res.resource_id);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VIRTIO_GPU_FILL_CMD(det_res);
    trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
                                        det_res.resource_id);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}
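
/*
 * Capability-set negotiation: GET_CAPSET_INFO translates the guest's
 * capset_index into one of the capset ids collected in g->capset_ids (see
 * virtio_gpu_virgl_get_capsets() at the bottom of this file) and reports
 * the maximum version/size known to virglrenderer; GET_CAPSET then fills
 * the actual capset blob for the requested id and version.
 */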
static void virgl_cmd_get_capset_info(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VIRTIO_GPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index < g->capset_ids->len) {
        resp.capset_id = g_array_index(g->capset_ids, uint32_t,
                                       info.capset_index);
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_get_capset(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VIRTIO_GPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

#if VIRGL_VERSION_MAJOR >= 1
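
/*
 * Blob resources (virglrenderer >= 1.0 only).  A blob is either backed by
 * host memory exported by virglrenderer (VIRTIO_GPU_BLOB_MEM_HOST3D) or by
 * guest pages, in which case the guest's scatter-gather entries are turned
 * into an iovec and attached at creation time.  Blobs exported by the host
 * renderer can later be mapped into the hostmem region with
 * RESOURCE_MAP_BLOB and unmapped again with RESOURCE_UNMAP_BLOB.
 */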
static void virgl_cmd_resource_create_blob(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virgl_renderer_resource_create_blob_args virgl_args = { 0 };
    g_autofree struct virtio_gpu_virgl_resource *res = NULL;
    struct virtio_gpu_resource_create_blob cblob;
    struct virgl_renderer_resource_info info;
    int ret;

    if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    /*
     * Don't assign an existing resource to the g_autofree'd pointer; it
     * must stay alive when we bail out with an error here.
     */
    if (virtio_gpu_virgl_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_virgl_resource, 1);
    res->base.resource_id = cblob.resource_id;
    res->base.blob_size = cblob.size;
    res->base.dmabuf_fd = -1;

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
        ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                            cmd, &res->base.addrs,
                                            &res->base.iov, &res->base.iov_cnt);
        /* virtio_gpu_create_mapping_iov() returns 0 on success */
        if (ret) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
    }

    virgl_args.res_handle = cblob.resource_id;
    virgl_args.ctx_id = cblob.hdr.ctx_id;
    virgl_args.blob_mem = cblob.blob_mem;
    virgl_args.blob_id = cblob.blob_id;
    virgl_args.blob_flags = cblob.blob_flags;
    virgl_args.size = cblob.size;
    virgl_args.iovecs = res->base.iov;
    virgl_args.num_iovs = res->base.iov_cnt;

    ret = virgl_renderer_resource_create_blob(&virgl_args);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n",
                      __func__, strerror(-ret));
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        virtio_gpu_cleanup_mapping(g, &res->base);
        return;
    }

    ret = virgl_renderer_resource_get_info(cblob.resource_id, &info);
    if (ret) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource does not have info %d: %s\n",
                      __func__, cblob.resource_id, strerror(-ret));
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        virtio_gpu_cleanup_mapping(g, &res->base);
        virgl_renderer_resource_unref(cblob.resource_id);
        return;
    }

    res->base.dmabuf_fd = info.fd;

    QTAILQ_INSERT_HEAD(&g->reslist, &res->base, next);
    res = NULL;
}

static void virgl_cmd_resource_map_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_map_blob mblob;
    struct virtio_gpu_virgl_resource *res;
    struct virtio_gpu_resp_map_info resp;
    int ret;

    VIRTIO_GPU_FILL_CMD(mblob);
    virtio_gpu_map_blob_bswap(&mblob);

    res = virtio_gpu_virgl_find_resource(g, mblob.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, mblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_virgl_map_resource_blob(g, res, mblob.offset);
    if (ret) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    memset(&resp, 0, sizeof(resp));
    resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
    virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info);
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}

static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd,
                                          bool *cmd_suspended)
{
    struct virtio_gpu_resource_unmap_blob ublob;
    struct virtio_gpu_virgl_resource *res;
    int ret;

    VIRTIO_GPU_FILL_CMD(ublob);
    virtio_gpu_unmap_blob_bswap(&ublob);

    res = virtio_gpu_virgl_find_resource(g, ublob.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, ublob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    ret = virtio_gpu_virgl_unmap_resource_blob(g, res, cmd_suspended);
    if (ret) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
                                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_virgl_resource *res;
    struct virtio_gpu_set_scanout_blob ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    if (ss.width < 16 ||
        ss.height < 16 ||
        ss.r.x + ss.r.width > ss.width ||
        ss.r.y + ss.r.height > ss.height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id,
                      ss.r.x, ss.r.y, ss.r.width, ss.r.height,
                      ss.width, ss.height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res = virtio_gpu_virgl_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    if (res->base.dmabuf_fd < 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource not backed by dmabuf %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    if (!virtio_gpu_scanout_blob_to_fb(&fb, &ss, res->base.blob_size)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;
    if (virtio_gpu_update_dmabuf(g, ss.scanout_id, &res->base, &fb, &ss.r)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to update dmabuf\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_update_scanout(g, ss.scanout_id, &res->base, &fb, &ss.r);
}
#endif
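
/*
 * Main control-queue dispatcher.  virgl_renderer_force_ctx_0() makes the
 * renderer's context 0 current before the command is handled.  After the
 * switch: suspended or already-finished commands are left pending, errors
 * are reported back to the guest right away, unfenced commands get a plain
 * OK_NODATA response, and fenced commands are completed later, when
 * virglrenderer signals the fence through the write_fence callback below.
 */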
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    bool cmd_suspended = false;

    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd, &cmd_suspended);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
#if VIRGL_VERSION_MAJOR >= 1
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        virgl_cmd_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
        virgl_cmd_resource_map_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
        virgl_cmd_resource_unmap_blob(g, cmd, &cmd_suspended);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        virgl_cmd_set_scanout_blob(g, cmd);
        break;
#endif
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd_suspended || cmd->finished) {
        return;
    }
    if (cmd->error) {
        fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__,
                cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}
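
/*
 * Fence completion callback invoked by virglrenderer.  Every pending
 * command whose fence_id is less than or equal to the signalled fence is
 * retired here; e.g. with fenced commands 1, 3 and 2 pending,
 * write_fence(2) completes 1 and 2 while 3 stays queued until a later
 * fence value arrives.
 */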
static void virgl_write_fence(void *opaque, uint32_t fence)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            trace_virtio_gpu_dec_inflight_fences(g->inflight);
        }
    }
}

static virgl_renderer_gl_context
virgl_create_context(void *opaque, int scanout_idx,
                     struct virgl_renderer_gl_ctx_param *params)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext ctx;
    QEMUGLParams qparams;

    qparams.major_ver = params->major_ver;
    qparams.minor_ver = params->minor_ver;

    ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
    return (virgl_renderer_gl_context)ctx;
}

static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
}

static int virgl_make_context_current(void *opaque, int scanout_idx,
                                      virgl_renderer_gl_context ctx)
{
    VirtIOGPU *g = opaque;
    QEMUGLContext qctx = (QEMUGLContext)ctx;

    return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
                                   qctx);
}

static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
    .version            = 1,
    .write_fence        = virgl_write_fence,
    .create_gl_context  = virgl_create_context,
    .destroy_gl_context = virgl_destroy_context,
    .make_current       = virgl_make_context_current,
};

static void virtio_gpu_print_stats(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

    if (g->stats.requests) {
        fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n",
                g->stats.requests,
                g->stats.max_inflight,
                g->stats.req_3d,
                g->stats.bytes_3d);
        g->stats.requests = 0;
        g->stats.max_inflight = 0;
        g->stats.req_3d = 0;
        g->stats.bytes_3d = 0;
    } else {
        fprintf(stderr, "stats: idle\r");
    }
    timer_mod(gl->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
}

static void virtio_gpu_fence_poll(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

    virgl_renderer_poll();
    virtio_gpu_process_cmdq(g);
    if (!QTAILQ_EMPTY(&g->cmdq) || !QTAILQ_EMPTY(&g->fenceq)) {
        timer_mod(gl->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10);
    }
}

void virtio_gpu_virgl_fence_poll(VirtIOGPU *g)
{
    virtio_gpu_fence_poll(g);
}

void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g)
{
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
        dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
    }
}

void virtio_gpu_virgl_reset(VirtIOGPU *g)
{
    virgl_renderer_reset();
}
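
/*
 * Renderer initialisation.  The callback struct above defaults to version
 * 1; when the installed virglrenderer's callbacks version is new enough
 * and an EGL display is available, version 4 is advertised so the renderer
 * can reuse QEMU's EGL display.  Optional flags enable D3D11 texture
 * sharing (ANGLE builds) and the Venus/Vulkan render server when the
 * corresponding device properties are set.
 */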
int virtio_gpu_virgl_init(VirtIOGPU *g)
{
    int ret;
    uint32_t flags = 0;
    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);

#if VIRGL_RENDERER_CALLBACKS_VERSION >= 4
    if (qemu_egl_display) {
        virtio_gpu_3d_cbs.version = 4;
        virtio_gpu_3d_cbs.get_egl_display = virgl_get_egl_display;
    }
#endif
#ifdef VIRGL_RENDERER_D3D11_SHARE_TEXTURE
    if (qemu_egl_angle_d3d) {
        flags |= VIRGL_RENDERER_D3D11_SHARE_TEXTURE;
    }
#endif
#if VIRGL_VERSION_MAJOR >= 1
    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
        flags |= VIRGL_RENDERER_VENUS | VIRGL_RENDERER_RENDER_SERVER;
    }
#endif

    ret = virgl_renderer_init(g, flags, &virtio_gpu_3d_cbs);
    if (ret != 0) {
        error_report("virgl could not be initialized: %d", ret);
        return ret;
    }

    gl->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                  virtio_gpu_fence_poll, g);

    if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
        gl->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       virtio_gpu_print_stats, g);
        timer_mod(gl->print_stats,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
    }

#if VIRGL_VERSION_MAJOR >= 1
    gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
                                    virtio_gpu_virgl_resume_cmdq_bh,
                                    g);
#endif

    return 0;
}

static void virtio_gpu_virgl_add_capset(GArray *capset_ids, uint32_t capset_id)
{
    g_array_append_val(capset_ids, capset_id);
}
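
/*
 * Builds the list of capability sets advertised to the guest: VIRGL is
 * always present, VIRGL2 is added when virglrenderer reports a non-zero
 * maximum version for it, and VENUS is added only when the Venus device
 * property is enabled and the renderer reports a non-zero capset size.
 */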
GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g)
{
    uint32_t capset_max_ver, capset_max_size;
    GArray *capset_ids;

    capset_ids = g_array_new(false, false, sizeof(uint32_t));

    /* VIRGL is always supported. */
    virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL);

    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset_max_ver,
                               &capset_max_size);
    if (capset_max_ver) {
        virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VIRGL2);
    }

    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
        virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VENUS,
                                   &capset_max_ver,
                                   &capset_max_size);
        if (capset_max_size) {
            virtio_gpu_virgl_add_capset(capset_ids, VIRTIO_GPU_CAPSET_VENUS);
        }
    }

    return capset_ids;
}