/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <virglrenderer.h>
#include "virgl.h"

#include <epoxy/gl.h>
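
/*
 * Copy the cursor image virglrenderer holds for @resource_id into @data.
 * The vhost-user-gpu cursor-update message carries a fixed 64x64 32-bit
 * image, hence the hard size checks below.
 */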
void
vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
                            gpointer data)
{
    uint32_t width, height;
    uint32_t *cursor;

    cursor = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    g_return_if_fail(cursor != NULL);
    g_return_if_fail(width == 64);
    g_return_if_fail(height == 64);
    memcpy(data, cursor, 64 * 64 * sizeof(uint32_t));
    free(cursor);
}

static void
virgl_cmd_context_create(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VUGPU_FILL_CMD(cc);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void
virgl_cmd_context_destroy(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VUGPU_FILL_CMD(cd);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}
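
/*
 * 2D resources are created as host resources with fixed parameters:
 * target 2 and bind (1 << 1) select a 2D texture usable as a render
 * target in virglrenderer's Gallium-style enums, and the Y_0_TOP flag
 * marks y = 0 as the top scanline.
 */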
static void
virgl_cmd_create_resource_2d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c2d);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_create_resource_3d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c3d);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}
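
/*
 * Dropping a resource: detach and unmap any guest backing pages first,
 * then release the renderer's reference.
 */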
static void
virgl_cmd_resource_unref(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(unref);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        vg_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);
}

/* Not yet(?) defined in standard-headers, remove when possible */
#ifndef VIRTIO_GPU_CAPSET_VIRGL2
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#endif

static void
virgl_cmd_get_capset_info(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VUGPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}
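
/*
 * Advertise two capability sets (VIRGL and VIRGL2) when the renderer
 * reports a non-zero VIRGL2 version, otherwise only the base VIRGL set.
 */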
uint32_t
vg_virgl_get_num_capsets(void)
{
    uint32_t capset2_max_ver, capset2_max_size;

    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}
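
/*
 * Return the requested capability set to the guest; an unknown capset id
 * (max_size == 0) is rejected as an invalid parameter.
 */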
static void
virgl_cmd_get_capset(VuGpu *g,
                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VUGPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}

static void
virgl_cmd_submit_3d(VuGpu *g,
                    struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VUGPU_FILL_CMD(cs);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        g_critical("%s: size mismatch (%zd/%d)", __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}
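
/*
 * 2D transfers are expressed as a single-layer write on context 0,
 * with the rectangle converted to a depth-1 box.
 */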
static void
virgl_cmd_transfer_to_host_2d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VUGPU_FILL_CMD(t2d);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_to_host_3d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VUGPU_FILL_CMD(t3d);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VuGpu *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VUGPU_FILL_CMD(tf3d);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}
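
/*
 * Map the guest-supplied backing entries into an iovec array and attach
 * it to the resource as its backing store; undo the mapping if the
 * renderer rejects it.
 */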
static void
virgl_resource_attach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VUGPU_FILL_CMD(att_rb);

    ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, att_rb.nr_entries);
    if (ret != 0) {
        vg_cleanup_mapping_iov(g, res_iovs, att_rb.nr_entries);
    }
}

static void
virgl_resource_detach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(detach_rb);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    vg_cleanup_mapping_iov(g, res_iovs, num_iovs);
}
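
/*
 * Query the renderer's info for a resource. When virglrenderer provides
 * virgl_renderer_resource_get_info_ext(), the DRM format modifier is
 * returned as well; older versions fall back to the plain query and
 * report no modifier.
 */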
static int
virgl_get_resource_info_modifiers(uint32_t resource_id,
                                  struct virgl_renderer_resource_info *info,
                                  uint64_t *modifiers)
{
    int ret;
#ifdef VIRGL_RENDERER_RESOURCE_INFO_EXT_VERSION
    struct virgl_renderer_resource_info_ext info_ext;
    ret = virgl_renderer_resource_get_info_ext(resource_id, &info_ext);
    if (ret) {
        return ret;
    }

    *info = info_ext.base;
    *modifiers = info_ext.modifiers;
#else
    ret = virgl_renderer_resource_get_info(resource_id, info);
    if (ret) {
        return ret;
    }

    /*
     * Before virgl_renderer_resource_get_info_ext,
     * getting the modifiers was not possible.
     */
    *modifiers = 0;
#endif

    return 0;
}
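
/*
 * Associate a resource with a scanout. The backing texture is exported
 * as a dma-buf fd and sent to the vhost-user front-end with a
 * DMABUF_SCANOUT message (DMABUF_SCANOUT2 when modifiers are in use);
 * a zero resource id or empty rect disables the scanout instead.
 */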
static void
virgl_cmd_set_scanout(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_resource_info info;
    int ret;

    VUGPU_FILL_CMD(ss);

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        uint64_t modifiers = 0;
        ret = virgl_get_resource_info_modifiers(ss.resource_id, &info,
                                                &modifiers);
        if (ret) {
            g_critical("%s: illegal resource specified %d\n",
                       __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        int fd = -1;
        if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) < 0) {
            g_critical("%s: failed to get fd for texture\n", __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        assert(fd >= 0);
        VhostUserGpuMsg msg = {
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
            .payload.dmabuf_scanout.x = ss.r.x,
            .payload.dmabuf_scanout.y = ss.r.y,
            .payload.dmabuf_scanout.width = ss.r.width,
            .payload.dmabuf_scanout.height = ss.r.height,
            .payload.dmabuf_scanout.fd_width = info.width,
            .payload.dmabuf_scanout.fd_height = info.height,
            .payload.dmabuf_scanout.fd_stride = info.stride,
            .payload.dmabuf_scanout.fd_flags = info.flags,
            .payload.dmabuf_scanout.fd_drm_fourcc = info.drm_fourcc
        };

        if (g->use_modifiers) {
            /*
             * The message uses all the fields set in dmabuf_scanout plus
             * modifiers which is appended after VhostUserGpuDMABUFScanout.
             */
            msg.request = VHOST_USER_GPU_DMABUF_SCANOUT2;
            msg.size = sizeof(VhostUserGpuDMABUFScanout2);
            msg.payload.dmabuf_scanout2.modifier = modifiers;
        } else {
            msg.request = VHOST_USER_GPU_DMABUF_SCANOUT;
            msg.size = sizeof(VhostUserGpuDMABUFScanout);
        }

        vg_send_msg(g, &msg, fd);
        close(fd);
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
        };
        g_debug("disable scanout");
        vg_send_msg(g, &msg, -1);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}
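
/*
 * Flush pending GL work, then ask the front-end to update every scanout
 * that currently displays this resource, waiting for each acknowledgement.
 */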
static void
virgl_cmd_resource_flush(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VUGPU_FILL_CMD(rf);

    glFlush();
    if (!rf.resource_id) {
        g_debug("bad resource id for flush..?");
        return;
    }
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_UPDATE,
            .size = sizeof(VhostUserGpuUpdate),
            .payload.update.scanout_id = i,
            .payload.update.x = rf.r.x,
            .payload.update.y = rf.r.y,
            .payload.update.width = rf.r.width,
            .payload.update.height = rf.r.height
        };
        vg_send_msg(g, &msg, -1);
        vg_wait_ok(g);
    }
}

static void
virgl_cmd_ctx_attach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VUGPU_FILL_CMD(att_res);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void
virgl_cmd_ctx_detach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VUGPU_FILL_CMD(det_res);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}
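
/*
 * Dispatch a single control command to virglrenderer. Unless the handler
 * already completed or suspended the command (state != VG_CMD_STATE_NEW),
 * a response is sent right away; fenced commands instead get a renderer
 * fence and are completed from virgl_write_fence() once it signals.
 */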
void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
{
    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        vg_get_edid(g, cmd);
        break;
    default:
        g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->state != VG_CMD_STATE_NEW) {
        return;
    }

    if (cmd->error) {
        g_warning("%s: ctrl 0x%x, error 0x%x\n", __func__,
                  cmd->cmd_hdr.type, cmd->error);
        vg_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    g_debug("Creating fence id:%" PRId64 " type:%d",
            cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}
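
/*
 * virglrenderer fence callback: retire every queued command whose fence
 * id is at or below the signalled fence.
 */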
static void
virgl_write_fence(void *opaque, uint32_t fence)
{
    VuGpu *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        g_debug("FENCE %" PRIu64, cmd->cmd_hdr.fence_id);
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        free(cmd);
        g->inflight--;
    }
}

#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
static int
virgl_get_drm_fd(void *opaque)
{
    VuGpu *g = opaque;

    return g->drm_rnode_fd;
}
#endif

static struct virgl_renderer_callbacks virgl_cbs = {
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
    .get_drm_fd  = virgl_get_drm_fd,
    .version     = 2,
#else
    .version = 1,
#endif
    .write_fence = virgl_write_fence,
};

static void
vg_virgl_poll(VuDev *dev, int condition, void *data)
{
    virgl_renderer_poll();
}
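
/*
 * Initialize virglrenderer with EGL and thread-sync support. If the
 * renderer exposes a poll fd, hook it into the event loop so renderer
 * events are serviced via vg_virgl_poll().
 */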
bool
vg_virgl_init(VuGpu *g)
{
    int ret;

    if (g->drm_rnode_fd && virgl_cbs.version == 1) {
        g_warning("virgl will use the default rendernode");
    }

    ret = virgl_renderer_init(g,
                              VIRGL_RENDERER_USE_EGL |
                              VIRGL_RENDERER_THREAD_SYNC,
                              &virgl_cbs);
    if (ret != 0) {
        return false;
    }

    ret = virgl_renderer_get_poll_fd();
    if (ret != -1) {
        g->renderer_source =
            vug_source_new(&g->dev, ret, G_IO_IN, vg_virgl_poll, g);
    }

    return true;
}