virgl.c

/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <virglrenderer.h>
#include "virgl.h"

#include <epoxy/gl.h>
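
/*
 * Copy the renderer's cursor image for the given resource into the
 * caller-provided buffer; the cursor is expected to be 64x64 pixels at
 * 32 bits per pixel.
 */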
void
vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
                            gpointer data)
{
    uint32_t width, height;
    uint32_t *cursor;

    cursor = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    g_return_if_fail(cursor != NULL);
    g_return_if_fail(width == 64);
    g_return_if_fail(height == 64);

    memcpy(data, cursor, 64 * 64 * sizeof(uint32_t));
    free(cursor);
}

static void
virgl_cmd_context_create(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_create cc;

    VUGPU_FILL_CMD(cc);

    virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
                                  cc.debug_name);
}

static void
virgl_cmd_context_destroy(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_destroy cd;

    VUGPU_FILL_CMD(cd);

    virgl_renderer_context_destroy(cd.hdr.ctx_id);
}
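
/*
 * 2D resource creation is routed through the same virglrenderer call as
 * 3D: the hard-coded target and bind values below request a plain 2D
 * texture usable as a render target (presumably PIPE_TEXTURE_2D and
 * PIPE_BIND_RENDER_TARGET in gallium terms).
 */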
static void
virgl_cmd_create_resource_2d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_2d c2d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c2d);

    args.handle = c2d.resource_id;
    args.target = 2;
    args.format = c2d.format;
    args.bind = (1 << 1);
    args.width = c2d.width;
    args.height = c2d.height;
    args.depth = 1;
    args.array_size = 1;
    args.last_level = 0;
    args.nr_samples = 0;
    args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
    virgl_renderer_resource_create(&args, NULL, 0);
}

static void
virgl_cmd_create_resource_3d(VuGpu *g,
                             struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_create_3d c3d;
    struct virgl_renderer_resource_create_args args;

    VUGPU_FILL_CMD(c3d);

    args.handle = c3d.resource_id;
    args.target = c3d.target;
    args.format = c3d.format;
    args.bind = c3d.bind;
    args.width = c3d.width;
    args.height = c3d.height;
    args.depth = c3d.depth;
    args.array_size = c3d.array_size;
    args.last_level = c3d.last_level;
    args.nr_samples = c3d.nr_samples;
    args.flags = c3d.flags;
    virgl_renderer_resource_create(&args, NULL, 0);
}
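
/*
 * Before dropping the renderer's reference, detach and free any guest
 * backing iovecs still attached to the resource so the mapping is not
 * leaked.
 */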
static void
virgl_cmd_resource_unref(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_unref unref;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(unref);

    virgl_renderer_resource_detach_iov(unref.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs != NULL && num_iovs != 0) {
        vg_cleanup_mapping_iov(g, res_iovs, num_iovs);
    }
    virgl_renderer_resource_unref(unref.resource_id);
}

/* Not yet(?) defined in standard-headers, remove when possible */
#ifndef VIRTIO_GPU_CAPSET_VIRGL2
#define VIRTIO_GPU_CAPSET_VIRGL2 2
#endif
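
/*
 * The guest queries capability sets by index: index 0 maps to the VIRGL
 * capset, index 1 to VIRGL2. Unknown indices get a zeroed (version 0,
 * size 0) response.
 */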
static void
virgl_cmd_get_capset_info(VuGpu *g,
                          struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset_info info;
    struct virtio_gpu_resp_capset_info resp;

    VUGPU_FILL_CMD(info);

    memset(&resp, 0, sizeof(resp));
    if (info.capset_index == 0) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else if (info.capset_index == 1) {
        resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
        virgl_renderer_get_cap_set(resp.capset_id,
                                   &resp.capset_max_version,
                                   &resp.capset_max_size);
    } else {
        resp.capset_max_version = 0;
        resp.capset_max_size = 0;
    }
    resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
    vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}
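
/*
 * Advertise two capability sets when the renderer reports a usable VIRGL2
 * capset, otherwise only the base VIRGL one.
 */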
uint32_t
vg_virgl_get_num_capsets(void)
{
    uint32_t capset2_max_ver, capset2_max_size;

    virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
                               &capset2_max_ver,
                               &capset2_max_size);

    return capset2_max_ver ? 2 : 1;
}
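
/*
 * Fill a capset response for the guest; the payload size comes from the
 * renderer's reported maximum for the requested capset id.
 */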
static void
virgl_cmd_get_capset(VuGpu *g,
                     struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_get_capset gc;
    struct virtio_gpu_resp_capset *resp;
    uint32_t max_ver, max_size;

    VUGPU_FILL_CMD(gc);

    virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
                               &max_size);
    if (!max_size) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    resp = g_malloc0(sizeof(*resp) + max_size);
    resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
    virgl_renderer_fill_caps(gc.capset_id,
                             gc.capset_version,
                             (void *)resp->capset_data);
    vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
    g_free(resp);
}
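
/*
 * Copy the guest's command stream out of the request's scatter-gather
 * list (skipping the header), validate that the declared size could be
 * read, and submit it to virglrenderer in dwords.
 */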
static void
virgl_cmd_submit_3d(VuGpu *g,
                    struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_submit cs;
    void *buf;
    size_t s;

    VUGPU_FILL_CMD(cs);

    buf = g_malloc(cs.size);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(cs), buf, cs.size);
    if (s != cs.size) {
        g_critical("%s: size mismatch (%zd/%d)", __func__, s, cs.size);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        goto out;
    }

    virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);

out:
    g_free(buf);
}
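
/*
 * 2D transfers carry no context, mip level or strides: build a flat,
 * one-deep box from the request rectangle and let the renderer read the
 * data from the resource's already-attached backing (NULL iov).
 */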
static void
virgl_cmd_transfer_to_host_2d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_to_host_2d t2d;
    struct virtio_gpu_box box;

    VUGPU_FILL_CMD(t2d);

    box.x = t2d.r.x;
    box.y = t2d.r.y;
    box.z = 0;
    box.w = t2d.r.width;
    box.h = t2d.r.height;
    box.d = 1;

    virgl_renderer_transfer_write_iov(t2d.resource_id,
                                      0,
                                      0,
                                      0,
                                      0,
                                      (struct virgl_box *)&box,
                                      t2d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_to_host_3d(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d t3d;

    VUGPU_FILL_CMD(t3d);

    virgl_renderer_transfer_write_iov(t3d.resource_id,
                                      t3d.hdr.ctx_id,
                                      t3d.level,
                                      t3d.stride,
                                      t3d.layer_stride,
                                      (struct virgl_box *)&t3d.box,
                                      t3d.offset, NULL, 0);
}

static void
virgl_cmd_transfer_from_host_3d(VuGpu *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_transfer_host_3d tf3d;

    VUGPU_FILL_CMD(tf3d);

    virgl_renderer_transfer_read_iov(tf3d.resource_id,
                                     tf3d.hdr.ctx_id,
                                     tf3d.level,
                                     tf3d.stride,
                                     tf3d.layer_stride,
                                     (struct virgl_box *)&tf3d.box,
                                     tf3d.offset, NULL, 0);
}
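
/*
 * Map the guest pages listed in the request into an iovec array and
 * attach it to the resource; if the renderer rejects the attach, tear the
 * mapping down again so nothing is left dangling.
 */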
static void
virgl_resource_attach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_attach_backing att_rb;
    struct iovec *res_iovs;
    int ret;

    VUGPU_FILL_CMD(att_rb);

    ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virgl_renderer_resource_attach_iov(att_rb.resource_id,
                                             res_iovs, att_rb.nr_entries);
    if (ret != 0) {
        vg_cleanup_mapping_iov(g, res_iovs, att_rb.nr_entries);
    }
}

static void
virgl_resource_detach_backing(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_detach_backing detach_rb;
    struct iovec *res_iovs = NULL;
    int num_iovs = 0;

    VUGPU_FILL_CMD(detach_rb);

    virgl_renderer_resource_detach_iov(detach_rb.resource_id,
                                       &res_iovs,
                                       &num_iovs);
    if (res_iovs == NULL || num_iovs == 0) {
        return;
    }
    vg_cleanup_mapping_iov(g, res_iovs, num_iovs);
}
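
/*
 * Scanout setup: borrow the texture backing the resource from the
 * renderer, export it as a dma-buf fd and forward it to the vhost-user
 * frontend in a DMABUF_SCANOUT message. A zero resource id or empty
 * rectangle disables the scanout instead.
 */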
static void
virgl_cmd_set_scanout(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_set_scanout ss;
    struct virgl_renderer_texture_info info;
    int ret;

    VUGPU_FILL_CMD(ss);

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    memset(&info, 0, sizeof(info));

    if (ss.resource_id && ss.r.width && ss.r.height) {
        ret = virgl_renderer_borrow_texture_for_scanout(ss.resource_id, &info);
        if (ret == -1) {
            g_critical("%s: illegal resource specified %d\n",
                       __func__, ss.resource_id);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }

        int fd = -1;
        if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) < 0) {
            g_critical("%s: failed to get fd for texture\n", __func__);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
            return;
        }
        assert(fd >= 0);
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
            .payload.dmabuf_scanout.x = ss.r.x,
            .payload.dmabuf_scanout.y = ss.r.y,
            .payload.dmabuf_scanout.width = ss.r.width,
            .payload.dmabuf_scanout.height = ss.r.height,
            .payload.dmabuf_scanout.fd_width = info.width,
            .payload.dmabuf_scanout.fd_height = info.height,
            .payload.dmabuf_scanout.fd_stride = info.stride,
            .payload.dmabuf_scanout.fd_flags = info.flags,
            .payload.dmabuf_scanout.fd_drm_fourcc = info.drm_fourcc
        };
        vg_send_msg(g, &msg, fd);
        close(fd);
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
        };
        g_debug("disable scanout");
        vg_send_msg(g, &msg, -1);
    }
    g->scanout[ss.scanout_id].resource_id = ss.resource_id;
}
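
/*
 * Flush pending GL work, then send a DMABUF_UPDATE to the frontend for
 * every scanout currently showing the flushed resource, waiting for its
 * acknowledgement each time.
 */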
static void
virgl_cmd_resource_flush(VuGpu *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resource_flush rf;
    int i;

    VUGPU_FILL_CMD(rf);

    glFlush();
    if (!rf.resource_id) {
        g_debug("bad resource id for flush..?");
        return;
    }
    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        if (g->scanout[i].resource_id != rf.resource_id) {
            continue;
        }
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_UPDATE,
            .size = sizeof(VhostUserGpuUpdate),
            .payload.update.scanout_id = i,
            .payload.update.x = rf.r.x,
            .payload.update.y = rf.r.y,
            .payload.update.width = rf.r.width,
            .payload.update.height = rf.r.height
        };
        vg_send_msg(g, &msg, -1);
        vg_wait_ok(g);
    }
}

static void
virgl_cmd_ctx_attach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource att_res;

    VUGPU_FILL_CMD(att_res);

    virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
}

static void
virgl_cmd_ctx_detach_resource(VuGpu *g,
                              struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_ctx_resource det_res;

    VUGPU_FILL_CMD(det_res);

    virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
}
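
/*
 * Main control-queue dispatcher: decode the command header and run the
 * matching handler on renderer context 0. Unless the handler suspended
 * the command, send the error or OK response now; fenced commands are
 * completed later from the fence callback instead.
 */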
void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
{
    virgl_renderer_force_ctx_0();
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        virgl_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        virgl_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virgl_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        virgl_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        virgl_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virgl_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        virgl_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        virgl_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virgl_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virgl_resource_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virgl_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virgl_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virgl_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        /* TODO add security */
        virgl_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        virgl_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        virgl_cmd_get_capset(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(g, cmd);
        break;
    default:
        g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    if (cmd->state != VG_CMD_STATE_NEW) {
        return;
    }

    if (cmd->error) {
        g_warning("%s: ctrl 0x%x, error 0x%x\n", __func__,
                  cmd->cmd_hdr.type, cmd->error);
        vg_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }

    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    g_debug("Creating fence id:%" PRId64 " type:%d",
            cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
    virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
}
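
/*
 * Fence callback from virglrenderer: complete every queued command whose
 * fence id has been reached (fences can be signalled out of order).
 */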
static void
virgl_write_fence(void *opaque, uint32_t fence)
{
    VuGpu *g = opaque;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * the guest can end up emitting fences out of order
         * so we should check all fenced cmds not just the first one.
         */
        if (cmd->cmd_hdr.fence_id > fence) {
            continue;
        }
        g_debug("FENCE %" PRIu64, cmd->cmd_hdr.fence_id);
        vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        free(cmd);
        g->inflight--;
    }
}

#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
static int
virgl_get_drm_fd(void *opaque)
{
    VuGpu *g = opaque;

    return g->drm_rnode_fd;
}
#endif

static struct virgl_renderer_callbacks virgl_cbs = {
#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
    VIRGL_RENDERER_CALLBACKS_VERSION >= 2
    .get_drm_fd  = virgl_get_drm_fd,
    .version     = 2,
#else
    .version = 1,
#endif
    .write_fence = virgl_write_fence,
};

static void
vg_virgl_poll(VuDev *dev, int condition, void *data)
{
    virgl_renderer_poll();
}
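
/*
 * Initialise virglrenderer with EGL and threaded fence handling, and hook
 * its poll fd (when one is exposed) into the vhost-user-gpu event loop so
 * fence completions get processed.
 */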
bool
vg_virgl_init(VuGpu *g)
{
    int ret;

    if (g->drm_rnode_fd && virgl_cbs.version == 1) {
        g_warning("virgl will use the default rendernode");
    }

    ret = virgl_renderer_init(g,
                              VIRGL_RENDERER_USE_EGL |
                              VIRGL_RENDERER_THREAD_SYNC,
                              &virgl_cbs);
    if (ret != 0) {
        return false;
    }

    ret = virgl_renderer_get_poll_fd();
    if (ret != -1) {
        g->renderer_source =
            vug_source_new(&g->dev, ret, G_IO_IN, vg_virgl_poll, g);
    }

    return true;
}