/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

  14. #include "qemu/osdep.h"
  15. #include "qemu/drm.h"
  16. #include "qapi/error.h"
  17. #include "qemu/sockets.h"
  18. #include <pixman.h>
  19. #include <glib-unix.h>
  20. #include "vugpu.h"
  21. #include "hw/virtio/virtio-gpu-bswap.h"
  22. #include "hw/virtio/virtio-gpu-pixman.h"
  23. #include "virgl.h"
  24. #include "vugbm.h"
  25. enum {
  26. VHOST_USER_GPU_MAX_QUEUES = 2,
  27. };
struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    struct vugbm_buffer buffer;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

static gboolean opt_print_caps;
static int opt_fdnum = -1;
static char *opt_socket_path;
static char *opt_render_node;
static gboolean opt_virgl;

static void vg_handle_ctrl(VuDev *dev, int qidx);
static void vg_cleanup_mapping(VuGpu *g,
                               struct virtio_gpu_simple_resource *res);

static const char *
vg_cmd_to_string(int cmd)
{
#define CMD(cmd) [cmd] = #cmd
    static const char *vg_cmd_str[] = {
        CMD(VIRTIO_GPU_UNDEFINED),

        /* 2d commands */
        CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF),
        CMD(VIRTIO_GPU_CMD_SET_SCANOUT),
        CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
        CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET),
        CMD(VIRTIO_GPU_CMD_GET_EDID),

        /* 3d commands */
        CMD(VIRTIO_GPU_CMD_CTX_CREATE),
        CMD(VIRTIO_GPU_CMD_CTX_DESTROY),
        CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D),
        CMD(VIRTIO_GPU_CMD_SUBMIT_3D),

        /* cursor commands */
        CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR),
        CMD(VIRTIO_GPU_CMD_MOVE_CURSOR),
    };
#undef CMD

    /* vg_cmd_str is sparse (commands start at 0x0100), so guard for NULL */
    if (cmd >= 0 && cmd < G_N_ELEMENTS(vg_cmd_str) && vg_cmd_str[cmd]) {
        return vg_cmd_str[cmd];
    } else {
        return "unknown";
    }
}

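/*
 * Blocking read from the vhost-user-gpu socket: retries on EINTR/EAGAIN
 * and warns if fewer than buflen bytes arrive.
 */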
static int
vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
{
    int ret;

    do {
        ret = read(sock, buf, buflen);
    } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

static void
vg_sock_fd_close(VuGpu *g)
{
    if (g->sock_fd >= 0) {
        close(g->sock_fd);
        g->sock_fd = -1;
    }
}

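/*
 * The frontend acknowledges VHOST_USER_GPU_DMABUF_UPDATE with a reply.
 * While that reply is pending, control-queue processing is suspended
 * (g->wait_in holds the GSource id); source_wait_cb resumes it once the
 * ack arrives.
 */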
static gboolean
source_wait_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;

    if (!vg_recv_msg(g, VHOST_USER_GPU_DMABUF_UPDATE, 0, NULL)) {
        return G_SOURCE_CONTINUE;
    }

    /* resume */
    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_wait_ok(VuGpu *g)
{
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               source_wait_cb, g);
}

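/*
 * Write a buffer to the socket, optionally passing one file descriptor
 * as SCM_RIGHTS ancillary data (used to hand dmabuf fds to the frontend).
 */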
static int
vg_sock_fd_write(int sock, const void *buf, ssize_t buflen, int fd)
{
    ssize_t ret;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = buflen,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
    };
    union {
        struct cmsghdr cmsghdr;
        char control[CMSG_SPACE(sizeof(int))];
    } cmsgu;
    struct cmsghdr *cmsg;

    if (fd != -1) {
        msg.msg_control = cmsgu.control;
        msg.msg_controllen = sizeof(cmsgu.control);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;

        *((int *)CMSG_DATA(cmsg)) = fd;
    }

    do {
        ret = sendmsg(sock, &msg, 0);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

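/*
 * Messages are framed as a { request, flags, size } header followed by a
 * size-byte payload; replies must carry VHOST_USER_GPU_MSG_FLAG_REPLY.
 * A short read or write closes the socket.
 */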
void
vg_send_msg(VuGpu *vg, const VhostUserGpuMsg *msg, int fd)
{
    if (vg_sock_fd_write(vg->sock_fd, msg,
                         VHOST_USER_GPU_HDR_SIZE + msg->size, fd) < 0) {
        vg_sock_fd_close(vg);
    }
}

bool
vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
            gpointer payload)
{
    uint32_t req, flags, size;

    if (vg_sock_fd_read(g->sock_fd, &req, sizeof(req)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &flags, sizeof(flags)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &size, sizeof(size)) < 0) {
        goto err;
    }

    g_return_val_if_fail(req == expect_req, false);
    g_return_val_if_fail(flags & VHOST_USER_GPU_MSG_FLAG_REPLY, false);
    g_return_val_if_fail(size == expect_size, false);

    if (size && vg_sock_fd_read(g->sock_fd, payload, size) != size) {
        goto err;
    }

    return true;

err:
    vg_sock_fd_close(g);
    return false;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VuGpu *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void
vg_ctrl_response(VuGpu *g,
                 struct virtio_gpu_ctrl_command *cmd,
                 struct virtio_gpu_ctrl_hdr *resp,
                 size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        g_critical("%s: response size incorrect %zu vs %zu",
                   __func__, s, resp_len);
    }
    vu_queue_push(&g->dev.parent, cmd->vq, &cmd->elem, s);
    vu_queue_notify(&g->dev.parent, cmd->vq);
    cmd->state = VG_CMD_STATE_FINISHED;
}

void
vg_ctrl_response_nodata(VuGpu *g,
                        struct virtio_gpu_ctrl_command *cmd,
                        enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp = {
        .type = type,
    };

    vg_ctrl_response(g, cmd, &resp, sizeof(resp));
}

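/*
 * GET_DISPLAY_INFO and GET_EDID are forwarded to the frontend over the
 * gpu socket; the guest command stays pending on fenceq until the
 * frontend's reply arrives, then the response is pushed to the virtqueue
 * and control-queue processing resumes.
 */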
static gboolean
get_display_info_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    struct virtio_gpu_resp_display_info dpy_info = { {} };
    VuGpu *vg = user_data;
    struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);

    g_debug("disp info cb");
    assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
    if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_DISPLAY_INFO,
                     sizeof(dpy_info), &dpy_info)) {
        return G_SOURCE_CONTINUE;
    }

    QTAILQ_REMOVE(&vg->fenceq, cmd, next);
    vg_ctrl_response(vg, cmd, &dpy_info.hdr, sizeof(dpy_info));

    vg->wait_in = 0;
    vg_handle_ctrl(&vg->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_DISPLAY_INFO,
        .size = 0,
    };

    assert(vg->wait_in == 0);

    vg_send_msg(vg, &msg, -1);
    vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
                                get_display_info_cb, vg);
    cmd->state = VG_CMD_STATE_PENDING;
}

static gboolean
get_edid_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    struct virtio_gpu_resp_edid resp_edid;
    VuGpu *vg = user_data;
    struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);

    g_debug("get edid cb");
    assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_EDID);
    if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_EDID,
                     sizeof(resp_edid), &resp_edid)) {
        return G_SOURCE_CONTINUE;
    }

    QTAILQ_REMOVE(&vg->fenceq, cmd, next);
    vg_ctrl_response(vg, cmd, &resp_edid.hdr, sizeof(resp_edid));

    vg->wait_in = 0;
    vg_handle_ctrl(&vg->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_get_edid(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_cmd_get_edid get_edid;

    VUGPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_EDID,
        .size = sizeof(VhostUserGpuEdidRequest),
        .payload.edid_req = {
            .scanout_id = get_edid.scanout,
        },
    };

    assert(vg->wait_in == 0);

    vg_send_msg(vg, &msg, -1);
    vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
                                get_edid_cb, vg);
    cmd->state = VG_CMD_STATE_PENDING;
}

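/*
 * Create a host-side 2D resource: allocate a vugbm buffer (dmabuf-capable
 * when a DRM render node is available) and wrap it in a pixman image so
 * 2D transfers can write into it.
 */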
static void
vg_resource_create_2d(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VUGPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));

    if (c2d.resource_id == 0) {
        g_critical("%s: resource id 0 is not allowed", __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        g_critical("%s: resource already exists %d", __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        g_critical("%s: host couldn't handle guest format %d",
                   __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height);
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          (uint32_t *)res->buffer.mmap,
                                          res->buffer.stride);
    if (!res->image) {
        g_critical("%s: resource creation failed %d %d %d",
                   __func__, c2d.resource_id, c2d.width, c2d.height);
        vugbm_buffer_destroy(&res->buffer);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void
vg_disable_scanout(VuGpu *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    scanout->width = 0;
    scanout->height = 0;

    if (g->sock_fd >= 0) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout.scanout_id = scanout_id,
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_destroy(VuGpu *g,
                    struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                vg_disable_scanout(g, i);
            }
        }
    }

    vugbm_buffer_destroy(&res->buffer);
    vg_cleanup_mapping(g, res);
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void
vg_resource_unref(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    vg_resource_destroy(g, res);
}

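/*
 * Translate the guest-physical backing entries that follow the
 * attach_backing header into an iovec of host virtual addresses
 * (via vu_gpa_to_va); capped at 16384 entries to bound the allocation.
 */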
int
vg_create_mapping_iov(VuGpu *g,
                      struct virtio_gpu_resource_attach_backing *ab,
                      struct virtio_gpu_ctrl_command *cmd,
                      struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        g_critical("%s: nr_entries is too big (%d > 16384)",
                   __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        g_critical("%s: command data size incorrect %zu vs %zu",
                   __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_new0(struct iovec, ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = vu_gpa_to_va(&g->dev.parent, &len, ents[i].addr);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            g_critical("%s: resource %d element %d",
                       __func__, ab->resource_id, i);
            g_free(*iov);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

static void
vg_resource_attach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VUGPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

/* For now this only frees the iov; it may do more work later. */
void vg_cleanup_mapping_iov(VuGpu *g,
                            struct iovec *iov, uint32_t count)
{
    g_free(iov);
}

static void
vg_cleanup_mapping(VuGpu *g,
                   struct virtio_gpu_simple_resource *res)
{
    vg_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
vg_resource_detach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VUGPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    vg_cleanup_mapping(g, res);
}

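/*
 * Copy guest backing memory into the host pixman image. The fast path
 * copies the whole image in one go when the transfer starts at the origin
 * and covers full-width rows; otherwise it copies row by row.
 */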
static void
vg_transfer_to_host_2d(VuGpu *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VUGPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        g_critical("%s: transfer bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d",
                   __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                   t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       img_data + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

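/*
 * Associate a resource with a scanout and tell the frontend about it:
 * via VHOST_USER_GPU_DMABUF_SCANOUT (passing the dmabuf fd) when the
 * buffer supports it, or via VHOST_USER_GPU_SCANOUT otherwise.
 */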
static void
vg_set_scanout(VuGpu *g,
               struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_set_scanout ss;
    int fd;

    VUGPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        vg_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        g_critical("%s: illegal scanout %d bounds for"
                   " resource %d, (%d,%d)+%d,%d vs %d %d",
                   __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                   ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;

    struct vugbm_buffer *buffer = &res->buffer;

    if (vugbm_buffer_can_get_dmabuf_fd(buffer)) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout = (VhostUserGpuDMABUFScanout) {
                .scanout_id = ss.scanout_id,
                .x = ss.r.x,
                .y = ss.r.y,
                .width = ss.r.width,
                .height = ss.r.height,
                .fd_width = buffer->width,
                .fd_height = buffer->height,
                .fd_stride = buffer->stride,
                .fd_drm_fourcc = buffer->format
            }
        };

        if (vugbm_buffer_get_dmabuf_fd(buffer, &fd)) {
            vg_send_msg(g, &msg, fd);
            close(fd);
        }
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout = (VhostUserGpuScanout) {
                .scanout_id = ss.scanout_id,
                .width = scanout->width,
                .height = scanout->height
            }
        };
        vg_send_msg(g, &msg, -1);
    }
}

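/*
 * Flush the damaged rectangle to every scanout the resource is bound to.
 * For dmabuf-backed scanouts a DMABUF_UPDATE message is sent and the
 * backend waits for the frontend's ack; otherwise the damaged pixels are
 * copied into the payload of an UPDATE message.
 */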
static void
vg_resource_flush(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VUGPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d\n",
                   __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        g_critical("%s: flush bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d\n",
                   __func__, rf.resource_id, rf.r.x, rf.r.y,
                   rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);

    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);

        extents = pixman_region_extents(&finalregion);
        size_t width = extents->x2 - extents->x1;
        size_t height = extents->y2 - extents->y1;

        if (vugbm_buffer_can_get_dmabuf_fd(&res->buffer)) {
            VhostUserGpuMsg vmsg = {
                .request = VHOST_USER_GPU_DMABUF_UPDATE,
                .size = sizeof(VhostUserGpuUpdate),
                .payload.update = (VhostUserGpuUpdate) {
                    .scanout_id = i,
                    .x = extents->x1,
                    .y = extents->y1,
                    .width = width,
                    .height = height,
                }
            };
            vg_send_msg(g, &vmsg, -1);
            vg_wait_ok(g);
        } else {
            size_t bpp =
                PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) / 8;
            size_t size = width * height * bpp;

            void *p = g_malloc(VHOST_USER_GPU_HDR_SIZE +
                               sizeof(VhostUserGpuUpdate) + size);
            VhostUserGpuMsg *msg = p;
            msg->request = VHOST_USER_GPU_UPDATE;
            msg->size = sizeof(VhostUserGpuUpdate) + size;
            msg->payload.update = (VhostUserGpuUpdate) {
                .scanout_id = i,
                .x = extents->x1,
                .y = extents->y1,
                .width = width,
                .height = height,
            };
            pixman_image_t *img =
                pixman_image_create_bits(pixman_image_get_format(res->image),
                                         msg->payload.update.width,
                                         msg->payload.update.height,
                                         p + offsetof(VhostUserGpuMsg,
                                                      payload.update.data),
                                         width * bpp);
            pixman_image_composite(PIXMAN_OP_SRC,
                                   res->image, NULL, img,
                                   extents->x1, extents->y1,
                                   0, 0, 0, 0,
                                   width, height);
            pixman_image_unref(img);
            vg_send_msg(g, msg, -1);
            g_free(msg);
        }
        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void
vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        vg_resource_create_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        vg_resource_unref(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        vg_resource_flush(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        vg_transfer_to_host_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        vg_set_scanout(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        vg_resource_attach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        vg_resource_detach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        vg_get_edid(vg, cmd);
        break;
    default:
        g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (cmd->state == VG_CMD_STATE_NEW) {
        vg_ctrl_response_nodata(vg, cmd, cmd->error ? cmd->error :
                                VIRTIO_GPU_RESP_OK_NODATA);
    }
}

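/*
 * Control-queue handler: pop commands, dispatch them (virgl or 2D path),
 * and park commands that are still pending (fenced or waiting on the
 * frontend) on fenceq. Processing stops while wait_in is set.
 */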
static void
vg_handle_ctrl(VuDev *dev, int qidx)
{
    VuGpu *vg = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    struct virtio_gpu_ctrl_command *cmd = NULL;
    size_t len;

    for (;;) {
        if (vg->wait_in != 0) {
            return;
        }

        cmd = vu_queue_pop(dev, vq, sizeof(struct virtio_gpu_ctrl_command));
        if (!cmd) {
            break;
        }
        cmd->vq = vq;
        cmd->error = 0;
        cmd->state = VG_CMD_STATE_NEW;

        len = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                         0, &cmd->cmd_hdr, sizeof(cmd->cmd_hdr));
        if (len != sizeof(cmd->cmd_hdr)) {
            g_warning("%s: command size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cmd->cmd_hdr));
        }

        virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
        g_debug("%d %s\n", cmd->cmd_hdr.type,
                vg_cmd_to_string(cmd->cmd_hdr.type));

        if (vg->virgl) {
            vg_virgl_process_cmd(vg, cmd);
        } else {
            vg_process_cmd(vg, cmd);
        }

        if (cmd->state != VG_CMD_STATE_FINISHED) {
            QTAILQ_INSERT_TAIL(&vg->fenceq, cmd, next);
            vg->inflight++;
        } else {
            free(cmd);
        }
    }
}

static void
update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    g_return_if_fail(res != NULL);
    g_return_if_fail(pixman_image_get_width(res->image) == 64);
    g_return_if_fail(pixman_image_get_height(res->image) == 64);
    g_return_if_fail(
        PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);

    memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
}

static void
vg_process_cursor_cmd(VuGpu *g, struct virtio_gpu_update_cursor *cursor)
{
    switch (cursor->hdr.type) {
    case VIRTIO_GPU_CMD_MOVE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = cursor->resource_id ?
                VHOST_USER_GPU_CURSOR_POS : VHOST_USER_GPU_CURSOR_POS_HIDE,
            .size = sizeof(VhostUserGpuCursorPos),
            .payload.cursor_pos = {
                .scanout_id = cursor->pos.scanout_id,
                .x = cursor->pos.x,
                .y = cursor->pos.y,
            }
        };
        g_debug("%s: move", G_STRFUNC);
        vg_send_msg(g, &msg, -1);
        break;
    }
    case VIRTIO_GPU_CMD_UPDATE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_CURSOR_UPDATE,
            .size = sizeof(VhostUserGpuCursorUpdate),
            .payload.cursor_update = {
                .pos = {
                    .scanout_id = cursor->pos.scanout_id,
                    .x = cursor->pos.x,
                    .y = cursor->pos.y,
                },
                .hot_x = cursor->hot_x,
                .hot_y = cursor->hot_y,
            }
        };
        g_debug("%s: update", G_STRFUNC);
        if (g->virgl) {
            vg_virgl_update_cursor_data(g, cursor->resource_id,
                                        msg.payload.cursor_update.data);
        } else {
            update_cursor_data_simple(g, cursor->resource_id,
                                      msg.payload.cursor_update.data);
        }
        vg_send_msg(g, &msg, -1);
        break;
    }
    default:
        g_debug("%s: unknown cmd %d", G_STRFUNC, cursor->hdr.type);
        break;
    }
}

static void
vg_handle_cursor(VuDev *dev, int qidx)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    VuVirtqElement *elem;
    size_t len;
    struct virtio_gpu_update_cursor cursor;

    for (;;) {
        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
        if (!elem) {
            break;
        }
        g_debug("cursor out:%d in:%d\n", elem->out_num, elem->in_num);

        len = iov_to_buf(elem->out_sg, elem->out_num,
                         0, &cursor, sizeof(cursor));
        if (len != sizeof(cursor)) {
            g_warning("%s: cursor size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cursor));
        } else {
            virtio_gpu_bswap_32(&cursor, sizeof(cursor));
            vg_process_cursor_cmd(g, &cursor);
        }
        vu_queue_push(dev, vq, elem, 0);
        vu_queue_notify(dev, vq);
        free(elem);
    }
}

static void
vg_panic(VuDev *dev, const char *msg)
{
    g_critical("%s\n", msg);
    exit(1);
}

static void
vg_queue_set_started(VuDev *dev, int qidx, bool started)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    g_debug("queue started %d:%d\n", qidx, started);

    switch (qidx) {
    case 0:
        vu_set_queue_handler(dev, vq, started ? vg_handle_ctrl : NULL);
        break;
    case 1:
        vu_set_queue_handler(dev, vq, started ? vg_handle_cursor : NULL);
        break;
    default:
        break;
    }
}

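/*
 * Negotiate vhost-user-gpu protocol features with the frontend: request
 * the frontend's feature mask, keep only EDID and DMABUF2, and send the
 * selection back. If the virtio EDID feature was negotiated, the frontend
 * must support the EDID protocol feature.
 */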
static gboolean
protocol_features_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    const uint64_t protocol_edid = (1 << VHOST_USER_GPU_PROTOCOL_F_EDID);
    const uint64_t protocol_dmabuf2 = (1 << VHOST_USER_GPU_PROTOCOL_F_DMABUF2);
    VuGpu *g = user_data;
    uint64_t protocol_features;
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    if (!vg_recv_msg(g, msg.request,
                     sizeof(protocol_features), &protocol_features)) {
        return G_SOURCE_CONTINUE;
    }

    protocol_features &= (protocol_edid | protocol_dmabuf2);

    msg = (VhostUserGpuMsg) {
        .request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
        .size = sizeof(uint64_t),
        .payload.u64 = protocol_features,
    };
    vg_send_msg(g, &msg, -1);

    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    if (g->edid_inited && !(protocol_features & protocol_edid)) {
        g_printerr("EDID feature set by the frontend but it does not support "
                   "the EDID vhost-user-gpu protocol.\n");
        exit(EXIT_FAILURE);
    }

    g->use_modifiers = !!(protocol_features & protocol_dmabuf2);

    return G_SOURCE_REMOVE;
}

static void
set_gpu_protocol_features(VuGpu *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
    };

    vg_send_msg(g, &msg, -1);
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               protocol_features_cb, g);
}

static int
vg_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    switch (msg->request) {
    case VHOST_USER_GPU_SET_SOCKET: {
        g_return_val_if_fail(msg->fd_num == 1, 1);
        g_return_val_if_fail(g->sock_fd == -1, 1);
        g->sock_fd = msg->fds[0];
        set_gpu_protocol_features(g);
        return 1;
    }
    default:
        return 0;
    }

    return 0;
}

static uint64_t
vg_get_features(VuDev *dev)
{
    uint64_t features = 0;

    if (opt_virgl) {
        features |= 1 << VIRTIO_GPU_F_VIRGL;
    }
    features |= 1 << VIRTIO_GPU_F_EDID;

    return features;
}

static void
vg_set_features(VuDev *dev, uint64_t features)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    bool virgl = features & (1 << VIRTIO_GPU_F_VIRGL);

    if (virgl && !g->virgl_inited) {
        if (!vg_virgl_init(g)) {
            vg_panic(dev, "Failed to initialize virgl");
        }
        g->virgl_inited = true;
    }

    g->edid_inited = !!(features & (1 << VIRTIO_GPU_F_EDID));
    g->virgl = virgl;
}

static int
vg_get_config(VuDev *dev, uint8_t *config, uint32_t len)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    if (len > sizeof(struct virtio_gpu_config)) {
        return -1;
    }

    if (opt_virgl) {
        g->virtio_config.num_capsets = vg_virgl_get_num_capsets();
    }

    memcpy(config, &g->virtio_config, len);

    return 0;
}

static int
vg_set_config(VuDev *dev, const uint8_t *data,
              uint32_t offset, uint32_t size,
              uint32_t flags)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    struct virtio_gpu_config *config = (struct virtio_gpu_config *)data;

    if (config->events_clear) {
        g->virtio_config.events_read &= ~config->events_clear;
    }

    return 0;
}

static const VuDevIface vuiface = {
    .set_features = vg_set_features,
    .get_features = vg_get_features,
    .queue_set_started = vg_queue_set_started,
    .process_msg = vg_process_msg,
    .get_config = vg_get_config,
    .set_config = vg_set_config,
};

static void
vg_destroy(VuGpu *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    vug_deinit(&g->dev);

    vg_sock_fd_close(g);

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        vg_resource_destroy(g, res);
    }

    vugbm_device_destroy(&g->gdev);
}

static GOptionEntry entries[] = {
    { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps,
      "Print capabilities", NULL },
    { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum,
      "Use inherited fd socket", "FDNUM" },
    { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path,
      "Use UNIX socket path", "PATH" },
    { "render-node", 'r', 0, G_OPTION_ARG_FILENAME, &opt_render_node,
      "Specify DRM render node", "PATH" },
    { "virgl", 'v', 0, G_OPTION_ARG_NONE, &opt_virgl,
      "Turn virgl rendering on", NULL },
    { NULL, }
};

int
main(int argc, char *argv[])
{
    GOptionContext *context;
    GError *error = NULL;
    GMainLoop *loop = NULL;
    int fd;
    VuGpu g = { .sock_fd = -1, .drm_rnode_fd = -1 };

    QTAILQ_INIT(&g.reslist);
    QTAILQ_INIT(&g.fenceq);

    context = g_option_context_new("QEMU vhost-user-gpu");
    g_option_context_add_main_entries(context, entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        g_printerr("Option parsing failed: %s\n", error->message);
        exit(EXIT_FAILURE);
    }
    g_option_context_free(context);

    if (opt_print_caps) {
        g_print("{\n");
        g_print("  \"type\": \"gpu\",\n");
        g_print("  \"features\": [\n");
        g_print("    \"render-node\",\n");
        g_print("    \"virgl\"\n");
        g_print("  ]\n");
        g_print("}\n");
        exit(EXIT_SUCCESS);
    }

    g.drm_rnode_fd = qemu_drm_rendernode_open(opt_render_node);
    if (opt_render_node && g.drm_rnode_fd == -1) {
        g_printerr("Failed to open DRM rendernode.\n");
        exit(EXIT_FAILURE);
    }

    vugbm_device_init(&g.gdev, g.drm_rnode_fd);

    if ((!!opt_socket_path + (opt_fdnum != -1)) != 1) {
        g_printerr("Please specify either --fd or --socket-path\n");
        exit(EXIT_FAILURE);
    }

    if (opt_socket_path) {
        int lsock = unix_listen(opt_socket_path, &error_fatal);
        if (lsock < 0) {
            g_printerr("Failed to listen on %s.\n", opt_socket_path);
            exit(EXIT_FAILURE);
        }
        fd = accept(lsock, NULL, NULL);
        close(lsock);
    } else {
        fd = opt_fdnum;
    }
    if (fd == -1) {
        g_printerr("Invalid vhost-user socket.\n");
        exit(EXIT_FAILURE);
    }

    if (!vug_init(&g.dev, VHOST_USER_GPU_MAX_QUEUES, fd, vg_panic, &vuiface)) {
        g_printerr("Failed to initialize libvhost-user-glib.\n");
        exit(EXIT_FAILURE);
    }

    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);
    g_main_loop_unref(loop);

    vg_destroy(&g);
    if (g.drm_rnode_fd >= 0) {
        close(g.drm_rnode_fd);
    }

    return 0;
}