vhost-user-gpu.c

/*
 * Virtio vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2018
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/drm.h"
#include "qapi/error.h"
#include "qemu/sockets.h"

#include <pixman.h>
#include <glib-unix.h>

#include "vugpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "virgl.h"
#include "vugbm.h"

enum {
    VHOST_USER_GPU_MAX_QUEUES = 2,
};

struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width;
    uint32_t height;
    uint32_t format;
    struct iovec *iov;
    unsigned int iov_cnt;
    uint32_t scanout_bitmask;
    pixman_image_t *image;
    struct vugbm_buffer buffer;
    QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
};

static gboolean opt_print_caps;
static int opt_fdnum = -1;
static char *opt_socket_path;
static char *opt_render_node;
static gboolean opt_virgl;

static void vg_handle_ctrl(VuDev *dev, int qidx);
static void vg_cleanup_mapping(VuGpu *g,
                               struct virtio_gpu_simple_resource *res);

static const char *
vg_cmd_to_string(int cmd)
{
#define CMD(cmd) [cmd] = #cmd
    static const char *vg_cmd_str[] = {
        CMD(VIRTIO_GPU_UNDEFINED),

        /* 2d commands */
        CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF),
        CMD(VIRTIO_GPU_CMD_SET_SCANOUT),
        CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
        CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
        CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO),
        CMD(VIRTIO_GPU_CMD_GET_CAPSET),

        /* 3d commands */
        CMD(VIRTIO_GPU_CMD_CTX_CREATE),
        CMD(VIRTIO_GPU_CMD_CTX_DESTROY),
        CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE),
        CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D),
        CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D),
        CMD(VIRTIO_GPU_CMD_SUBMIT_3D),

        /* cursor commands */
        CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR),
        CMD(VIRTIO_GPU_CMD_MOVE_CURSOR),
    };
#undef CMD

    if (cmd >= 0 && cmd < G_N_ELEMENTS(vg_cmd_str)) {
        return vg_cmd_str[cmd];
    } else {
        return "unknown";
    }
}

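/*
 * Read up to buflen bytes from the vhost-user-gpu socket, retrying the
 * read() on EINTR/EAGAIN; a short read only triggers a warning and the
 * actual byte count is returned.
 */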
static int
vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
{
    int ret;

    do {
        ret = read(sock, buf, buflen);
    } while (ret < 0 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

static void
vg_sock_fd_close(VuGpu *g)
{
    if (g->sock_fd >= 0) {
        close(g->sock_fd);
        g->sock_fd = -1;
    }
}

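/*
 * Flow control for DMABUF updates: vg_wait_ok() parks control-queue
 * processing (via g->wait_in) until the frontend acknowledges
 * VHOST_USER_GPU_DMABUF_UPDATE, at which point source_wait_cb() resumes
 * vg_handle_ctrl().
 */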
static gboolean
source_wait_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;

    if (!vg_recv_msg(g, VHOST_USER_GPU_DMABUF_UPDATE, 0, NULL)) {
        return G_SOURCE_CONTINUE;
    }

    /* resume */
    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_wait_ok(VuGpu *g)
{
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               source_wait_cb, g);
}

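/*
 * Write a message to the vhost-user-gpu socket; when fd is not -1 it is
 * passed along as SCM_RIGHTS ancillary data (e.g. a dmabuf fd for a
 * DMABUF_SCANOUT message).
 */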
static int
vg_sock_fd_write(int sock, const void *buf, ssize_t buflen, int fd)
{
    ssize_t ret;
    struct iovec iov = {
        .iov_base = (void *)buf,
        .iov_len = buflen,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
    };
    union {
        struct cmsghdr cmsghdr;
        char control[CMSG_SPACE(sizeof(int))];
    } cmsgu;
    struct cmsghdr *cmsg;

    if (fd != -1) {
        msg.msg_control = cmsgu.control;
        msg.msg_controllen = sizeof(cmsgu.control);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;

        *((int *)CMSG_DATA(cmsg)) = fd;
    }

    do {
        ret = sendmsg(sock, &msg, 0);
    } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

    g_warn_if_fail(ret == buflen);
    return ret;
}

void
vg_send_msg(VuGpu *vg, const VhostUserGpuMsg *msg, int fd)
{
    if (vg_sock_fd_write(vg->sock_fd, msg,
                         VHOST_USER_GPU_HDR_SIZE + msg->size, fd) < 0) {
        vg_sock_fd_close(vg);
    }
}

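/*
 * Receive a reply from the frontend: read the request/flags/size header
 * fields, check them against the expected request and payload size, then
 * read the payload. On socket error the channel is closed and false is
 * returned.
 */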
bool
vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
            gpointer payload)
{
    uint32_t req, flags, size;

    if (vg_sock_fd_read(g->sock_fd, &req, sizeof(req)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &flags, sizeof(flags)) < 0 ||
        vg_sock_fd_read(g->sock_fd, &size, sizeof(size)) < 0) {
        goto err;
    }

    g_return_val_if_fail(req == expect_req, false);
    g_return_val_if_fail(flags & VHOST_USER_GPU_MSG_FLAG_REPLY, false);
    g_return_val_if_fail(size == expect_size, false);

    if (size && vg_sock_fd_read(g->sock_fd, payload, size) != size) {
        goto err;
    }

    return true;

err:
    vg_sock_fd_close(g);
    return false;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VuGpu *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

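/*
 * Complete a control command: propagate the fence if the request asked for
 * one, byteswap the response header, copy the response into the request's
 * in-iovecs and push the element back onto the virtqueue.
 */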
void
vg_ctrl_response(VuGpu *g,
                 struct virtio_gpu_ctrl_command *cmd,
                 struct virtio_gpu_ctrl_hdr *resp,
                 size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        g_critical("%s: response size incorrect %zu vs %zu",
                   __func__, s, resp_len);
    }
    vu_queue_push(&g->dev.parent, cmd->vq, &cmd->elem, s);
    vu_queue_notify(&g->dev.parent, cmd->vq);
    cmd->state = VG_CMD_STATE_FINISHED;
}

void
vg_ctrl_response_nodata(VuGpu *g,
                        struct virtio_gpu_ctrl_command *cmd,
                        enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp = {
        .type = type,
    };

    vg_ctrl_response(g, cmd, &resp, sizeof(resp));
}

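/*
 * GET_DISPLAY_INFO is handled asynchronously: the command is marked pending
 * (and kept on fenceq by vg_handle_ctrl) while the frontend is queried; the
 * reply callback then sends the virtio response and resumes control-queue
 * processing.
 */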
static gboolean
get_display_info_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    struct virtio_gpu_resp_display_info dpy_info = { {} };
    VuGpu *vg = user_data;
    struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);

    g_debug("disp info cb");
    assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
    if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_DISPLAY_INFO,
                     sizeof(dpy_info), &dpy_info)) {
        return G_SOURCE_CONTINUE;
    }

    QTAILQ_REMOVE(&vg->fenceq, cmd, next);
    vg_ctrl_response(vg, cmd, &dpy_info.hdr, sizeof(dpy_info));

    vg->wait_in = 0;
    vg_handle_ctrl(&vg->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

void
vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_DISPLAY_INFO,
        .size = 0,
    };

    assert(vg->wait_in == 0);

    vg_send_msg(vg, &msg, -1);
    vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
                                get_display_info_cb, vg);
    cmd->state = VG_CMD_STATE_PENDING;
}

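/*
 * Create a 2D resource: allocate a vugbm buffer of the requested size and
 * wrap it in a pixman image using the pixman format that matches the
 * virtio-gpu format.
 */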
static void
vg_resource_create_2d(VuGpu *g,
                      struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VUGPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));

    if (c2d.resource_id == 0) {
        g_critical("%s: resource id 0 is not allowed", __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        g_critical("%s: resource already exists %d", __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        g_critical("%s: host couldn't handle guest format %d",
                   __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
    vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height);
    res->image = pixman_image_create_bits(pformat,
                                          c2d.width,
                                          c2d.height,
                                          (uint32_t *)res->buffer.mmap,
                                          res->buffer.stride);
    if (!res->image) {
        g_critical("%s: resource creation failed %d %d %d",
                   __func__, c2d.resource_id, c2d.width, c2d.height);
        vugbm_buffer_destroy(&res->buffer);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void
vg_disable_scanout(VuGpu *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    scanout->width = 0;
    scanout->height = 0;

    if (g->sock_fd >= 0) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout.scanout_id = scanout_id,
        };
        vg_send_msg(g, &msg, -1);
    }
}

static void
vg_resource_destroy(VuGpu *g,
                    struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                vg_disable_scanout(g, i);
            }
        }
    }

    vugbm_buffer_destroy(&res->buffer);
    vg_cleanup_mapping(g, res);
    pixman_image_unref(res->image);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g_free(res);
}

static void
vg_resource_unref(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VUGPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    vg_resource_destroy(g, res);
}

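/*
 * Translate the guest memory entries of an ATTACH_BACKING command into an
 * iovec array, using vu_gpa_to_va() to map each guest physical address into
 * this process. Returns -1 if there are too many entries or if any entry
 * cannot be mapped in full.
 */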
int
vg_create_mapping_iov(VuGpu *g,
                      struct virtio_gpu_resource_attach_backing *ab,
                      struct virtio_gpu_ctrl_command *cmd,
                      struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        g_critical("%s: nr_entries is too big (%d > 16384)",
                   __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        g_critical("%s: command data size incorrect %zu vs %zu",
                   __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_new0(struct iovec, ab->nr_entries);
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t len = ents[i].length;
        (*iov)[i].iov_len = ents[i].length;
        (*iov)[i].iov_base = vu_gpa_to_va(&g->dev.parent, &len, ents[i].addr);
        if (!(*iov)[i].iov_base || len != ents[i].length) {
            g_critical("%s: resource %d element %d",
                       __func__, ab->resource_id, i);
            g_free(*iov);
            g_free(ents);
            *iov = NULL;
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

static void
vg_resource_attach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VUGPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

/* For now this only frees the iov; it may do more work later. */
void vg_cleanup_mapping_iov(VuGpu *g,
                            struct iovec *iov, uint32_t count)
{
    g_free(iov);
}

static void
vg_cleanup_mapping(VuGpu *g,
                   struct virtio_gpu_simple_resource *res)
{
    vg_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
}

static void
vg_resource_detach_backing(VuGpu *g,
                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VUGPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    vg_cleanup_mapping(g, res);
}

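/*
 * Copy pixel data from the guest backing store into the host pixman image:
 * row by row for partial transfers, or as a single whole-image copy when the
 * transfer starts at offset 0, at the origin, and spans the full image width.
 */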
static void
vg_transfer_to_host_2d(VuGpu *g,
                       struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VUGPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        g_critical("%s: illegal resource specified %d",
                   __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        g_critical("%s: transfer bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d",
                   __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                   t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       img_data + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

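/*
 * Associate a resource with a scanout. If the backing vugbm buffer can
 * export a dmabuf fd, a DMABUF_SCANOUT message is sent with the fd attached;
 * otherwise a plain SCANOUT message carrying only the dimensions is sent,
 * and pixel data follows later via VHOST_USER_GPU_UPDATE on flush.
 */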
static void
vg_set_scanout(VuGpu *g,
               struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_set_scanout ss;
    int fd;

    VUGPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));

    if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
        g_critical("%s: illegal scanout id specified %d",
                   __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        vg_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d",
                   __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        g_critical("%s: illegal scanout %d bounds for"
                   " resource %d, (%d,%d)+%d,%d vs %d %d",
                   __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                   ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;

    struct vugbm_buffer *buffer = &res->buffer;

    if (vugbm_buffer_can_get_dmabuf_fd(buffer)) {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
            .size = sizeof(VhostUserGpuDMABUFScanout),
            .payload.dmabuf_scanout = (VhostUserGpuDMABUFScanout) {
                .scanout_id = ss.scanout_id,
                .x = ss.r.x,
                .y = ss.r.y,
                .width = ss.r.width,
                .height = ss.r.height,
                .fd_width = buffer->width,
                .fd_height = buffer->height,
                .fd_stride = buffer->stride,
                .fd_drm_fourcc = buffer->format
            }
        };

        if (vugbm_buffer_get_dmabuf_fd(buffer, &fd)) {
            vg_send_msg(g, &msg, fd);
            close(fd);
        }
    } else {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_SCANOUT,
            .size = sizeof(VhostUserGpuScanout),
            .payload.scanout = (VhostUserGpuScanout) {
                .scanout_id = ss.scanout_id,
                .width = scanout->width,
                .height = scanout->height
            }
        };
        vg_send_msg(g, &msg, -1);
    }
}

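/*
 * Flush a resource: for each scanout using it, intersect the flush rectangle
 * with the scanout and either send a DMABUF_UPDATE (then wait for the
 * frontend's ack) or build a VHOST_USER_GPU_UPDATE message with the damaged
 * pixels composited inline after the header.
 */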
static void
vg_resource_flush(VuGpu *g,
                  struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VUGPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        g_critical("%s: illegal resource specified %d\n",
                   __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        g_critical("%s: flush bounds outside resource"
                   " bounds for resource %d: %d %d %d %d vs %d %d\n",
                   __func__, rf.resource_id, rf.r.x, rf.r.y,
                   rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);

    for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);

        extents = pixman_region_extents(&finalregion);
        size_t width = extents->x2 - extents->x1;
        size_t height = extents->y2 - extents->y1;

        if (vugbm_buffer_can_get_dmabuf_fd(&res->buffer)) {
            VhostUserGpuMsg vmsg = {
                .request = VHOST_USER_GPU_DMABUF_UPDATE,
                .size = sizeof(VhostUserGpuUpdate),
                .payload.update = (VhostUserGpuUpdate) {
                    .scanout_id = i,
                    .x = extents->x1,
                    .y = extents->y1,
                    .width = width,
                    .height = height,
                }
            };
            vg_send_msg(g, &vmsg, -1);
            vg_wait_ok(g);
        } else {
            size_t bpp =
                PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) / 8;
            size_t size = width * height * bpp;

            void *p = g_malloc(VHOST_USER_GPU_HDR_SIZE +
                               sizeof(VhostUserGpuUpdate) + size);
            VhostUserGpuMsg *msg = p;
            msg->request = VHOST_USER_GPU_UPDATE;
            msg->size = sizeof(VhostUserGpuUpdate) + size;
            msg->payload.update = (VhostUserGpuUpdate) {
                .scanout_id = i,
                .x = extents->x1,
                .y = extents->y1,
                .width = width,
                .height = height,
            };
            pixman_image_t *i =
                pixman_image_create_bits(pixman_image_get_format(res->image),
                                         msg->payload.update.width,
                                         msg->payload.update.height,
                                         p + offsetof(VhostUserGpuMsg,
                                                      payload.update.data),
                                         width * bpp);
            pixman_image_composite(PIXMAN_OP_SRC,
                                   res->image, NULL, i,
                                   extents->x1, extents->y1,
                                   0, 0, 0, 0,
                                   width, height);
            pixman_image_unref(i);
            vg_send_msg(g, msg, -1);
            g_free(msg);
        }
        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void
vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
{
    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        vg_get_display_info(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        vg_resource_create_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        vg_resource_unref(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        vg_resource_flush(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        vg_transfer_to_host_2d(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        vg_set_scanout(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        vg_resource_attach_backing(vg, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        vg_resource_detach_backing(vg, cmd);
        break;
    /* case VIRTIO_GPU_CMD_GET_EDID: */
    /*     break; */
    default:
        g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (cmd->state == VG_CMD_STATE_NEW) {
        vg_ctrl_response_nodata(vg, cmd, cmd->error ? cmd->error :
                                VIRTIO_GPU_RESP_OK_NODATA);
    }
}

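/*
 * Control virtqueue handler: pop commands, dispatch them to the virgl or
 * plain 2D path, and keep commands that are not yet finished (e.g. pending
 * on a frontend reply) on fenceq. Processing stops while wait_in is set,
 * i.e. while a reply from the frontend is outstanding.
 */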
static void
vg_handle_ctrl(VuDev *dev, int qidx)
{
    VuGpu *vg = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    struct virtio_gpu_ctrl_command *cmd = NULL;
    size_t len;

    for (;;) {
        if (vg->wait_in != 0) {
            return;
        }

        cmd = vu_queue_pop(dev, vq, sizeof(struct virtio_gpu_ctrl_command));
        if (!cmd) {
            break;
        }
        cmd->vq = vq;
        cmd->error = 0;
        cmd->state = VG_CMD_STATE_NEW;

        len = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                         0, &cmd->cmd_hdr, sizeof(cmd->cmd_hdr));
        if (len != sizeof(cmd->cmd_hdr)) {
            g_warning("%s: command size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cmd->cmd_hdr));
        }

        virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
        g_debug("%d %s\n", cmd->cmd_hdr.type,
                vg_cmd_to_string(cmd->cmd_hdr.type));

        if (vg->virgl) {
            vg_virgl_process_cmd(vg, cmd);
        } else {
            vg_process_cmd(vg, cmd);
        }

        if (cmd->state != VG_CMD_STATE_FINISHED) {
            QTAILQ_INSERT_TAIL(&vg->fenceq, cmd, next);
            vg->inflight++;
        } else {
            free(cmd);
        }
    }
}

static void
update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    g_return_if_fail(res != NULL);
    g_return_if_fail(pixman_image_get_width(res->image) == 64);
    g_return_if_fail(pixman_image_get_height(res->image) == 64);
    g_return_if_fail(
        PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);

    memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
}

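/*
 * Translate guest cursor commands into vhost-user-gpu cursor messages:
 * MOVE_CURSOR becomes a position (or hide) message, and UPDATE_CURSOR also
 * carries the 64x64, 32 bpp cursor image fetched from the resource.
 */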
static void
vg_process_cursor_cmd(VuGpu *g, struct virtio_gpu_update_cursor *cursor)
{
    switch (cursor->hdr.type) {
    case VIRTIO_GPU_CMD_MOVE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = cursor->resource_id ?
                VHOST_USER_GPU_CURSOR_POS : VHOST_USER_GPU_CURSOR_POS_HIDE,
            .size = sizeof(VhostUserGpuCursorPos),
            .payload.cursor_pos = {
                .scanout_id = cursor->pos.scanout_id,
                .x = cursor->pos.x,
                .y = cursor->pos.y,
            }
        };
        g_debug("%s: move", G_STRFUNC);
        vg_send_msg(g, &msg, -1);
        break;
    }
    case VIRTIO_GPU_CMD_UPDATE_CURSOR: {
        VhostUserGpuMsg msg = {
            .request = VHOST_USER_GPU_CURSOR_UPDATE,
            .size = sizeof(VhostUserGpuCursorUpdate),
            .payload.cursor_update = {
                .pos = {
                    .scanout_id = cursor->pos.scanout_id,
                    .x = cursor->pos.x,
                    .y = cursor->pos.y,
                },
                .hot_x = cursor->hot_x,
                .hot_y = cursor->hot_y,
            }
        };
        g_debug("%s: update", G_STRFUNC);
        if (g->virgl) {
            vg_virgl_update_cursor_data(g, cursor->resource_id,
                                        msg.payload.cursor_update.data);
        } else {
            update_cursor_data_simple(g, cursor->resource_id,
                                      msg.payload.cursor_update.data);
        }
        vg_send_msg(g, &msg, -1);
        break;
    }
    default:
        g_debug("%s: unknown cmd %d", G_STRFUNC, cursor->hdr.type);
        break;
    }
}

static void
vg_handle_cursor(VuDev *dev, int qidx)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    VuVirtq *vq = vu_get_queue(dev, qidx);
    VuVirtqElement *elem;
    size_t len;
    struct virtio_gpu_update_cursor cursor;

    for (;;) {
        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
        if (!elem) {
            break;
        }
        g_debug("cursor out:%d in:%d\n", elem->out_num, elem->in_num);

        len = iov_to_buf(elem->out_sg, elem->out_num,
                         0, &cursor, sizeof(cursor));
        if (len != sizeof(cursor)) {
            g_warning("%s: cursor size incorrect %zu vs %zu\n",
                      __func__, len, sizeof(cursor));
        } else {
            virtio_gpu_bswap_32(&cursor, sizeof(cursor));
            vg_process_cursor_cmd(g, &cursor);
        }
        vu_queue_push(dev, vq, elem, 0);
        vu_queue_notify(dev, vq);
        free(elem);
    }
}

static void
vg_panic(VuDev *dev, const char *msg)
{
    g_critical("%s\n", msg);
    exit(1);
}

static void
vg_queue_set_started(VuDev *dev, int qidx, bool started)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    g_debug("queue started %d:%d\n", qidx, started);

    switch (qidx) {
    case 0:
        vu_set_queue_handler(dev, vq, started ? vg_handle_ctrl : NULL);
        break;
    case 1:
        vu_set_queue_handler(dev, vq, started ? vg_handle_cursor : NULL);
        break;
    default:
        break;
    }
}

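/*
 * Protocol-feature handshake on the socket received via
 * VHOST_USER_GPU_SET_SOCKET: query the frontend's protocol features, then
 * reply with an empty feature set (this backend currently enables none).
 */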
static gboolean
protocol_features_cb(gint fd, GIOCondition condition, gpointer user_data)
{
    VuGpu *g = user_data;
    uint64_t u64;
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    if (!vg_recv_msg(g, msg.request, sizeof(u64), &u64)) {
        return G_SOURCE_CONTINUE;
    }

    msg = (VhostUserGpuMsg) {
        .request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
        .size = sizeof(uint64_t),
        .payload.u64 = 0
    };
    vg_send_msg(g, &msg, -1);

    g->wait_in = 0;
    vg_handle_ctrl(&g->dev.parent, 0);

    return G_SOURCE_REMOVE;
}

static void
set_gpu_protocol_features(VuGpu *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
    };

    vg_send_msg(g, &msg, -1);
    assert(g->wait_in == 0);
    g->wait_in = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
                               protocol_features_cb, g);
}

static int
vg_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    switch (msg->request) {
    case VHOST_USER_GPU_SET_SOCKET: {
        g_return_val_if_fail(msg->fd_num == 1, 1);
        g_return_val_if_fail(g->sock_fd == -1, 1);
        g->sock_fd = msg->fds[0];
        set_gpu_protocol_features(g);
        return 1;
    }
    default:
        return 0;
    }

    return 0;
}

static uint64_t
vg_get_features(VuDev *dev)
{
    uint64_t features = 0;

    if (opt_virgl) {
        features |= 1 << VIRTIO_GPU_F_VIRGL;
    }

    return features;
}

static void
vg_set_features(VuDev *dev, uint64_t features)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    bool virgl = features & (1 << VIRTIO_GPU_F_VIRGL);

    if (virgl && !g->virgl_inited) {
        if (!vg_virgl_init(g)) {
            vg_panic(dev, "Failed to initialize virgl");
        }
        g->virgl_inited = true;
    }

    g->virgl = virgl;
}

static int
vg_get_config(VuDev *dev, uint8_t *config, uint32_t len)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);

    if (len > sizeof(struct virtio_gpu_config)) {
        return -1;
    }

    if (opt_virgl) {
        g->virtio_config.num_capsets = vg_virgl_get_num_capsets();
    }

    memcpy(config, &g->virtio_config, len);

    return 0;
}

static int
vg_set_config(VuDev *dev, const uint8_t *data,
              uint32_t offset, uint32_t size,
              uint32_t flags)
{
    VuGpu *g = container_of(dev, VuGpu, dev.parent);
    struct virtio_gpu_config *config = (struct virtio_gpu_config *)data;

    if (config->events_clear) {
        g->virtio_config.events_read &= ~config->events_clear;
    }

    return 0;
}

static const VuDevIface vuiface = {
    .set_features = vg_set_features,
    .get_features = vg_get_features,
    .queue_set_started = vg_queue_set_started,
    .process_msg = vg_process_msg,
    .get_config = vg_get_config,
    .set_config = vg_set_config,
};

static void
vg_destroy(VuGpu *g)
{
    struct virtio_gpu_simple_resource *res, *tmp;

    vug_deinit(&g->dev);

    vg_sock_fd_close(g);

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        vg_resource_destroy(g, res);
    }

    vugbm_device_destroy(&g->gdev);
}

static GOptionEntry entries[] = {
    { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps,
      "Print capabilities", NULL },
    { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum,
      "Use inherited fd socket", "FDNUM" },
    { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path,
      "Use UNIX socket path", "PATH" },
    { "render-node", 'r', 0, G_OPTION_ARG_FILENAME, &opt_render_node,
      "Specify DRM render node", "PATH" },
    { "virgl", 'v', 0, G_OPTION_ARG_NONE, &opt_virgl,
      "Turn virgl rendering on", NULL },
    { NULL, }
};

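/*
 * The backend is driven by a GLib main loop: the vhost-user socket comes
 * either from --socket-path (listen and accept a single connection) or from
 * an inherited --fd, and is handed to libvhost-user-glib via vug_init().
 */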
int
main(int argc, char *argv[])
{
    GOptionContext *context;
    GError *error = NULL;
    GMainLoop *loop = NULL;
    int fd;
    VuGpu g = { .sock_fd = -1, .drm_rnode_fd = -1 };

    QTAILQ_INIT(&g.reslist);
    QTAILQ_INIT(&g.fenceq);

    context = g_option_context_new("QEMU vhost-user-gpu");
    g_option_context_add_main_entries(context, entries, NULL);
    if (!g_option_context_parse(context, &argc, &argv, &error)) {
        g_printerr("Option parsing failed: %s\n", error->message);
        exit(EXIT_FAILURE);
    }
    g_option_context_free(context);

    if (opt_print_caps) {
        g_print("{\n");
        g_print("  \"type\": \"gpu\",\n");
        g_print("  \"features\": [\n");
        g_print("    \"render-node\",\n");
        g_print("    \"virgl\"\n");
        g_print("  ]\n");
        g_print("}\n");
        exit(EXIT_SUCCESS);
    }

    g.drm_rnode_fd = qemu_drm_rendernode_open(opt_render_node);
    if (opt_render_node && g.drm_rnode_fd == -1) {
        g_printerr("Failed to open DRM rendernode.\n");
        exit(EXIT_FAILURE);
    }

    vugbm_device_init(&g.gdev, g.drm_rnode_fd);

    if ((!!opt_socket_path + (opt_fdnum != -1)) != 1) {
        g_printerr("Please specify either --fd or --socket-path\n");
        exit(EXIT_FAILURE);
    }

    if (opt_socket_path) {
        int lsock = unix_listen(opt_socket_path, &error_fatal);
        if (lsock < 0) {
            g_printerr("Failed to listen on %s.\n", opt_socket_path);
            exit(EXIT_FAILURE);
        }
        fd = accept(lsock, NULL, NULL);
        close(lsock);
    } else {
        fd = opt_fdnum;
    }
    if (fd == -1) {
        g_printerr("Invalid vhost-user socket.\n");
        exit(EXIT_FAILURE);
    }

    if (!vug_init(&g.dev, VHOST_USER_GPU_MAX_QUEUES, fd, vg_panic, &vuiface)) {
        g_printerr("Failed to initialize libvhost-user-glib.\n");
        exit(EXIT_FAILURE);
    }

    loop = g_main_loop_new(NULL, FALSE);
    g_main_loop_run(loop);
    g_main_loop_unref(loop);

    vg_destroy(&g);
    if (g.drm_rnode_fd >= 0) {
        close(g.drm_rnode_fd);
    }

    return 0;
}