vhost-user-gpu.c

/*
 * vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2018
 *
 * Authors:
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-gpu.h"
#include "chardev/char-fe.h"
#include "qapi/error.h"
#include "migration/blocker.h"
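
/*
 * Requests exchanged with the vhost-user-gpu backend over a dedicated
 * socket pair (see vhost_user_gpu_do_set_socket() below): the backend sends
 * cursor, scanout and update messages, and QEMU replies to the GET_*
 * queries and acknowledges DMABUF_UPDATE.
 */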
typedef enum VhostUserGpuRequest {
    VHOST_USER_GPU_NONE = 0,
    VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_GET_DISPLAY_INFO,
    VHOST_USER_GPU_CURSOR_POS,
    VHOST_USER_GPU_CURSOR_POS_HIDE,
    VHOST_USER_GPU_CURSOR_UPDATE,
    VHOST_USER_GPU_SCANOUT,
    VHOST_USER_GPU_UPDATE,
    VHOST_USER_GPU_DMABUF_SCANOUT,
    VHOST_USER_GPU_DMABUF_UPDATE,
    VHOST_USER_GPU_GET_EDID,
} VhostUserGpuRequest;
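
/*
 * Per-request payload structures; QEMU_PACKED keeps their layout identical
 * to the byte stream produced by the backend, independent of host padding.
 */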
typedef struct VhostUserGpuDisplayInfoReply {
    struct virtio_gpu_resp_display_info info;
} VhostUserGpuDisplayInfoReply;

typedef struct VhostUserGpuCursorPos {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
} QEMU_PACKED VhostUserGpuCursorPos;

typedef struct VhostUserGpuCursorUpdate {
    VhostUserGpuCursorPos pos;
    uint32_t hot_x;
    uint32_t hot_y;
    uint32_t data[64 * 64];
} QEMU_PACKED VhostUserGpuCursorUpdate;

typedef struct VhostUserGpuScanout {
    uint32_t scanout_id;
    uint32_t width;
    uint32_t height;
} QEMU_PACKED VhostUserGpuScanout;

typedef struct VhostUserGpuUpdate {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint8_t data[];
} QEMU_PACKED VhostUserGpuUpdate;

typedef struct VhostUserGpuDMABUFScanout {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint32_t fd_width;
    uint32_t fd_height;
    uint32_t fd_stride;
    uint32_t fd_flags;
    int fd_drm_fourcc;
} QEMU_PACKED VhostUserGpuDMABUFScanout;

typedef struct VhostUserGpuEdidRequest {
    uint32_t scanout_id;
} QEMU_PACKED VhostUserGpuEdidRequest;
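
/*
 * Every message on the wire starts with a 12-byte header (request, flags,
 * size) followed by a request-specific payload of 'size' bytes.
 */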
typedef struct VhostUserGpuMsg {
    uint32_t request; /* VhostUserGpuRequest */
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
        VhostUserGpuCursorPos cursor_pos;
        VhostUserGpuCursorUpdate cursor_update;
        VhostUserGpuScanout scanout;
        VhostUserGpuUpdate update;
        VhostUserGpuDMABUFScanout dmabuf_scanout;
        VhostUserGpuEdidRequest edid_req;
        struct virtio_gpu_resp_edid resp_edid;
        struct virtio_gpu_resp_display_info display_info;
        uint64_t u64;
    } payload;
} QEMU_PACKED VhostUserGpuMsg;
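
/* Dummy instance, only used to compute the header size in the macro below. */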
static VhostUserGpuMsg m __attribute__ ((unused));
#define VHOST_USER_GPU_HDR_SIZE \
    (sizeof(m.request) + sizeof(m.size) + sizeof(m.flags))

#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4

#define VHOST_USER_GPU_PROTOCOL_F_EDID 0

static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);
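
/*
 * Handle a cursor message from the backend: CURSOR_UPDATE refreshes the
 * 64x64 cursor image, CURSOR_POS and CURSOR_POS_HIDE move or hide the
 * pointer on the given scanout.
 */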
static void
vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos;
    struct virtio_gpu_scanout *s;

    if (pos->scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[pos->scanout_id];

    if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) {
        VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update;
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = up->hot_x;
        s->current_cursor->hot_y = up->hot_y;
        memcpy(s->current_cursor->data, up->data,
               64 * 64 * sizeof(uint32_t));

        dpy_cursor_define(s->con, s->current_cursor);
    }

    dpy_mouse_set(s->con, pos->x, pos->y,
                  msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE);
}
static void
vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg)
{
    qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg,
                      VHOST_USER_GPU_HDR_SIZE + msg->size);
}
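
/*
 * Reply to a pending DMABUF_UPDATE so the backend can continue; called from
 * the GL-flush callback and from the error paths in the display handler.
 */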
static void
vhost_user_gpu_unblock(VhostUserGPU *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_DMABUF_UPDATE,
        .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
    };

    vhost_user_gpu_send_msg(g, &msg);
}
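
/*
 * Dispatch a non-cursor message from the backend: protocol feature
 * negotiation, display info / EDID queries, scanout configuration and frame
 * updates (either shared-memory or dma-buf based).
 */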
static void
vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    QemuConsole *con = NULL;
    struct virtio_gpu_scanout *s;

    switch (msg->request) {
    case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: {
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(uint64_t),
            .payload = {
                .u64 = (1 << VHOST_USER_GPU_PROTOCOL_F_EDID)
            }
        };

        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: {
        break;
    }
    case VHOST_USER_GPU_GET_DISPLAY_INFO: {
        struct virtio_gpu_resp_display_info display_info = { {} };
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(struct virtio_gpu_resp_display_info),
        };

        display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
        virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
        memcpy(&reply.payload.display_info, &display_info,
               sizeof(display_info));
        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_GET_EDID: {
        VhostUserGpuEdidRequest *m = &msg->payload.edid_req;
        struct virtio_gpu_resp_edid resp = { {} };
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(reply.payload.resp_edid),
        };

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            error_report("invalid scanout: %d", m->scanout_id);
            break;
        }

        resp.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
        virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), m->scanout_id, &resp);
        memcpy(&reply.payload.resp_edid, &resp, sizeof(resp));
        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SCANOUT: {
        VhostUserGpuScanout *m = &msg->payload.scanout;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            return;
        }

        g->parent_obj.enable = 1;
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;

        if (m->width == 0) {
            dpy_gfx_replace_surface(con, NULL);
        } else {
            s->ds = qemu_create_displaysurface(m->width, m->height);
            /* replace surface on next update */
        }
        break;
    }
    case VHOST_USER_GPU_DMABUF_SCANOUT: {
        VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout;
        int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
        QemuDmaBuf *dmabuf;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            error_report("invalid scanout: %d", m->scanout_id);
            if (fd >= 0) {
                close(fd);
            }
            break;
        }

        g->parent_obj.enable = 1;
        con = g->parent_obj.scanout[m->scanout_id].con;
        dmabuf = &g->dmabuf[m->scanout_id];
        if (dmabuf->fd >= 0) {
            close(dmabuf->fd);
            dmabuf->fd = -1;
        }
        dpy_gl_release_dmabuf(con, dmabuf);
        if (fd == -1) {
            dpy_gl_scanout_disable(con);
            break;
        }
        *dmabuf = (QemuDmaBuf) {
            .fd = fd,
            .width = m->fd_width,
            .height = m->fd_height,
            .stride = m->fd_stride,
            .fourcc = m->fd_drm_fourcc,
            .y0_top = m->fd_flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
        };
        dpy_gl_scanout_dmabuf(con, dmabuf);
        break;
    }
    case VHOST_USER_GPU_DMABUF_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs ||
            !g->parent_obj.scanout[m->scanout_id].con) {
            error_report("invalid scanout update: %d", m->scanout_id);
            vhost_user_gpu_unblock(g);
            break;
        }

        con = g->parent_obj.scanout[m->scanout_id].con;
        if (!console_has_gl(con)) {
            error_report("console doesn't support GL!");
            vhost_user_gpu_unblock(g);
            break;
        }
        g->backend_blocked = true;
        dpy_gl_update(con, m->x, m->y, m->width, m->height);
        break;
    }
    case VHOST_USER_GPU_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            break;
        }
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;
        pixman_image_t *image =
            pixman_image_create_bits(PIXMAN_x8r8g8b8,
                                     m->width,
                                     m->height,
                                     (uint32_t *)m->data,
                                     m->width * 4);
        pixman_image_composite(PIXMAN_OP_SRC,
                               image, NULL, s->ds->image,
                               0, 0, 0, 0, m->x, m->y, m->width, m->height);
        pixman_image_unref(image);
        if (qemu_console_surface(con) != s->ds) {
            dpy_gfx_replace_surface(con, s->ds);
        } else {
            dpy_gfx_update(con, m->x, m->y, m->width, m->height);
        }
        break;
    }
    default:
        g_warning("unhandled message %d %d", msg->request, msg->size);
    }

    if (con && qemu_console_is_gl_blocked(con)) {
        vhost_user_gpu_update_blocked(g, true);
    }
}
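
/*
 * Read one message from the backend socket: the three header words are read
 * one at a time, then 'size' payload bytes, and the message is dispatched to
 * the cursor or display handler above.
 */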
static void
vhost_user_gpu_chr_read(void *opaque)
{
    VhostUserGPU *g = opaque;
    VhostUserGpuMsg *msg = NULL;
    VhostUserGpuRequest request;
    uint32_t size, flags;
    int r;

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&request, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg header: %d, %d", r, errno);
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&flags, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg flags");
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&size, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg size");
        goto end;
    }

    msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size);

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&msg->payload, size);
    if (r != size) {
        error_report("failed to read msg payload %d != %d", r, size);
        goto end;
    }

    msg->request = request;
    msg->flags = size;
    msg->size = size;

    if (request == VHOST_USER_GPU_CURSOR_UPDATE ||
        request == VHOST_USER_GPU_CURSOR_POS ||
        request == VHOST_USER_GPU_CURSOR_POS_HIDE) {
        vhost_user_gpu_handle_cursor(g, msg);
    } else {
        vhost_user_gpu_handle_display(g, msg);
    }

end:
    g_free(msg);
}
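
/*
 * Pause or resume reading from the backend socket by (un)installing the fd
 * read handler; used while the console still holds the current GL frame.
 */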
static void
vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked)
{
    qemu_set_fd_handler(g->vhost_gpu_fd,
                        blocked ? NULL : vhost_user_gpu_chr_read, NULL, g);
}
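
/*
 * Called by the console once the GL surface has been flushed: ack the
 * pending DMABUF_UPDATE, if any, and resume reading backend messages.
 */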
static void
vhost_user_gpu_gl_flushed(VirtIOGPUBase *b)
{
    VhostUserGPU *g = VHOST_USER_GPU(b);

    if (g->backend_blocked) {
        vhost_user_gpu_unblock(g);
        g->backend_blocked = false;
    }

    vhost_user_gpu_update_blocked(g, false);
}
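
/*
 * Create the side-channel socket pair: one end is wrapped in a socket
 * chardev and handled by QEMU, the other is handed to the backend via
 * vhost_user_gpu_set_socket().
 */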
static bool
vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp)
{
    Chardev *chr;
    int sv[2];

    if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_setg_errno(errp, errno, "socketpair() failed");
        return false;
    }

    chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET));
    if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) {
        error_setg(errp, "Failed to make socket chardev");
        goto err;
    }
    if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) {
        goto err;
    }
    if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) {
        error_setg(errp, "Failed to set vhost-user-gpu socket");
        qemu_chr_fe_deinit(&g->vhost_chr, false);
        goto err;
    }

    g->vhost_gpu_fd = sv[0];
    vhost_user_gpu_update_blocked(g, false);
    close(sv[1]);
    return true;

err:
    close(sv[0]);
    close(sv[1]);
    if (chr) {
        object_unref(OBJECT(chr));
    }
    return false;
}
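
/*
 * The virtio config space is fetched from the backend, but the scanout
 * count and event fields are owned by QEMU and overwritten below.
 */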
static void
vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    struct virtio_gpu_config *vgconfig =
        (struct virtio_gpu_config *)config_data;
    Error *local_err = NULL;
    int ret;

    memset(config_data, 0, sizeof(struct virtio_gpu_config));

    ret = vhost_dev_get_config(&g->vhost->dev,
                               config_data, sizeof(struct virtio_gpu_config),
                               &local_err);
    if (ret) {
        error_report_err(local_err);
        return;
    }

    /* those fields are managed by qemu */
    vgconfig->num_scanouts = b->virtio_config.num_scanouts;
    vgconfig->events_read = b->virtio_config.events_read;
    vgconfig->events_clear = b->virtio_config.events_clear;
}
static void
vhost_user_gpu_set_config(VirtIODevice *vdev,
                          const uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config_data;
    int ret;

    if (vgconfig->events_clear) {
        b->virtio_config.events_read &= ~vgconfig->events_clear;
    }

    ret = vhost_dev_set_config(&g->vhost->dev, config_data,
                               0, sizeof(struct virtio_gpu_config),
                               VHOST_SET_CONFIG_TYPE_FRONTEND);
    if (ret) {
        error_report("vhost-user-gpu: set device config space failed");
        return;
    }
}
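
/*
 * DRIVER_OK with a running VM brings the backend up (socket pair plus vhost
 * start); any other status change tears the side channel down and stops the
 * backend.
 */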
static void
vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    Error *err = NULL;

    if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) {
        if (!vhost_user_gpu_do_set_socket(g, &err)) {
            error_report_err(err);
            return;
        }
        vhost_user_backend_start(g->vhost);
    } else {
        /* unblock any wait and stop processing */
        if (g->vhost_gpu_fd != -1) {
            vhost_user_gpu_update_blocked(g, true);
            qemu_chr_fe_deinit(&g->vhost_chr, true);
            g->vhost_gpu_fd = -1;
        }
        vhost_user_backend_stop(g->vhost);
    }
}
static bool
vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    /*
     * The config interrupt is identified by VIRTIO_CONFIG_IRQ_IDX (-1).
     * This device does not support it, so report it as not pending.
     */
    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return false;
    }
    return vhost_virtqueue_pending(&g->vhost->dev, idx);
}
static void
vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    /*
     * The config interrupt is identified by VIRTIO_CONFIG_IRQ_IDX (-1).
     * This device does not support it, so there is nothing to mask.
     */
    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return;
    }
    vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
}
static void
vhost_user_gpu_instance_init(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
    object_property_add_alias(obj, "chardev",
                              OBJECT(g->vhost), "chardev");
}

static void
vhost_user_gpu_instance_finalize(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    object_unref(OBJECT(g->vhost));
}

static void
vhost_user_gpu_reset(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));

    vhost_user_backend_stop(g->vhost);
}

static int
vhost_user_gpu_config_change(struct vhost_dev *dev)
{
    error_report("vhost-user-gpu: unhandled backend config change");
    return -1;
}

static const VhostDevConfigOps config_ops = {
    .vhost_dev_config_notifier = vhost_user_gpu_config_change,
};
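
/*
 * Realize: initialize the vhost-user backend with two virtqueues, always
 * advertise DMABUF support, and derive the VIRGL/EDID flags from the
 * features the backend offers before realizing the base device.
 */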
static void
vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VhostUserGPU *g = VHOST_USER_GPU(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(g);

    vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops);
    if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) {
        return;
    }

    /* existing backend may send DMABUF, so let's add that requirement */
    g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED;
    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED;
    }
    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_EDID)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_EDID_ENABLED;
    } else {
        error_report("EDID requested but the backend doesn't support it.");
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_EDID_ENABLED);
    }

    if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) {
        return;
    }

    g->vhost_gpu_fd = -1;
}
static struct vhost_dev *vhost_user_gpu_get_vhost(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    return &g->vhost->dev;
}

static Property vhost_user_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void
vhost_user_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_flushed = vhost_user_gpu_gl_flushed;
    vdc->realize = vhost_user_gpu_device_realize;
    vdc->reset = vhost_user_gpu_reset;
    vdc->set_status = vhost_user_gpu_set_status;
    vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask;
    vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending;
    vdc->get_config = vhost_user_gpu_get_config;
    vdc->set_config = vhost_user_gpu_set_config;
    vdc->get_vhost = vhost_user_gpu_get_vhost;

    device_class_set_props(dc, vhost_user_gpu_properties);
}

static const TypeInfo vhost_user_gpu_info = {
    .name = TYPE_VHOST_USER_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VhostUserGPU),
    .instance_init = vhost_user_gpu_instance_init,
    .instance_finalize = vhost_user_gpu_instance_finalize,
    .class_init = vhost_user_gpu_class_init,
};
module_obj(TYPE_VHOST_USER_GPU);
module_kconfig(VHOST_USER_GPU);

static void vhost_user_gpu_register_types(void)
{
    type_register_static(&vhost_user_gpu_info);
}

type_init(vhost_user_gpu_register_types)