vhost-user-gpu.c

/*
 * vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2018
 *
 * Authors:
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-gpu.h"
#include "chardev/char-fe.h"
#include "qapi/error.h"
#include "migration/blocker.h"

typedef enum VhostUserGpuRequest {
    VHOST_USER_GPU_NONE = 0,
    VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_GET_DISPLAY_INFO,
    VHOST_USER_GPU_CURSOR_POS,
    VHOST_USER_GPU_CURSOR_POS_HIDE,
    VHOST_USER_GPU_CURSOR_UPDATE,
    VHOST_USER_GPU_SCANOUT,
    VHOST_USER_GPU_UPDATE,
    VHOST_USER_GPU_DMABUF_SCANOUT,
    VHOST_USER_GPU_DMABUF_UPDATE,
    VHOST_USER_GPU_GET_EDID,
    VHOST_USER_GPU_DMABUF_SCANOUT2,
} VhostUserGpuRequest;

typedef struct VhostUserGpuDisplayInfoReply {
    struct virtio_gpu_resp_display_info info;
} VhostUserGpuDisplayInfoReply;

typedef struct VhostUserGpuCursorPos {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
} QEMU_PACKED VhostUserGpuCursorPos;

typedef struct VhostUserGpuCursorUpdate {
    VhostUserGpuCursorPos pos;
    uint32_t hot_x;
    uint32_t hot_y;
    uint32_t data[64 * 64];
} QEMU_PACKED VhostUserGpuCursorUpdate;

typedef struct VhostUserGpuScanout {
    uint32_t scanout_id;
    uint32_t width;
    uint32_t height;
} QEMU_PACKED VhostUserGpuScanout;

typedef struct VhostUserGpuUpdate {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint8_t data[];
} QEMU_PACKED VhostUserGpuUpdate;

typedef struct VhostUserGpuDMABUFScanout {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint32_t fd_width;
    uint32_t fd_height;
    uint32_t fd_stride;
    uint32_t fd_flags;
    int fd_drm_fourcc;
} QEMU_PACKED VhostUserGpuDMABUFScanout;

typedef struct VhostUserGpuDMABUFScanout2 {
    struct VhostUserGpuDMABUFScanout dmabuf_scanout;
    uint64_t modifier;
} QEMU_PACKED VhostUserGpuDMABUFScanout2;

typedef struct VhostUserGpuEdidRequest {
    uint32_t scanout_id;
} QEMU_PACKED VhostUserGpuEdidRequest;

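/*
 * Each message on the vhost-user-gpu channel is a 12-byte header
 * (request, flags, size) followed by `size` bytes of request-specific
 * payload; replies carry VHOST_USER_GPU_MSG_FLAG_REPLY in flags.
 */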
typedef struct VhostUserGpuMsg {
    uint32_t request; /* VhostUserGpuRequest */
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
        VhostUserGpuCursorPos cursor_pos;
        VhostUserGpuCursorUpdate cursor_update;
        VhostUserGpuScanout scanout;
        VhostUserGpuUpdate update;
        VhostUserGpuDMABUFScanout dmabuf_scanout;
        VhostUserGpuDMABUFScanout2 dmabuf_scanout2;
        VhostUserGpuEdidRequest edid_req;
        struct virtio_gpu_resp_edid resp_edid;
        struct virtio_gpu_resp_display_info display_info;
        uint64_t u64;
    } payload;
} QEMU_PACKED VhostUserGpuMsg;

static VhostUserGpuMsg m __attribute__ ((unused));
#define VHOST_USER_GPU_HDR_SIZE \
    (sizeof(m.request) + sizeof(m.size) + sizeof(m.flags))

#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4

#define VHOST_USER_GPU_PROTOCOL_F_EDID 0
#define VHOST_USER_GPU_PROTOCOL_F_DMABUF2 1

static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);

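/*
 * Cursor requests from the backend: CURSOR_UPDATE replaces the 64x64
 * 32-bit cursor image, CURSOR_POS and CURSOR_POS_HIDE move or hide it.
 */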
static void
vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos;
    struct virtio_gpu_scanout *s;

    if (pos->scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[pos->scanout_id];

    if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) {
        VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update;
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = up->hot_x;
        s->current_cursor->hot_y = up->hot_y;

        memcpy(s->current_cursor->data, up->data,
               64 * 64 * sizeof(uint32_t));

        dpy_cursor_define(s->con, s->current_cursor);
    }

    dpy_mouse_set(s->con, pos->x, pos->y,
                  msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE);
}

static void
vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg)
{
    qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg,
                      VHOST_USER_GPU_HDR_SIZE + msg->size);
}

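/*
 * Ack the last DMABUF_UPDATE: the backend is treated as blocked (see
 * backend_blocked) until this empty reply has been sent.
 */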
static void
vhost_user_gpu_unblock(VhostUserGPU *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_DMABUF_UPDATE,
        .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
    };

    vhost_user_gpu_send_msg(g, &msg);
}

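/*
 * All non-cursor requests: feature negotiation, display info, EDID,
 * scanout configuration and screen updates.
 */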
static void
vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    QemuConsole *con = NULL;
    struct virtio_gpu_scanout *s;

    switch (msg->request) {
    case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: {
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(uint64_t),
            .payload = {
                .u64 = (1 << VHOST_USER_GPU_PROTOCOL_F_EDID) |
                       (1 << VHOST_USER_GPU_PROTOCOL_F_DMABUF2)
            }
        };

        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: {
        break;
    }
    case VHOST_USER_GPU_GET_DISPLAY_INFO: {
        struct virtio_gpu_resp_display_info display_info = { {} };
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(struct virtio_gpu_resp_display_info),
        };

        display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
        virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
        memcpy(&reply.payload.display_info, &display_info,
               sizeof(display_info));
        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_GET_EDID: {
        VhostUserGpuEdidRequest *m = &msg->payload.edid_req;
        struct virtio_gpu_resp_edid resp = { {} };
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(reply.payload.resp_edid),
        };

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            error_report("invalid scanout: %d", m->scanout_id);
            break;
        }

        resp.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
        virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), m->scanout_id, &resp);
        memcpy(&reply.payload.resp_edid, &resp, sizeof(resp));
        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SCANOUT: {
        VhostUserGpuScanout *m = &msg->payload.scanout;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            return;
        }

        g->parent_obj.enable = 1;
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;

        if (m->width == 0) {
            dpy_gfx_replace_surface(con, NULL);
        } else {
            s->ds = qemu_create_displaysurface(m->width, m->height);
            /* replace surface on next update */
        }

        break;
    }
    case VHOST_USER_GPU_DMABUF_SCANOUT2:
    case VHOST_USER_GPU_DMABUF_SCANOUT: {
        VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout;
        int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
        uint64_t modifier = 0;
        QemuDmaBuf *dmabuf;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            error_report("invalid scanout: %d", m->scanout_id);
            if (fd >= 0) {
                close(fd);
            }
            break;
        }

        g->parent_obj.enable = 1;
        con = g->parent_obj.scanout[m->scanout_id].con;
        dmabuf = g->dmabuf[m->scanout_id];

        if (dmabuf) {
            qemu_dmabuf_close(dmabuf);
            dpy_gl_release_dmabuf(con, dmabuf);
            g_clear_pointer(&dmabuf, qemu_dmabuf_free);
        }

        if (fd == -1) {
            dpy_gl_scanout_disable(con);
            g->dmabuf[m->scanout_id] = NULL;
            break;
        }

        if (msg->request == VHOST_USER_GPU_DMABUF_SCANOUT2) {
            VhostUserGpuDMABUFScanout2 *m2 = &msg->payload.dmabuf_scanout2;
            modifier = m2->modifier;
        }

        dmabuf = qemu_dmabuf_new(m->width, m->height,
                                 m->fd_stride, 0, 0,
                                 m->fd_width, m->fd_height,
                                 m->fd_drm_fourcc, modifier,
                                 fd, false, m->fd_flags &
                                 VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP);

        dpy_gl_scanout_dmabuf(con, dmabuf);
        g->dmabuf[m->scanout_id] = dmabuf;
        break;
    }
    case VHOST_USER_GPU_DMABUF_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs ||
            !g->parent_obj.scanout[m->scanout_id].con) {
            error_report("invalid scanout update: %d", m->scanout_id);
            vhost_user_gpu_unblock(g);
            break;
        }

        con = g->parent_obj.scanout[m->scanout_id].con;
        if (!console_has_gl(con)) {
            error_report("console doesn't support GL!");
            vhost_user_gpu_unblock(g);
            break;
        }
        g->backend_blocked = true;
        dpy_gl_update(con, m->x, m->y, m->width, m->height);
        break;
    }
#ifdef CONFIG_PIXMAN
    case VHOST_USER_GPU_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            break;
        }
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;
        pixman_image_t *image =
            pixman_image_create_bits(PIXMAN_x8r8g8b8,
                                     m->width,
                                     m->height,
                                     (uint32_t *)m->data,
                                     m->width * 4);

        pixman_image_composite(PIXMAN_OP_SRC,
                               image, NULL, s->ds->image,
                               0, 0, 0, 0, m->x, m->y, m->width, m->height);

        pixman_image_unref(image);

        if (qemu_console_surface(con) != s->ds) {
            dpy_gfx_replace_surface(con, s->ds);
        } else {
            dpy_gfx_update(con, m->x, m->y, m->width, m->height);
        }
        break;
    }
#endif
    default:
        g_warning("unhandled message %d %d", msg->request, msg->size);
    }

    if (con && qemu_console_is_gl_blocked(con)) {
        vhost_user_gpu_update_blocked(g, true);
    }
}

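/*
 * Read a single backend message: the three header words first, then
 * `size` bytes of payload, and dispatch it to the cursor or display
 * handler.
 */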
static void
vhost_user_gpu_chr_read(void *opaque)
{
    VhostUserGPU *g = opaque;
    VhostUserGpuMsg *msg = NULL;
    VhostUserGpuRequest request;
    uint32_t size, flags;
    int r;

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&request, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg header: %d, %d", r, errno);
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&flags, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg flags");
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&size, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg size");
        goto end;
    }

    msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size);

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&msg->payload, size);
    if (r != size) {
        error_report("failed to read msg payload %d != %d", r, size);
        goto end;
    }

    msg->request = request;
    msg->flags = flags;
    msg->size = size;

    if (request == VHOST_USER_GPU_CURSOR_UPDATE ||
        request == VHOST_USER_GPU_CURSOR_POS ||
        request == VHOST_USER_GPU_CURSOR_POS_HIDE) {
        vhost_user_gpu_handle_cursor(g, msg);
    } else {
        vhost_user_gpu_handle_display(g, msg);
    }

end:
    g_free(msg);
}

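/* Stop or resume polling the backend socket for new messages. */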
static void
vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked)
{
    qemu_set_fd_handler(g->vhost_gpu_fd,
                        blocked ? NULL : vhost_user_gpu_chr_read, NULL, g);
}

static void
vhost_user_gpu_gl_flushed(VirtIOGPUBase *b)
{
    VhostUserGPU *g = VHOST_USER_GPU(b);

    if (g->backend_blocked) {
        vhost_user_gpu_unblock(g);
        g->backend_blocked = false;
    }

    vhost_user_gpu_update_blocked(g, false);
}

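/*
 * Set up the side channel to the backend: one end of a socketpair is
 * read by QEMU through a socket chardev, the other end is handed to
 * the backend via vhost_user_gpu_set_socket().
 */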
static bool
vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp)
{
    Chardev *chr;
    int sv[2];

    if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_setg_errno(errp, errno, "socketpair() failed");
        return false;
    }

    chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET));
    if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) {
        error_setg(errp, "Failed to make socket chardev");
        goto err;
    }
    if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) {
        goto err;
    }
    if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) {
        error_setg(errp, "Failed to set vhost-user-gpu socket");
        qemu_chr_fe_deinit(&g->vhost_chr, false);
        goto err;
    }

    g->vhost_gpu_fd = sv[0];
    vhost_user_gpu_update_blocked(g, false);
    close(sv[1]);
    return true;

err:
    close(sv[0]);
    close(sv[1]);
    if (chr) {
        object_unref(OBJECT(chr));
    }
    return false;
}

static void
vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    struct virtio_gpu_config *vgconfig =
        (struct virtio_gpu_config *)config_data;
    Error *local_err = NULL;
    int ret;

    memset(config_data, 0, sizeof(struct virtio_gpu_config));

    ret = vhost_dev_get_config(&g->vhost->dev,
                               config_data, sizeof(struct virtio_gpu_config),
                               &local_err);
    if (ret) {
        error_report_err(local_err);
        return;
    }

    /* these fields are managed by QEMU */
    vgconfig->num_scanouts = b->virtio_config.num_scanouts;
    vgconfig->events_read = b->virtio_config.events_read;
    vgconfig->events_clear = b->virtio_config.events_clear;
}

static void
vhost_user_gpu_set_config(VirtIODevice *vdev,
                          const uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config_data;
    int ret;

    if (vgconfig->events_clear) {
        b->virtio_config.events_read &= ~vgconfig->events_clear;
    }

    ret = vhost_dev_set_config(&g->vhost->dev, config_data,
                               0, sizeof(struct virtio_gpu_config),
                               VHOST_SET_CONFIG_TYPE_FRONTEND);
    if (ret) {
        error_report("vhost-user-gpu: set device config space failed");
        return;
    }
}

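/*
 * DRIVER_OK with a running VM brings up the side channel and starts
 * the vhost-user backend; any other status tears both down.
 */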
static void
vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    Error *err = NULL;

    if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) {
        if (!vhost_user_gpu_do_set_socket(g, &err)) {
            error_report_err(err);
            return;
        }
        vhost_user_backend_start(g->vhost);
    } else {
        /* unblock any wait and stop processing */
        if (g->vhost_gpu_fd != -1) {
            vhost_user_gpu_update_blocked(g, true);
            qemu_chr_fe_deinit(&g->vhost_chr, true);
            g->vhost_gpu_fd = -1;
        }
        vhost_user_backend_stop(g->vhost);
    }
}

static bool
vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    /*
     * VIRTIO_CONFIG_IRQ_IDX (-1) denotes the config interrupt rather
     * than a virtqueue; this device does not support it, so report no
     * pending notification for it.
     */
    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return false;
    }
    return vhost_virtqueue_pending(&g->vhost->dev, idx);
}

static void
vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    /*
     * VIRTIO_CONFIG_IRQ_IDX (-1) denotes the config interrupt rather
     * than a virtqueue; this device does not support it, so there is
     * nothing to mask.
     */
    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
        return;
    }
    vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
}

static void
vhost_user_gpu_instance_init(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
    object_property_add_alias(obj, "chardev",
                              OBJECT(g->vhost), "chardev");
}

static void
vhost_user_gpu_instance_finalize(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    object_unref(OBJECT(g->vhost));
}

static void
vhost_user_gpu_reset(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));

    vhost_user_backend_stop(g->vhost);
}

static int
vhost_user_gpu_config_change(struct vhost_dev *dev)
{
    error_report("vhost-user-gpu: unhandled backend config change");
    return -1;
}

static const VhostDevConfigOps config_ops = {
    .vhost_dev_config_notifier = vhost_user_gpu_config_change,
};

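/*
 * Realize: initialize the vhost-user backend with 2 queues (control
 * and cursor), then mirror the backend's virtio features into the
 * virtio-gpu configuration flags.
 */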
static void
vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VhostUserGPU *g = VHOST_USER_GPU(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(g);

    vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops);
    if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) {
        return;
    }

    /* existing backend may send DMABUF, so let's add that requirement */
    g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED;
    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED;
    }
    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_EDID)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_EDID_ENABLED;
    } else {
        error_report("EDID requested but the backend doesn't support it.");
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_EDID_ENABLED);
    }
    if (virtio_has_feature(g->vhost->dev.features,
                           VIRTIO_GPU_F_RESOURCE_UUID)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_RESOURCE_UUID_ENABLED;
    }

    if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) {
        return;
    }

    g->vhost_gpu_fd = -1;
}

static struct vhost_dev *vhost_user_gpu_get_vhost(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    return g->vhost ? &g->vhost->dev : NULL;
}

static const Property vhost_user_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf),
};

static void
vhost_user_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_flushed = vhost_user_gpu_gl_flushed;

    vdc->realize = vhost_user_gpu_device_realize;
    vdc->reset = vhost_user_gpu_reset;
    vdc->set_status = vhost_user_gpu_set_status;
    vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask;
    vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending;
    vdc->get_config = vhost_user_gpu_get_config;
    vdc->set_config = vhost_user_gpu_set_config;
    vdc->get_vhost = vhost_user_gpu_get_vhost;

    device_class_set_props(dc, vhost_user_gpu_properties);
}

static const TypeInfo vhost_user_gpu_info = {
    .name = TYPE_VHOST_USER_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VhostUserGPU),
    .instance_init = vhost_user_gpu_instance_init,
    .instance_finalize = vhost_user_gpu_instance_finalize,
    .class_init = vhost_user_gpu_class_init,
};
module_obj(TYPE_VHOST_USER_GPU);
module_kconfig(VHOST_USER_GPU);

static void vhost_user_gpu_register_types(void)
{
    type_register_static(&vhost_user_gpu_info);
}

type_init(vhost_user_gpu_register_types)