2
0

virtio-gpu-rutabaga.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142
  1. /* SPDX-License-Identifier: GPL-2.0-or-later */
  2. #include "qemu/osdep.h"
  3. #include "qapi/error.h"
  4. #include "qemu/error-report.h"
  5. #include "qemu/iov.h"
  6. #include "trace.h"
  7. #include "hw/virtio/virtio.h"
  8. #include "hw/virtio/virtio-gpu.h"
  9. #include "hw/virtio/virtio-gpu-pixman.h"
  10. #include "hw/virtio/virtio-iommu.h"
  11. #include <glib/gmem.h>
  12. #include <rutabaga_gfx/rutabaga_gfx_ffi.h>
/*
 * Validate a condition inside a virtio-gpu command handler.
 *
 * On failure: log the source location, mark the in-flight command with
 * VIRTIO_GPU_RESP_ERR_UNSPEC, and return from the *calling* function.
 * Note the hidden `return` — this macro may only be used inside void
 * handlers that own a `cmd`, and any cleanup must happen before it runs.
 */
#define CHECK(condition, cmd) \
    do { \
        if (!(condition)) { \
            error_report("CHECK failed in %s() %s:" "%d", __func__, \
                         __FILE__, __LINE__); \
            (cmd)->error = VIRTIO_GPU_RESP_ERR_UNSPEC; \
            return; \
        } \
    } while (0)
/*
 * Payload handed from the rutabaga fence callback (which may run on a
 * non-QEMU thread) to the main-loop bottom half; see
 * virtio_gpu_rutabaga_fence_cb() / virtio_gpu_rutabaga_aio_cb().
 */
struct rutabaga_aio_data {
    struct VirtIOGPURutabaga *vr;
    struct rutabaga_fence fence;   /* copied by value; freed with the struct */
};
  26. static void
  27. virtio_gpu_rutabaga_update_cursor(VirtIOGPU *g, struct virtio_gpu_scanout *s,
  28. uint32_t resource_id)
  29. {
  30. struct virtio_gpu_simple_resource *res;
  31. struct rutabaga_transfer transfer = { 0 };
  32. struct iovec transfer_iovec;
  33. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  34. res = virtio_gpu_find_resource(g, resource_id);
  35. if (!res) {
  36. return;
  37. }
  38. if (res->width != s->current_cursor->width ||
  39. res->height != s->current_cursor->height) {
  40. return;
  41. }
  42. transfer.x = 0;
  43. transfer.y = 0;
  44. transfer.z = 0;
  45. transfer.w = res->width;
  46. transfer.h = res->height;
  47. transfer.d = 1;
  48. transfer_iovec.iov_base = s->current_cursor->data;
  49. transfer_iovec.iov_len = res->width * res->height * 4;
  50. rutabaga_resource_transfer_read(vr->rutabaga, 0,
  51. resource_id, &transfer,
  52. &transfer_iovec);
  53. }
/*
 * Display-core notification that a GL flush has completed: resume
 * draining the queued control commands.
 */
static void
virtio_gpu_rutabaga_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = VIRTIO_GPU(b);
    virtio_gpu_process_cmdq(g);
}
  60. static void
  61. rutabaga_cmd_create_resource_2d(VirtIOGPU *g,
  62. struct virtio_gpu_ctrl_command *cmd)
  63. {
  64. int32_t result;
  65. struct rutabaga_create_3d rc_3d = { 0 };
  66. struct virtio_gpu_simple_resource *res;
  67. struct virtio_gpu_resource_create_2d c2d;
  68. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  69. VIRTIO_GPU_FILL_CMD(c2d);
  70. trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
  71. c2d.width, c2d.height);
  72. rc_3d.target = 2;
  73. rc_3d.format = c2d.format;
  74. rc_3d.bind = (1 << 1);
  75. rc_3d.width = c2d.width;
  76. rc_3d.height = c2d.height;
  77. rc_3d.depth = 1;
  78. rc_3d.array_size = 1;
  79. rc_3d.last_level = 0;
  80. rc_3d.nr_samples = 0;
  81. rc_3d.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
  82. result = rutabaga_resource_create_3d(vr->rutabaga, c2d.resource_id, &rc_3d);
  83. CHECK(!result, cmd);
  84. res = g_new0(struct virtio_gpu_simple_resource, 1);
  85. res->width = c2d.width;
  86. res->height = c2d.height;
  87. res->format = c2d.format;
  88. res->resource_id = c2d.resource_id;
  89. QTAILQ_INSERT_HEAD(&g->reslist, res, next);
  90. }
  91. static void
  92. rutabaga_cmd_create_resource_3d(VirtIOGPU *g,
  93. struct virtio_gpu_ctrl_command *cmd)
  94. {
  95. int32_t result;
  96. struct rutabaga_create_3d rc_3d = { 0 };
  97. struct virtio_gpu_simple_resource *res;
  98. struct virtio_gpu_resource_create_3d c3d;
  99. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  100. VIRTIO_GPU_FILL_CMD(c3d);
  101. trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format,
  102. c3d.width, c3d.height, c3d.depth);
  103. rc_3d.target = c3d.target;
  104. rc_3d.format = c3d.format;
  105. rc_3d.bind = c3d.bind;
  106. rc_3d.width = c3d.width;
  107. rc_3d.height = c3d.height;
  108. rc_3d.depth = c3d.depth;
  109. rc_3d.array_size = c3d.array_size;
  110. rc_3d.last_level = c3d.last_level;
  111. rc_3d.nr_samples = c3d.nr_samples;
  112. rc_3d.flags = c3d.flags;
  113. result = rutabaga_resource_create_3d(vr->rutabaga, c3d.resource_id, &rc_3d);
  114. CHECK(!result, cmd);
  115. res = g_new0(struct virtio_gpu_simple_resource, 1);
  116. res->width = c3d.width;
  117. res->height = c3d.height;
  118. res->format = c3d.format;
  119. res->resource_id = c3d.resource_id;
  120. QTAILQ_INSERT_HEAD(&g->reslist, res, next);
  121. }
  122. static void
  123. virtio_gpu_rutabaga_resource_unref(VirtIOGPU *g,
  124. struct virtio_gpu_simple_resource *res,
  125. Error **errp)
  126. {
  127. int32_t result;
  128. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  129. result = rutabaga_resource_unref(vr->rutabaga, res->resource_id);
  130. if (result) {
  131. error_setg_errno(errp,
  132. (int)result,
  133. "%s: rutabaga_resource_unref returned %"PRIi32
  134. " for resource_id = %"PRIu32, __func__, result,
  135. res->resource_id);
  136. }
  137. if (res->image) {
  138. pixman_image_unref(res->image);
  139. }
  140. QTAILQ_REMOVE(&g->reslist, res, next);
  141. g_free(res);
  142. }
  143. static void
  144. rutabaga_cmd_resource_unref(VirtIOGPU *g,
  145. struct virtio_gpu_ctrl_command *cmd)
  146. {
  147. int32_t result = 0;
  148. struct virtio_gpu_simple_resource *res;
  149. struct virtio_gpu_resource_unref unref;
  150. Error *local_err = NULL;
  151. VIRTIO_GPU_FILL_CMD(unref);
  152. trace_virtio_gpu_cmd_res_unref(unref.resource_id);
  153. res = virtio_gpu_find_resource(g, unref.resource_id);
  154. CHECK(res, cmd);
  155. virtio_gpu_rutabaga_resource_unref(g, res, &local_err);
  156. if (local_err) {
  157. error_report_err(local_err);
  158. /* local_err was freed, do not reuse it. */
  159. local_err = NULL;
  160. result = 1;
  161. }
  162. CHECK(!result, cmd);
  163. }
  164. static void
  165. rutabaga_cmd_context_create(VirtIOGPU *g,
  166. struct virtio_gpu_ctrl_command *cmd)
  167. {
  168. int32_t result;
  169. struct virtio_gpu_ctx_create cc;
  170. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  171. VIRTIO_GPU_FILL_CMD(cc);
  172. trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id,
  173. cc.debug_name);
  174. result = rutabaga_context_create(vr->rutabaga, cc.hdr.ctx_id,
  175. cc.context_init, cc.debug_name, cc.nlen);
  176. CHECK(!result, cmd);
  177. }
  178. static void
  179. rutabaga_cmd_context_destroy(VirtIOGPU *g,
  180. struct virtio_gpu_ctrl_command *cmd)
  181. {
  182. int32_t result;
  183. struct virtio_gpu_ctx_destroy cd;
  184. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  185. VIRTIO_GPU_FILL_CMD(cd);
  186. trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id);
  187. result = rutabaga_context_destroy(vr->rutabaga, cd.hdr.ctx_id);
  188. CHECK(!result, cmd);
  189. }
  190. static void
  191. rutabaga_cmd_resource_flush(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
  192. {
  193. int32_t result, i;
  194. struct virtio_gpu_scanout *scanout = NULL;
  195. struct virtio_gpu_simple_resource *res;
  196. struct rutabaga_transfer transfer = { 0 };
  197. struct iovec transfer_iovec;
  198. struct virtio_gpu_resource_flush rf;
  199. bool found = false;
  200. VirtIOGPUBase *vb = VIRTIO_GPU_BASE(g);
  201. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  202. if (vr->headless) {
  203. return;
  204. }
  205. VIRTIO_GPU_FILL_CMD(rf);
  206. trace_virtio_gpu_cmd_res_flush(rf.resource_id,
  207. rf.r.width, rf.r.height, rf.r.x, rf.r.y);
  208. res = virtio_gpu_find_resource(g, rf.resource_id);
  209. CHECK(res, cmd);
  210. for (i = 0; i < vb->conf.max_outputs; i++) {
  211. scanout = &vb->scanout[i];
  212. if (i == res->scanout_bitmask) {
  213. found = true;
  214. break;
  215. }
  216. }
  217. if (!found) {
  218. return;
  219. }
  220. transfer.x = 0;
  221. transfer.y = 0;
  222. transfer.z = 0;
  223. transfer.w = res->width;
  224. transfer.h = res->height;
  225. transfer.d = 1;
  226. transfer_iovec.iov_base = pixman_image_get_data(res->image);
  227. transfer_iovec.iov_len = res->width * res->height * 4;
  228. result = rutabaga_resource_transfer_read(vr->rutabaga, 0,
  229. rf.resource_id, &transfer,
  230. &transfer_iovec);
  231. CHECK(!result, cmd);
  232. dpy_gfx_update_full(scanout->con);
  233. }
/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: bind a resource to a display output.
 * Lazily allocates a pixman surface for the resource and hands it to the
 * display core; resource_id == 0 disables the scanout.
 */
static void
rutabaga_cmd_set_scanout(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout = NULL;
    struct virtio_gpu_set_scanout ss;
    VirtIOGPUBase *vb = VIRTIO_GPU_BASE(g);
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    /* Headless mode has no display to drive. */
    if (vr->headless) {
        return;
    }
    VIRTIO_GPU_FILL_CMD(ss);
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);
    CHECK(ss.scanout_id < VIRTIO_GPU_MAX_SCANOUTS, cmd);
    scanout = &vb->scanout[ss.scanout_id];

    /* resource_id 0 means "disable this scanout". */
    if (ss.resource_id == 0) {
        dpy_gfx_replace_surface(scanout->con, NULL);
        dpy_gl_scanout_disable(scanout->con);
        return;
    }

    res = virtio_gpu_find_resource(g, ss.resource_id);
    CHECK(res, cmd);

    /* First scanout of this resource: create its host-side pixman copy. */
    if (!res->image) {
        pixman_format_code_t pformat;
        pformat = virtio_gpu_get_pixman_format(res->format);
        CHECK(pformat, cmd);

        res->image = pixman_image_create_bits(pformat,
                                              res->width,
                                              res->height,
                                              NULL, 0);
        CHECK(res->image, cmd);
        /*
         * Extra ref so the image outlives the display surface created
         * below; the matching unref is in
         * virtio_gpu_rutabaga_resource_unref().  NOTE(review): presumably
         * this balances a ref consumed by the displaysurface lifecycle —
         * verify before touching the refcounting here.
         */
        pixman_image_ref(res->image);
    }

    vb->enable = 1;

    /* realloc the surface ptr */
    scanout->ds = qemu_create_displaysurface_pixman(res->image);
    dpy_gfx_replace_surface(scanout->con, NULL);
    dpy_gfx_replace_surface(scanout->con, scanout->ds);
    /* Stored as a plain scanout id; see rutabaga_cmd_resource_flush(). */
    res->scanout_bitmask = ss.scanout_id;
}
  275. static void
  276. rutabaga_cmd_submit_3d(VirtIOGPU *g,
  277. struct virtio_gpu_ctrl_command *cmd)
  278. {
  279. int32_t result;
  280. struct virtio_gpu_cmd_submit cs;
  281. struct rutabaga_command rutabaga_cmd = { 0 };
  282. g_autofree uint8_t *buf = NULL;
  283. size_t s;
  284. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  285. VIRTIO_GPU_FILL_CMD(cs);
  286. trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size);
  287. buf = g_new0(uint8_t, cs.size);
  288. s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
  289. sizeof(cs), buf, cs.size);
  290. CHECK(s == cs.size, cmd);
  291. rutabaga_cmd.ctx_id = cs.hdr.ctx_id;
  292. rutabaga_cmd.cmd = buf;
  293. rutabaga_cmd.cmd_size = cs.size;
  294. result = rutabaga_submit_command(vr->rutabaga, &rutabaga_cmd);
  295. CHECK(!result, cmd);
  296. }
  297. static void
  298. rutabaga_cmd_transfer_to_host_2d(VirtIOGPU *g,
  299. struct virtio_gpu_ctrl_command *cmd)
  300. {
  301. int32_t result;
  302. struct rutabaga_transfer transfer = { 0 };
  303. struct virtio_gpu_transfer_to_host_2d t2d;
  304. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  305. VIRTIO_GPU_FILL_CMD(t2d);
  306. trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);
  307. transfer.x = t2d.r.x;
  308. transfer.y = t2d.r.y;
  309. transfer.z = 0;
  310. transfer.w = t2d.r.width;
  311. transfer.h = t2d.r.height;
  312. transfer.d = 1;
  313. result = rutabaga_resource_transfer_write(vr->rutabaga, 0, t2d.resource_id,
  314. &transfer);
  315. CHECK(!result, cmd);
  316. }
  317. static void
  318. rutabaga_cmd_transfer_to_host_3d(VirtIOGPU *g,
  319. struct virtio_gpu_ctrl_command *cmd)
  320. {
  321. int32_t result;
  322. struct rutabaga_transfer transfer = { 0 };
  323. struct virtio_gpu_transfer_host_3d t3d;
  324. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  325. VIRTIO_GPU_FILL_CMD(t3d);
  326. trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id);
  327. transfer.x = t3d.box.x;
  328. transfer.y = t3d.box.y;
  329. transfer.z = t3d.box.z;
  330. transfer.w = t3d.box.w;
  331. transfer.h = t3d.box.h;
  332. transfer.d = t3d.box.d;
  333. transfer.level = t3d.level;
  334. transfer.stride = t3d.stride;
  335. transfer.layer_stride = t3d.layer_stride;
  336. transfer.offset = t3d.offset;
  337. result = rutabaga_resource_transfer_write(vr->rutabaga, t3d.hdr.ctx_id,
  338. t3d.resource_id, &transfer);
  339. CHECK(!result, cmd);
  340. }
  341. static void
  342. rutabaga_cmd_transfer_from_host_3d(VirtIOGPU *g,
  343. struct virtio_gpu_ctrl_command *cmd)
  344. {
  345. int32_t result;
  346. struct rutabaga_transfer transfer = { 0 };
  347. struct virtio_gpu_transfer_host_3d t3d;
  348. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  349. VIRTIO_GPU_FILL_CMD(t3d);
  350. trace_virtio_gpu_cmd_res_xfer_fromh_3d(t3d.resource_id);
  351. transfer.x = t3d.box.x;
  352. transfer.y = t3d.box.y;
  353. transfer.z = t3d.box.z;
  354. transfer.w = t3d.box.w;
  355. transfer.h = t3d.box.h;
  356. transfer.d = t3d.box.d;
  357. transfer.level = t3d.level;
  358. transfer.stride = t3d.stride;
  359. transfer.layer_stride = t3d.layer_stride;
  360. transfer.offset = t3d.offset;
  361. result = rutabaga_resource_transfer_read(vr->rutabaga, t3d.hdr.ctx_id,
  362. t3d.resource_id, &transfer, NULL);
  363. CHECK(!result, cmd);
  364. }
  365. static void
  366. rutabaga_cmd_attach_backing(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
  367. {
  368. struct rutabaga_iovecs vecs = { 0 };
  369. struct virtio_gpu_simple_resource *res;
  370. struct virtio_gpu_resource_attach_backing att_rb;
  371. int ret;
  372. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  373. VIRTIO_GPU_FILL_CMD(att_rb);
  374. trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id);
  375. res = virtio_gpu_find_resource(g, att_rb.resource_id);
  376. CHECK(res, cmd);
  377. CHECK(!res->iov, cmd);
  378. ret = virtio_gpu_create_mapping_iov(g, att_rb.nr_entries, sizeof(att_rb),
  379. cmd, NULL, &res->iov, &res->iov_cnt);
  380. CHECK(!ret, cmd);
  381. vecs.iovecs = res->iov;
  382. vecs.num_iovecs = res->iov_cnt;
  383. ret = rutabaga_resource_attach_backing(vr->rutabaga, att_rb.resource_id,
  384. &vecs);
  385. if (ret != 0) {
  386. virtio_gpu_cleanup_mapping(g, res);
  387. }
  388. CHECK(!ret, cmd);
  389. }
  390. static void
  391. rutabaga_cmd_detach_backing(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
  392. {
  393. struct virtio_gpu_simple_resource *res;
  394. struct virtio_gpu_resource_detach_backing detach_rb;
  395. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  396. VIRTIO_GPU_FILL_CMD(detach_rb);
  397. trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id);
  398. res = virtio_gpu_find_resource(g, detach_rb.resource_id);
  399. CHECK(res, cmd);
  400. rutabaga_resource_detach_backing(vr->rutabaga,
  401. detach_rb.resource_id);
  402. virtio_gpu_cleanup_mapping(g, res);
  403. }
  404. static void
  405. rutabaga_cmd_ctx_attach_resource(VirtIOGPU *g,
  406. struct virtio_gpu_ctrl_command *cmd)
  407. {
  408. int32_t result;
  409. struct virtio_gpu_ctx_resource att_res;
  410. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  411. VIRTIO_GPU_FILL_CMD(att_res);
  412. trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id,
  413. att_res.resource_id);
  414. result = rutabaga_context_attach_resource(vr->rutabaga, att_res.hdr.ctx_id,
  415. att_res.resource_id);
  416. CHECK(!result, cmd);
  417. }
  418. static void
  419. rutabaga_cmd_ctx_detach_resource(VirtIOGPU *g,
  420. struct virtio_gpu_ctrl_command *cmd)
  421. {
  422. int32_t result;
  423. struct virtio_gpu_ctx_resource det_res;
  424. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  425. VIRTIO_GPU_FILL_CMD(det_res);
  426. trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id,
  427. det_res.resource_id);
  428. result = rutabaga_context_detach_resource(vr->rutabaga, det_res.hdr.ctx_id,
  429. det_res.resource_id);
  430. CHECK(!result, cmd);
  431. }
  432. static void
  433. rutabaga_cmd_get_capset_info(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
  434. {
  435. int32_t result;
  436. struct virtio_gpu_get_capset_info info;
  437. struct virtio_gpu_resp_capset_info resp;
  438. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  439. VIRTIO_GPU_FILL_CMD(info);
  440. result = rutabaga_get_capset_info(vr->rutabaga, info.capset_index,
  441. &resp.capset_id, &resp.capset_max_version,
  442. &resp.capset_max_size);
  443. CHECK(!result, cmd);
  444. resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
  445. virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
  446. }
  447. static void
  448. rutabaga_cmd_get_capset(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd)
  449. {
  450. int32_t result;
  451. struct virtio_gpu_get_capset gc;
  452. struct virtio_gpu_resp_capset *resp;
  453. uint32_t capset_size, capset_version;
  454. uint32_t current_id, i;
  455. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  456. VIRTIO_GPU_FILL_CMD(gc);
  457. for (i = 0; i < vr->num_capsets; i++) {
  458. result = rutabaga_get_capset_info(vr->rutabaga, i,
  459. &current_id, &capset_version,
  460. &capset_size);
  461. CHECK(!result, cmd);
  462. if (current_id == gc.capset_id) {
  463. break;
  464. }
  465. }
  466. CHECK(i < vr->num_capsets, cmd);
  467. resp = g_malloc0(sizeof(*resp) + capset_size);
  468. resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
  469. rutabaga_get_capset(vr->rutabaga, gc.capset_id, gc.capset_version,
  470. resp->capset_data, capset_size);
  471. virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + capset_size);
  472. g_free(resp);
  473. }
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: create a blob resource.  Guest
 * memory blobs get their pages mapped; HOST3D blobs live entirely on the
 * host.  Ownership of @res transfers to g->reslist on success (the
 * g_autofree pointer is nulled); any early CHECK return frees it.
 */
static void
rutabaga_cmd_resource_create_blob(VirtIOGPU *g,
                                  struct virtio_gpu_ctrl_command *cmd)
{
    int result;
    struct rutabaga_iovecs vecs = { 0 };
    g_autofree struct virtio_gpu_simple_resource *res = NULL;
    struct virtio_gpu_resource_create_blob cblob;
    struct rutabaga_create_blob rc_blob = { 0 };
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);
    CHECK(cblob.resource_id != 0, cmd);

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    /* Non-HOST3D blobs are backed by guest pages: map them now. */
    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
        result = virtio_gpu_create_mapping_iov(g, cblob.nr_entries,
                                               sizeof(cblob), cmd, &res->addrs,
                                               &res->iov, &res->iov_cnt);
        CHECK(!result, cmd);
    }

    rc_blob.blob_id = cblob.blob_id;
    rc_blob.blob_mem = cblob.blob_mem;
    rc_blob.blob_flags = cblob.blob_flags;
    rc_blob.size = cblob.size;

    /* For HOST3D blobs res->iov is NULL/0, i.e. an empty iovec list. */
    vecs.iovecs = res->iov;
    vecs.num_iovecs = res->iov_cnt;

    result = rutabaga_resource_create_blob(vr->rutabaga, cblob.hdr.ctx_id,
                                           cblob.resource_id, &rc_blob, &vecs,
                                           NULL);
    /* Roll back the guest-page mapping if rutabaga rejected the blob. */
    if (result && cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) {
        virtio_gpu_cleanup_mapping(g, res);
    }
    CHECK(!result, cmd);

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    /* The list now owns res; stop g_autofree from freeing it. */
    res = NULL;
}
/*
 * VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB: map a blob resource into the device's
 * hostmem region at the guest-requested offset.  The mapping is tracked in
 * a fixed table of MAX_SLOTS memory-region slots.
 */
static void
rutabaga_cmd_resource_map_blob(VirtIOGPU *g,
                               struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    uint32_t map_info = 0;
    uint32_t slot = 0;
    struct virtio_gpu_simple_resource *res;
    struct rutabaga_mapping mapping = { 0 };
    struct virtio_gpu_resource_map_blob mblob;
    struct virtio_gpu_resp_map_info resp = { 0 };
    VirtIOGPUBase *vb = VIRTIO_GPU_BASE(g);
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(mblob);
    CHECK(mblob.resource_id != 0, cmd);

    res = virtio_gpu_find_resource(g, mblob.resource_id);
    CHECK(res, cmd);

    /* Query cacheability info to report back to the guest. */
    result = rutabaga_resource_map_info(vr->rutabaga, mblob.resource_id,
                                        &map_info);
    CHECK(!result, cmd);
    /*
     * RUTABAGA_MAP_ACCESS_* flags are not part of the virtio-gpu spec, but do
     * exist to potentially allow the hypervisor to restrict write access to
     * memory. QEMU does not need to use this functionality at the moment.
     */
    resp.map_info = map_info & RUTABAGA_MAP_CACHE_MASK;

    result = rutabaga_resource_map(vr->rutabaga, mblob.resource_id, &mapping);
    CHECK(!result, cmd);
    /*
     * There is small risk of the MemoryRegion dereferencing the pointer after
     * rutabaga unmaps it. Please see discussion here:
     *
     * https://lists.gnu.org/archive/html/qemu-devel/2023-09/msg05141.html
     *
     * It is highly unlikely to happen in practice and doesn't affect known
     * use cases. However, it should be fixed and is noted here for posterity.
     */
    /* Claim the first free slot and publish the mapping into hostmem. */
    for (slot = 0; slot < MAX_SLOTS; slot++) {
        if (vr->memory_regions[slot].used) {
            continue;
        }

        MemoryRegion *mr = &(vr->memory_regions[slot].mr);
        memory_region_init_ram_ptr(mr, OBJECT(vr), "blob", mapping.size,
                                   mapping.ptr);
        memory_region_add_subregion(&vb->hostmem, mblob.offset, mr);
        vr->memory_regions[slot].resource_id = mblob.resource_id;
        vr->memory_regions[slot].used = 1;
        break;
    }

    /* All slots busy: undo the map and fail the command. */
    if (slot >= MAX_SLOTS) {
        result = rutabaga_resource_unmap(vr->rutabaga, mblob.resource_id);
        CHECK(!result, cmd);
    }
    CHECK(slot < MAX_SLOTS, cmd);

    resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO;
    virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
}
/*
 * VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB: tear down a previous MAP_BLOB.
 * Finds the slot tracking this resource, removes its MemoryRegion from
 * hostmem, then unmaps on the rutabaga side.  If the resource was never
 * mapped, the slot scan falls through and the CHECK fails the command.
 */
static void
rutabaga_cmd_resource_unmap_blob(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    int32_t result;
    uint32_t slot = 0;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unmap_blob ublob;
    VirtIOGPUBase *vb = VIRTIO_GPU_BASE(g);
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(ublob);
    CHECK(ublob.resource_id != 0, cmd);

    res = virtio_gpu_find_resource(g, ublob.resource_id);
    CHECK(res, cmd);

    for (slot = 0; slot < MAX_SLOTS; slot++) {
        if (vr->memory_regions[slot].resource_id != ublob.resource_id) {
            continue;
        }

        MemoryRegion *mr = &(vr->memory_regions[slot].mr);
        memory_region_del_subregion(&vb->hostmem, mr);
        vr->memory_regions[slot].resource_id = 0;
        vr->memory_regions[slot].used = 0;
        break;
    }
    CHECK(slot < MAX_SLOTS, cmd);

    result = rutabaga_resource_unmap(vr->rutabaga, res->resource_id);
    CHECK(!result, cmd);
}
/*
 * Dispatch one virtio-gpu control command to its rutabaga handler, then
 * complete it: send an error or OK response immediately, or — when the
 * guest requested a fence — create a rutabaga fence and defer the response
 * until the fence callback fires.
 */
static void
virtio_gpu_rutabaga_process_cmd(VirtIOGPU *g,
                                struct virtio_gpu_ctrl_command *cmd)
{
    struct rutabaga_fence fence = { 0 };
    int32_t result;
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_CTX_CREATE:
        rutabaga_cmd_context_create(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DESTROY:
        rutabaga_cmd_context_destroy(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        rutabaga_cmd_create_resource_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
        rutabaga_cmd_create_resource_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SUBMIT_3D:
        rutabaga_cmd_submit_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        rutabaga_cmd_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
        rutabaga_cmd_transfer_to_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
        rutabaga_cmd_transfer_from_host_3d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        rutabaga_cmd_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        rutabaga_cmd_detach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        rutabaga_cmd_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        rutabaga_cmd_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        rutabaga_cmd_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
        rutabaga_cmd_ctx_attach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
        rutabaga_cmd_ctx_detach_resource(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
        rutabaga_cmd_get_capset_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_CAPSET:
        rutabaga_cmd_get_capset(g, cmd);
        break;
    /* Display queries are handled by the generic virtio-gpu code. */
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        rutabaga_cmd_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB:
        rutabaga_cmd_resource_map_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB:
        rutabaga_cmd_resource_unmap_blob(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }

    /* Handler already sent a response (e.g. capset replies). */
    if (cmd->finished) {
        return;
    }
    if (cmd->error) {
        error_report("%s: ctrl 0x%x, error 0x%x", __func__,
                     cmd->cmd_hdr.type, cmd->error);
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error);
        return;
    }
    /* No fence requested: acknowledge immediately. */
    if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        return;
    }

    /*
     * Fenced command: ask rutabaga for a fence; the OK response is sent
     * from virtio_gpu_rutabaga_aio_cb() once the fence signals.
     */
    fence.flags = cmd->cmd_hdr.flags;
    fence.ctx_id = cmd->cmd_hdr.ctx_id;
    fence.fence_id = cmd->cmd_hdr.fence_id;
    fence.ring_idx = cmd->cmd_hdr.ring_idx;

    trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);

    result = rutabaga_create_fence(vr->rutabaga, &fence);
    CHECK(!result, cmd);
}
/*
 * Main-loop bottom half scheduled by virtio_gpu_rutabaga_fence_cb():
 * retire every queued fenced command whose fence_id is covered by the
 * signaled fence, and send its deferred OK response.
 */
static void
virtio_gpu_rutabaga_aio_cb(void *opaque)
{
    struct rutabaga_aio_data *data = opaque;
    VirtIOGPU *g = VIRTIO_GPU(data->vr);
    struct rutabaga_fence fence_data = data->fence;
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    uint32_t signaled_ctx_specific = fence_data.flags &
                                     RUTABAGA_FLAG_INFO_RING_IDX;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        /*
         * Only retire commands on the same timeline as the signaled fence:
         * global vs. context/ring-specific ("context specific timelines").
         */
        uint32_t target_ctx_specific = cmd->cmd_hdr.flags &
                                       RUTABAGA_FLAG_INFO_RING_IDX;

        if (signaled_ctx_specific != target_ctx_specific) {
            continue;
        }
        /* Ring-specific fences must also match the ring index. */
        if (signaled_ctx_specific &&
            (cmd->cmd_hdr.ring_idx != fence_data.ring_idx)) {
            continue;
        }
        /* Fences signal in order: everything <= fence_id is complete. */
        if (cmd->cmd_hdr.fence_id > fence_data.fence_id) {
            continue;
        }

        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
    }

    g_free(data);
}
/*
 * Rutabaga fence-completion callback.  May run on an arbitrary
 * rutabaga-owned thread, so it only copies the fence and bounces the real
 * work to the main loop.
 */
static void
virtio_gpu_rutabaga_fence_cb(uint64_t user_data,
                             const struct rutabaga_fence *fence)
{
    struct rutabaga_aio_data *data;
    VirtIOGPU *g = (VirtIOGPU *)user_data;
    VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);

    /*
     * gfxstream and both cross-domain (and even newer versions virglrenderer:
     * see VIRGL_RENDERER_ASYNC_FENCE_CB) like to signal fence completion on
     * threads ("callback threads") that are different from the thread that
     * processes the command queue ("main thread").
     *
     * crosvm and other virtio-gpu 1.1 implementations enable callback threads
     * via locking. However, on QEMU a deadlock is observed if
     * virtio_gpu_ctrl_response_nodata(..) [used in the fence callback] is used
     * from a thread that is not the main thread.
     *
     * The reason is QEMU's internal locking is designed to work with QEMU
     * threads (see rcu_register_thread()) and not generic C/C++/Rust threads.
     * For now, we can workaround this by scheduling the return of the
     * fence descriptors on the main thread.
     */

    /* data (and the copied fence) is freed by the bottom half. */
    data = g_new0(struct rutabaga_aio_data, 1);
    data->vr = vr;
    data->fence = *fence;
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            virtio_gpu_rutabaga_aio_cb,
                            data);
}
  759. static void
  760. virtio_gpu_rutabaga_debug_cb(uint64_t user_data,
  761. const struct rutabaga_debug *debug)
  762. {
  763. switch (debug->debug_type) {
  764. case RUTABAGA_DEBUG_ERROR:
  765. error_report("%s", debug->message);
  766. break;
  767. case RUTABAGA_DEBUG_WARN:
  768. warn_report("%s", debug->message);
  769. break;
  770. case RUTABAGA_DEBUG_INFO:
  771. info_report("%s", debug->message);
  772. break;
  773. default:
  774. error_report("unknown debug type: %u", debug->debug_type);
  775. }
  776. }
  777. static bool virtio_gpu_rutabaga_init(VirtIOGPU *g, Error **errp)
  778. {
  779. int result;
  780. struct rutabaga_builder builder = { 0 };
  781. struct rutabaga_channel channel = { 0 };
  782. struct rutabaga_channels channels = { 0 };
  783. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  784. vr->rutabaga = NULL;
  785. builder.wsi = RUTABAGA_WSI_SURFACELESS;
  786. /*
  787. * Currently, if WSI is specified, the only valid strings are "surfaceless"
  788. * or "headless". Surfaceless doesn't create a native window surface, but
  789. * does copy from the render target to the Pixman buffer if a virtio-gpu
  790. * 2D hypercall is issued. Surfacless is the default.
  791. *
  792. * Headless is like surfaceless, but doesn't copy to the Pixman buffer. The
  793. * use case is automated testing environments where there is no need to view
  794. * results.
  795. *
  796. * In the future, more performant virtio-gpu 2D UI integration may be added.
  797. */
  798. if (vr->wsi) {
  799. if (g_str_equal(vr->wsi, "surfaceless")) {
  800. vr->headless = false;
  801. } else if (g_str_equal(vr->wsi, "headless")) {
  802. vr->headless = true;
  803. } else {
  804. error_setg(errp, "invalid wsi option selected");
  805. return false;
  806. }
  807. }
  808. builder.fence_cb = virtio_gpu_rutabaga_fence_cb;
  809. builder.debug_cb = virtio_gpu_rutabaga_debug_cb;
  810. builder.capset_mask = vr->capset_mask;
  811. builder.user_data = (uint64_t)g;
  812. /*
  813. * If the user doesn't specify the wayland socket path, we try to infer
  814. * the socket via a process similar to the one used by libwayland.
  815. * libwayland does the following:
  816. *
  817. * 1) If $WAYLAND_DISPLAY is set, attempt to connect to
  818. * $XDG_RUNTIME_DIR/$WAYLAND_DISPLAY
  819. * 2) Otherwise, attempt to connect to $XDG_RUNTIME_DIR/wayland-0
  820. * 3) Otherwise, don't pass a wayland socket to rutabaga. If a guest
  821. * wayland proxy is launched, it will fail to work.
  822. */
  823. channel.channel_type = RUTABAGA_CHANNEL_TYPE_WAYLAND;
  824. g_autofree gchar *path = NULL;
  825. if (!vr->wayland_socket_path) {
  826. const gchar *runtime_dir = g_get_user_runtime_dir();
  827. const gchar *display = g_getenv("WAYLAND_DISPLAY");
  828. if (!display) {
  829. display = "wayland-0";
  830. }
  831. if (runtime_dir) {
  832. path = g_build_filename(runtime_dir, display, NULL);
  833. channel.channel_name = path;
  834. }
  835. } else {
  836. channel.channel_name = vr->wayland_socket_path;
  837. }
  838. if ((builder.capset_mask & (1 << RUTABAGA_CAPSET_CROSS_DOMAIN))) {
  839. if (channel.channel_name) {
  840. channels.channels = &channel;
  841. channels.num_channels = 1;
  842. builder.channels = &channels;
  843. }
  844. }
  845. result = rutabaga_init(&builder, &vr->rutabaga);
  846. if (result) {
  847. error_setg_errno(errp, -result, "Failed to init rutabaga");
  848. return false;
  849. }
  850. return true;
  851. }
  852. static int virtio_gpu_rutabaga_get_num_capsets(VirtIOGPU *g)
  853. {
  854. int result;
  855. uint32_t num_capsets;
  856. VirtIOGPURutabaga *vr = VIRTIO_GPU_RUTABAGA(g);
  857. result = rutabaga_get_num_capsets(vr->rutabaga, &num_capsets);
  858. if (result) {
  859. error_report("Failed to get capsets");
  860. return 0;
  861. }
  862. vr->num_capsets = num_capsets;
  863. return num_capsets;
  864. }
  865. static void virtio_gpu_rutabaga_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
  866. {
  867. VirtIOGPU *g = VIRTIO_GPU(vdev);
  868. struct virtio_gpu_ctrl_command *cmd;
  869. if (!virtio_queue_ready(vq)) {
  870. return;
  871. }
  872. cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
  873. while (cmd) {
  874. cmd->vq = vq;
  875. cmd->error = 0;
  876. cmd->finished = false;
  877. QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
  878. cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
  879. }
  880. virtio_gpu_process_cmdq(g);
  881. }
  882. static void virtio_gpu_rutabaga_realize(DeviceState *qdev, Error **errp)
  883. {
  884. int num_capsets;
  885. VirtIOGPUBase *bdev = VIRTIO_GPU_BASE(qdev);
  886. VirtIOGPU *gpudev = VIRTIO_GPU(qdev);
  887. #if HOST_BIG_ENDIAN
  888. error_setg(errp, "rutabaga is not supported on bigendian platforms");
  889. return;
  890. #endif
  891. if (!virtio_gpu_rutabaga_init(gpudev, errp)) {
  892. return;
  893. }
  894. num_capsets = virtio_gpu_rutabaga_get_num_capsets(gpudev);
  895. if (!num_capsets) {
  896. return;
  897. }
  898. bdev->conf.flags |= (1 << VIRTIO_GPU_FLAG_RUTABAGA_ENABLED);
  899. bdev->conf.flags |= (1 << VIRTIO_GPU_FLAG_BLOB_ENABLED);
  900. bdev->conf.flags |= (1 << VIRTIO_GPU_FLAG_CONTEXT_INIT_ENABLED);
  901. bdev->virtio_config.num_capsets = num_capsets;
  902. virtio_gpu_device_realize(qdev, errp);
  903. }
/*
 * User-visible qdev properties.  The DEFINE_PROP_BIT64 entries each toggle
 * one bit of VirtIOGPURutabaga.capset_mask (all default off); the strings
 * select the wayland proxy socket and the WSI mode.
 */
static const Property virtio_gpu_rutabaga_properties[] = {
    DEFINE_PROP_BIT64("gfxstream-vulkan", VirtIOGPURutabaga, capset_mask,
                      RUTABAGA_CAPSET_GFXSTREAM_VULKAN, false),
    DEFINE_PROP_BIT64("cross-domain", VirtIOGPURutabaga, capset_mask,
                      RUTABAGA_CAPSET_CROSS_DOMAIN, false),
    /* x- prefix: experimental/unstable property names. */
    DEFINE_PROP_BIT64("x-gfxstream-gles", VirtIOGPURutabaga, capset_mask,
                      RUTABAGA_CAPSET_GFXSTREAM_GLES, false),
    DEFINE_PROP_BIT64("x-gfxstream-composer", VirtIOGPURutabaga, capset_mask,
                      RUTABAGA_CAPSET_GFXSTREAM_COMPOSER, false),
    DEFINE_PROP_STRING("wayland-socket-path", VirtIOGPURutabaga,
                       wayland_socket_path),
    DEFINE_PROP_STRING("wsi", VirtIOGPURutabaga, wsi),
};
  917. static void virtio_gpu_rutabaga_class_init(ObjectClass *klass, void *data)
  918. {
  919. DeviceClass *dc = DEVICE_CLASS(klass);
  920. VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
  921. VirtIOGPUBaseClass *vbc = VIRTIO_GPU_BASE_CLASS(klass);
  922. VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
  923. vbc->gl_flushed = virtio_gpu_rutabaga_gl_flushed;
  924. vgc->handle_ctrl = virtio_gpu_rutabaga_handle_ctrl;
  925. vgc->process_cmd = virtio_gpu_rutabaga_process_cmd;
  926. vgc->update_cursor_data = virtio_gpu_rutabaga_update_cursor;
  927. vgc->resource_destroy = virtio_gpu_rutabaga_resource_unref;
  928. vdc->realize = virtio_gpu_rutabaga_realize;
  929. device_class_set_props(dc, virtio_gpu_rutabaga_properties);
  930. }
/* QOM type registration: subclass of the generic virtio-gpu device. */
static const TypeInfo virtio_gpu_rutabaga_info[] = {
    {
        .name = TYPE_VIRTIO_GPU_RUTABAGA,
        .parent = TYPE_VIRTIO_GPU,
        .instance_size = sizeof(VirtIOGPURutabaga),
        .class_init = virtio_gpu_rutabaga_class_init,
    },
};

DEFINE_TYPES(virtio_gpu_rutabaga_info)

/* Loadable-module metadata: object name, Kconfig gate, module dependency. */
module_obj(TYPE_VIRTIO_GPU_RUTABAGA);
module_kconfig(VIRTIO_GPU);
module_dep("hw-display-virtio-gpu");