virtio-gpu.c

/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "sysemu/cpus.h"
#include "ui/console.h"
#include "ui/rect.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_reset_bh(void *opaque);
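
/*
 * Copy the pixel data for @resource_id into the scanout's current cursor.
 * Handles both blob-backed and pixman-backed resources and silently bails
 * out if the resource is missing or its size does not match the cursor.
 */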
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}
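
/*
 * Handle an UPDATE_CURSOR or MOVE_CURSOR request: refresh the cursor image
 * and hot spot on an update, then reposition the pointer on the selected
 * scanout.
 */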
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y, cursor->resource_id);
}

struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}
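
/*
 * Write @resp into the command's in-iov (propagating fence id and context
 * on fenced commands) and complete the virtqueue element.
 */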
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}
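
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host-side pixman image for
 * the new resource, subject to the max_hostmem limit.
 */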
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        void *bits = NULL;
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            goto end;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            c2d.width,
            c2d.height,
            bits, c2d.height ? res->hostmem / c2d.height : 0);
#ifdef WIN32
        if (res->image) {
            pixman_image_set_destroy_function(res->image, qemu_pixman_win32_image_destroy, res->handle);
        }
#endif
    }

#ifdef WIN32
end:
#endif
    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
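
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: create a resource backed directly by
 * guest memory; the backing entries are mapped into an iovec and, where
 * available, exported through udmabuf.
 */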
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}
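
/*
 * Detach whatever resource is currently displayed on @scanout_id and blank
 * the corresponding console.
 */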
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res,
                                        Error **errp)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    /*
     * virtio_gpu_resource_destroy does not set any errors, so pass a NULL errp
     * to ignore them.
     */
    virtio_gpu_resource_destroy(g, res, NULL);
}
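
/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy the requested rectangle from the
 * guest backing iovec into the host pixman image, row by row unless the
 * rectangle spans the full image width.
 */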
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}
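
/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: propagate the flushed rectangle to every
 * scanout that currently displays the resource.
 */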
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    QemuRect flush_rect;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    qemu_rect_init(&flush_rect, rf.r.x, rf.r.y, rf.r.width, rf.r.height);

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        QemuRect rect;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        qemu_rect_init(&rect, scanout->x, scanout->y,
                       scanout->width, scanout->height);

        /* work out the area we need to update for each console */
        if (qemu_rect_intersect(&flush_rect, &rect, &rect)) {
            qemu_rect_translate(&rect, -scanout->x, -scanout->y);
            dpy_gfx_update(g->parent_obj.scanout[i].con,
                           rect.x, rect.y, rect.width, rect.height);
        }
    }
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
    scanout->fb = *fb;
}
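
/*
 * Common scanout setup used by SET_SCANOUT, SET_SCANOUT_BLOB and post-load:
 * validate the rectangle against the framebuffer and (re)create the display
 * surface when the backing data or geometry has changed.
 */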
static bool virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return false;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, fb, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
                return false;
            }
            return true;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, fb->offset);
#endif

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, fb, r);
    return true;
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
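
/*
 * Map the guest memory entries that follow the command header into an
 * iovec. The iovec is grown in chunks of 16 because a single entry may be
 * split across several DMA mappings.
 */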
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
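
/*
 * Drain the queued control commands. Commands left unfinished after
 * processing are parked on fenceq and completed later (see
 * virtio_gpu_process_fenceq).
 */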
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(VIRTIO_DEVICE(g), g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static bool scanout_vmstate_after_v2(void *opaque, int version)
{
    struct VirtIOGPUBase *base = container_of(opaque, VirtIOGPUBase, scanout);
    struct VirtIOGPU *gpu = container_of(base, VirtIOGPU, parent_obj);

    return gpu->scanout_vmstate_version >= 2;
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_UINT32_TEST(fb.format, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.bytes_pp, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.width, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.height, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.stride, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.offset, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
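
/*
 * Migration: write out every non-blob resource (metadata, backing entries
 * and pixel contents), then the scanout state. Blob resources are handled
 * by the "virtio-gpu/blob" subsection.
 */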
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->blob_size) {
            continue;
        }
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static bool virtio_gpu_load_restore_mapping(VirtIOGPU *g,
                                            struct virtio_gpu_simple_resource *res)
{
    int i;

    for (i = 0; i < res->iov_cnt; i++) {
        hwaddr len = res->iov[i].iov_len;
        res->iov[i].iov_base =
            dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                           DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);

        if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
            /* Clean up the half-a-mapping we just created... */
            if (res->iov[i].iov_base) {
                dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, res->iov[i].iov_base,
                                 len, DMA_DIRECTION_TO_DEVICE, 0);
            }
            /* ...and the mappings for previous loop iterations */
            res->iov_cnt = i;
            virtio_gpu_cleanup_mapping(g, res);
            return false;
        }
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
    return true;
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id, pformat;
    void *bits = NULL;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            g_free(res);
            return -EINVAL;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            res->width, res->height,
            bits, res->height ? res->hostmem / res->height : 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }
#ifdef WIN32
        pixman_image_set_destroy_function(res->image, qemu_pixman_win32_image_destroy, res->handle);
#endif

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            pixman_image_unref(res->image);
            g_free(res);
            return -EINVAL;
        }

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);

    return 0;
}

static int virtio_gpu_blob_save(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (!res->blob_size) {
            continue;
        }
        assert(!res->image);
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->blob_size);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
    }
    qemu_put_be32(f, 0); /* end of list */

    return 0;
}

static int virtio_gpu_blob_load(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->blob_size = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);
        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            g_free(res);
            return -EINVAL;
        }

        virtio_gpu_init_udmabuf(res);

        resource_id = qemu_get_be32(f);
    }

    return 0;
}

static int virtio_gpu_post_load(void *opaque, int version_id)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_simple_resource *res;
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }

        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }

        if (scanout->fb.format != 0) {
            uint32_t error = 0;
            struct virtio_gpu_rect r = {
                .x = scanout->x,
                .y = scanout->y,
                .width = scanout->width,
                .height = scanout->height
            };

            if (!virtio_gpu_do_set_scanout(g, i, &scanout->fb, res, &r, &error)) {
                return -EINVAL;
            }
        } else {
            /* legacy v1 migration support */
            if (!res->image) {
                return -EINVAL;
            }
            scanout->ds = qemu_create_displaysurface_pixman(res->image);
#ifdef WIN32
            qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
#endif
            dpy_gfx_replace_surface(scanout->con, scanout->ds);
        }

        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) &&
            !virtio_gpu_have_udmabuf()) {
            error_setg(errp, "need rutabaga or udmabuf for blob resources");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = virtio_bh_new_guarded(qdev, virtio_gpu_ctrl_bh, g);
    g->cursor_bh = virtio_bh_new_guarded(qdev, virtio_gpu_cursor_bh, g);
    g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g);
    qemu_cond_init(&g->reset_cond);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_device_unrealize(DeviceState *qdev)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    g_clear_pointer(&g->ctrl_bh, qemu_bh_delete);
    g_clear_pointer(&g->cursor_bh, qemu_bh_delete);
    g_clear_pointer(&g->reset_bh, qemu_bh_delete);
    qemu_cond_destroy(&g->reset_cond);
    virtio_gpu_base_device_unrealize(qdev);
}
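
/*
 * Bottom half performing the actual reset: destroy all resources and blank
 * every scanout, then wake up virtio_gpu_reset() if it is waiting.
 */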
static void virtio_gpu_reset_bh(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    struct virtio_gpu_simple_resource *res, *tmp;
    uint32_t resource_id;
    Error *local_err = NULL;
    int i = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        resource_id = res->resource_id;
        vgc->resource_destroy(g, res, &local_err);
        if (local_err) {
            error_append_hint(&local_err, "%s: %s resource_destroy "
                              "for resource_id = %"PRIu32" failed.\n",
                              __func__, object_get_typename(OBJECT(g)),
                              resource_id);
            /* error_report_err frees the error object for us */
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
    }

    g->reset_finished = true;
    qemu_cond_signal(&g->reset_cond);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (qemu_in_vcpu_thread()) {
        g->reset_finished = false;
        qemu_bh_schedule(g->reset_bh);
        while (!g->reset_finished) {
            qemu_cond_wait_bql(&g->reset_cond);
        }
    } else {
        aio_bh_call(g->reset_bh);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

static bool virtio_gpu_blob_state_needed(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);

    return virtio_gpu_blob_enabled(g->parent_obj.conf);
}

const VMStateDescription vmstate_virtio_gpu_blob_state = {
    .name = "virtio-gpu/blob",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .needed = virtio_gpu_blob_state_needed,
    .fields = (const VMStateField[]){
        {
            .name = "virtio-gpu/blob",
            .info = &(const VMStateInfo) {
                .name = "blob",
                .get = virtio_gpu_blob_load,
                .put = virtio_gpu_blob_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_gpu_blob_state,
        NULL
    },
    .post_load = virtio_gpu_post_load,
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_SIZE("hostmem", VirtIOGPU, parent_obj.conf.hostmem, 0),
    DEFINE_PROP_UINT8("x-scanout-vmstate-version", VirtIOGPU, scanout_vmstate_version, 2),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgc->resource_destroy = virtio_gpu_resource_destroy;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)