/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "system/cpus.h"
#include "ui/console.h"
#include "ui/rect.h"
#include "trace.h"
#include "system/dma.h"
#include "system/system.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/memfd.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_reset_bh(void *opaque);
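
/*
 * Copy the cursor pixel data out of the guest resource into the QEMU
 * cursor object.  The target cursor is allocated as 64x64 ARGB (see
 * update_cursor() below), so a blob resource must carry at least
 * width * height * 4 bytes, and a pixman-backed resource must match
 * the cursor dimensions exactly; otherwise the copy is skipped and
 * the previous cursor data is kept.
 */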
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}
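
/*
 * Handle both VIRTIO_GPU_CMD_UPDATE_CURSOR and
 * VIRTIO_GPU_CMD_MOVE_CURSOR: a move only changes the pointer
 * position, while an update additionally (re)defines the cursor image
 * and hotspot from the referenced resource.
 */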
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y, cursor->resource_id);
}
struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}
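
/*
 * Look up a resource and optionally verify that it has backing
 * storage attached (guest pages plus either a pixman image or a
 * blob).  On failure a guest error is logged and, if the caller
 * passed an error pointer, a VIRTIO_GPU_RESP_ERR_* code is stored for
 * the command response.
 */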
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}
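
/*
 * Write a response into the command's in-iovec and push it onto the
 * virtqueue.  If the request carried VIRTIO_GPU_FLAG_FENCE, the fence
 * id and context id are echoed back so the guest can match the
 * completion against its fence.
 */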
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
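
/*
 * Worked example: for a 1024x768 resource in a 32 bpp format the
 * stride is ((1024 * 32 + 0x1f) >> 5) * 4 = 4096 bytes, i.e. rows
 * rounded up to a whole number of 32-bit words, which gives
 * 768 * 4096 = 3 MiB of host memory accounted against
 * conf_max_hostmem.
 */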
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skipping the integer
     * overflow check; pixman_image_create_bits will fail in case it
     * overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        if (!qemu_pixman_image_new_shareable(
                &res->image,
                &res->share_handle,
                "virtio-gpu res",
                pformat,
                c2d.width,
                c2d.height,
                c2d.height ? res->hostmem / c2d.height : 0,
                &error_warn)) {
            goto end;
        }
    }

end:
    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}
void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res,
                                        Error **errp)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    /*
     * virtio_gpu_resource_destroy does not set any errors, so pass a NULL errp
     * to ignore them.
     */
    virtio_gpu_resource_destroy(g, res, NULL);
}
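
/*
 * Copy guest-dirtied data into the host-side pixman image.  The fast
 * path handles a full-width transfer with a single iov_to_buf();
 * partial-width rectangles fall back to one copy per scanline,
 * reading from t2d.offset + h * stride on the source side and
 * writing at the rectangle position on the destination side.
 */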
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    QemuRect flush_rect;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    qemu_rect_init(&flush_rect, rf.r.x, rf.r.y, rf.r.width, rf.r.height);

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        QemuRect rect;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        qemu_rect_init(&rect, scanout->x, scanout->y,
                       scanout->width, scanout->height);

        /* work out the area we need to update for each console */
        if (qemu_rect_intersect(&flush_rect, &rect, &rect)) {
            qemu_rect_translate(&rect, -scanout->x, -scanout->y);
            dpy_gfx_update(g->parent_obj.scanout[i].con,
                           rect.x, rect.y, rect.width, rect.height);
        }
    }
}
static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

void virtio_gpu_update_scanout(VirtIOGPU *g,
                               uint32_t scanout_id,
                               struct virtio_gpu_simple_resource *res,
                               struct virtio_gpu_framebuffer *fb,
                               struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
    scanout->fb = *fb;
}
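
/*
 * Validate the scanout rectangle against the framebuffer and bind the
 * resource to the scanout.  Blob resources on a GL-capable console
 * are handed over as dmabufs; everything else gets a display surface
 * created directly on top of the resource pixels (the pixman image
 * keeps an extra reference so the backing memory outlives the
 * surface).
 */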
static bool virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return false;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, fb, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
                return false;
            }
            return true;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        qemu_displaysurface_set_share_handle(scanout->ds,
                                             res->share_handle, fb->offset);

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, fb, r);
    return true;
}
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

bool virtio_gpu_scanout_blob_to_fb(struct virtio_gpu_framebuffer *fb,
                                   struct virtio_gpu_set_scanout_blob *ss,
                                   uint64_t blob_size)
{
    uint64_t fbend;

    fb->format = virtio_gpu_get_pixman_format(ss->format);
    if (!fb->format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss->format);
        return false;
    }

    fb->bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb->format), 8);
    fb->width = ss->width;
    fb->height = ss->height;
    fb->stride = ss->strides[0];
    fb->offset = ss->offsets[0] + ss->r.x * fb->bytes_pp + ss->r.y * fb->stride;

    fbend = fb->offset;
    fbend += (uint64_t) fb->stride * ss->r.height;

    if (fbend > blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        return false;
    }

    return true;
}

static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (!virtio_gpu_scanout_blob_to_fb(&fb, &ss, res->blob_size)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
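
/*
 * Translate the guest's virtio_gpu_mem_entry table (up to 16384
 * entries, read from the request payload at the given offset) into a
 * host iovec array via dma_memory_map().  A single entry may be split
 * into several iovecs when the guest-contiguous range is not
 * host-contiguous, so the arrays are grown in chunks of 16 as needed.
 * On failure everything mapped so far is unwound.
 */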
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory"
                              " for element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}
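
/*
 * Decode and dispatch one control-queue command for the simple 2D
 * device.  Any command that did not already send its own response
 * (and is not held back by a blocked renderer) is answered with
 * either the recorded error or VIRTIO_GPU_RESP_OK_NODATA.
 */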
void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
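
/*
 * Drain the queued control commands.  Processing stops early when the
 * renderer is blocked; a command that suspends without a fence also
 * stops the loop, while fenced unfinished commands are moved to the
 * fence queue and counted as inflight.  The processing_cmdq flag
 * guards against re-entrancy.
 */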
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        /* command suspended */
        if (!cmd->finished && !(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
            trace_virtio_gpu_cmd_suspended(cmd->cmd_hdr.type);
            break;
        }

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                trace_virtio_gpu_inc_inflight_fences(g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}
static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            trace_virtio_gpu_dec_inflight_fences(g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(VIRTIO_DEVICE(g), g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;

    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}
static bool scanout_vmstate_after_v2(void *opaque, int version)
{
    struct VirtIOGPUBase *base = container_of(opaque, VirtIOGPUBase, scanout);
    struct VirtIOGPU *gpu = container_of(base, VirtIOGPU, parent_obj);

    return gpu->scanout_vmstate_version >= 2;
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_UINT32_TEST(fb.format, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.bytes_pp, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.width, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.height, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.stride, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.offset, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
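
/*
 * Migration of the 2D state is hand-rolled: each non-blob resource is
 * written as (id, width, height, format, iov layout, pixel data), the
 * list is terminated by a zero resource id, and the scanout
 * configuration follows via vmstate_virtio_gpu_scanouts.  The loader
 * below recreates the pixman images and re-maps the guest backing
 * pages on the destination.
 */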
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->blob_size) {
            continue;
        }
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static bool virtio_gpu_load_restore_mapping(VirtIOGPU *g,
                                            struct virtio_gpu_simple_resource *res)
{
    int i;

    for (i = 0; i < res->iov_cnt; i++) {
        hwaddr len = res->iov[i].iov_len;
        res->iov[i].iov_base =
            dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                           DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);

        if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
            /* Clean up the half-a-mapping we just created... */
            if (res->iov[i].iov_base) {
                dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, res->iov[i].iov_base,
                                 len, DMA_DIRECTION_TO_DEVICE, 0);
            }
            /* ...and the mappings for previous loop iterations */
            res->iov_cnt = i;
            virtio_gpu_cleanup_mapping(g, res);
            return false;
        }
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
    return true;
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
        if (!qemu_pixman_image_new_shareable(&res->image,
                                             &res->share_handle,
                                             "virtio-gpu res",
                                             pformat,
                                             res->width,
                                             res->height,
                                             res->height ?
                                                 res->hostmem / res->height : 0,
                                             &error_warn)) {
            g_free(res);
            return -EINVAL;
        }

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            pixman_image_unref(res->image);
            g_free(res);
            return -EINVAL;
        }

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);

    return 0;
}
static int virtio_gpu_blob_save(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (!res->blob_size) {
            continue;
        }
        assert(!res->image);
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->blob_size);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
    }
    qemu_put_be32(f, 0); /* end of list */

    return 0;
}

static int virtio_gpu_blob_load(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->blob_size = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);
        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            g_free(res);
            return -EINVAL;
        }

        virtio_gpu_init_udmabuf(res);

        resource_id = qemu_get_be32(f);
    }

    return 0;
}
static int virtio_gpu_post_load(void *opaque, int version_id)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_simple_resource *res;
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }

        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }

        if (scanout->fb.format != 0) {
            uint32_t error = 0;
            struct virtio_gpu_rect r = {
                .x = scanout->x,
                .y = scanout->y,
                .width = scanout->width,
                .height = scanout->height
            };

            if (!virtio_gpu_do_set_scanout(g, i, &scanout->fb,
                                           res, &r, &error)) {
                return -EINVAL;
            }
        } else {
            /* legacy v1 migration support */
            if (!res->image) {
                return -EINVAL;
            }
            scanout->ds = qemu_create_displaysurface_pixman(res->image);
            qemu_displaysurface_set_share_handle(scanout->ds,
                                                 res->share_handle, 0);
            dpy_gfx_replace_surface(scanout->con, scanout->ds);
        }

        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}
void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) &&
            !virtio_gpu_virgl_enabled(g->parent_obj.conf) &&
            !virtio_gpu_have_udmabuf()) {
            error_setg(errp, "need rutabaga or udmabuf for blob resources");
            return;
        }

#ifdef VIRGL_VERSION_MAJOR
#if VIRGL_VERSION_MAJOR < 1
        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "old virglrenderer, blob resources unsupported");
            return;
        }
#endif
#endif
    }

    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
#ifdef VIRGL_VERSION_MAJOR
#if VIRGL_VERSION_MAJOR >= 1
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf) ||
            !virtio_gpu_hostmem_enabled(g->parent_obj.conf)) {
            error_setg(errp, "venus requires enabled blob and hostmem options");
            return;
        }
#else
        error_setg(errp, "old virglrenderer, venus unsupported");
        return;
#endif
#endif
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = virtio_bh_new_guarded(qdev, virtio_gpu_ctrl_bh, g);
    g->cursor_bh = virtio_bh_new_guarded(qdev, virtio_gpu_cursor_bh, g);
    g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g);
    qemu_cond_init(&g->reset_cond);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_device_unrealize(DeviceState *qdev)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    g_clear_pointer(&g->ctrl_bh, qemu_bh_delete);
    g_clear_pointer(&g->cursor_bh, qemu_bh_delete);
    g_clear_pointer(&g->reset_bh, qemu_bh_delete);
    qemu_cond_destroy(&g->reset_cond);
    virtio_gpu_base_device_unrealize(qdev);
}
static void virtio_gpu_reset_bh(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    struct virtio_gpu_simple_resource *res, *tmp;
    uint32_t resource_id;
    Error *local_err = NULL;
    int i = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        resource_id = res->resource_id;
        vgc->resource_destroy(g, res, &local_err);
        if (local_err) {
            error_append_hint(&local_err, "%s: %s resource_destroy"
                              " for resource_id = %"PRIu32" failed.\n",
                              __func__, object_get_typename(OBJECT(g)),
                              resource_id);
            /* error_report_err frees the error object for us */
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
    }

    g->reset_finished = true;
    qemu_cond_signal(&g->reset_cond);
}
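
/*
 * Device reset.  When called from a vCPU thread the actual teardown
 * is deferred to the reset bottom half on the main loop (waited for
 * via reset_cond) so that resource destruction runs in the right
 * context; otherwise the BH body is invoked directly.  Pending and
 * fenced commands are dropped afterwards.
 */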
void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (qemu_in_vcpu_thread()) {
        g->reset_finished = false;
        qemu_bh_schedule(g->reset_bh);
        while (!g->reset_finished) {
            qemu_cond_wait_bql(&g->reset_cond);
        }
    } else {
        aio_bh_call(g->reset_bh);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}
static bool virtio_gpu_blob_state_needed(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);

    return virtio_gpu_blob_enabled(g->parent_obj.conf);
}

const VMStateDescription vmstate_virtio_gpu_blob_state = {
    .name = "virtio-gpu/blob",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .needed = virtio_gpu_blob_state_needed,
    .fields = (const VMStateField[]){
        {
            .name = "virtio-gpu/blob",
            .info = &(const VMStateInfo) {
                .name = "blob",
                .get = virtio_gpu_blob_load,
                .put = virtio_gpu_blob_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};
/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core.
 * Instead the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_gpu_blob_state,
        NULL
    },
    .post_load = virtio_gpu_post_load,
};

static const Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_SIZE("hostmem", VirtIOGPU, parent_obj.conf.hostmem, 0),
    DEFINE_PROP_UINT8("x-scanout-vmstate-version", VirtIOGPU,
                      scanout_vmstate_version, 2),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgc->resource_destroy = virtio_gpu_resource_destroy;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)