virtio-gpu.c

/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "sysemu/cpus.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

static void virtio_gpu_reset_bh(void *opaque);
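
/*
 * Copy the pixel data of the cursor's backing resource (blob or pixman
 * image) into the scanout's current cursor image, after checking that the
 * backing is large enough for the cursor dimensions.
 */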
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}
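
/*
 * Handle a cursor-queue request: either move the cursor or redefine its
 * shape and hotspot from the given resource, then update the display's
 * pointer position and visibility.
 */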
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}
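
/*
 * Write a response into the command's in-buffers, push it onto the
 * virtqueue and notify the guest.  Fence metadata is propagated when the
 * request carried VIRTIO_GPU_FLAG_FENCE.
 */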
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /*
     * Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

#ifdef WIN32
static void
win32_pixman_image_destroy(pixman_image_t *image, void *data)
{
    HANDLE handle = data;

    qemu_win32_map_free(pixman_image_get_data(image), handle, &error_warn);
}
#endif
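
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host pixman image for the
 * requested format and size, subject to the max_hostmem accounting limit.
 */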
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        void *bits = NULL;
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            goto end;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            c2d.width,
            c2d.height,
            bits, c2d.height ? res->hostmem / c2d.height : 0);
#ifdef WIN32
        if (res->image) {
            pixman_image_set_destroy_function(res->image,
                                              win32_pixman_image_destroy,
                                              res->handle);
        }
#endif
    }

#ifdef WIN32
end:
#endif
    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
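
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: create a resource backed by guest
 * memory entries, map them into an iovec and initialize its udmabuf backing.
 */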
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
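
/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy data from the guest-provided
 * backing iovec into the host pixman image, row by row for partial
 * rectangles or in a single run for full-width transfers.
 */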
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}
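
/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: validate the flushed rectangle and
 * propagate it to the scanouts showing this resource (a GL update for blob
 * resources, a clipped dpy_gfx_update otherwise).
 */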
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}
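
/*
 * Common scanout setup for the 2D and blob paths: validate the rectangle
 * against the framebuffer, then either hand a dmabuf to a GL console or
 * (re)create a display surface over the resource's pixel data.
 */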
static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
            }
            return;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle,
                                             fb->offset);
#endif

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
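
/*
 * Read the guest's virtio_gpu_mem_entry array from the command buffer and
 * DMA-map each entry, building the iovec (and optional address array) that
 * serves as backing storage for a resource.
 */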
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}
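
/*
 * Dispatch one control command to its handler.  Unless the handler already
 * responded (or the renderer is blocked), send a nodata reply carrying
 * either the recorded error or VIRTIO_GPU_RESP_OK_NODATA.
 */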
void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
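
/*
 * Drain the queued control commands, stopping early if the renderer is
 * blocked.  Commands that did not finish synchronously (fenced requests)
 * are moved to the fence queue and counted as inflight.
 */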
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
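
/*
 * Migration: the resource list is streamed manually (id, geometry, backing
 * iovec layout and pixel data, terminated by a zero id), followed by the
 * scanout state handled via vmstate_virtio_gpu_scanouts.
 */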
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    void *bits = NULL;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            g_free(res);
            return -EINVAL;
        }
#endif
        res->image = pixman_image_create_bits(
            pformat,
            res->width, res->height,
            bits, res->height ? res->hostmem / res->height : 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                               DMA_DIRECTION_TO_DEVICE,
                               MEMTXATTRS_UNSPECIFIED);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        /* FIXME: should take scanout.r.{x,y} into account */
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
#endif

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
                                     &qdev->mem_reentrancy_guard);
    g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
                                       &qdev->mem_reentrancy_guard);
    g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g);
    qemu_cond_init(&g->reset_cond);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_device_unrealize(DeviceState *qdev)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    g_clear_pointer(&g->ctrl_bh, qemu_bh_delete);
    g_clear_pointer(&g->cursor_bh, qemu_bh_delete);
    g_clear_pointer(&g->reset_bh, qemu_bh_delete);
    qemu_cond_destroy(&g->reset_cond);
    virtio_gpu_base_device_unrealize(qdev);
}

static void virtio_gpu_reset_bh(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
    }

    g->reset_finished = true;
    qemu_cond_signal(&g->reset_cond);
}
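
/*
 * Device reset: destroy all resources (via the reset BH when called from a
 * vCPU thread) and drop any pending or inflight commands.
 */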
void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (qemu_in_vcpu_thread()) {
        g->reset_finished = false;
        qemu_bh_schedule(g->reset_bh);
        while (!g->reset_finished) {
            qemu_cond_wait_iothread(&g->reset_cond);
        }
    } else {
        virtio_gpu_reset_bh(g);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}
/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)