virtio-gpu.c

/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1
static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);
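
/*
 * Copy the pixel payload for a hardware cursor out of its backing
 * resource.  The destination cursor is allocated 64x64 by
 * update_cursor(), so the blob-size check and the pixman
 * width/height check below both validate the source against the
 * current cursor dimensions before the memcpy.
 */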
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}

static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}
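
/*
 * Look up a resource and optionally insist that it has backing storage
 * attached (guest pages for ordinary resources, a blob mapping for blob
 * resources).  On failure a guest error is logged and, if the caller
 * passed an error pointer, a VIRTIO_GPU_RESP_ERR_* code is stored there.
 */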
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}
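
/*
 * Write a response into the request's in-sg and push it onto the
 * virtqueue.  When the request carried VIRTIO_GPU_FLAG_FENCE, the
 * fence_id and ctx_id are echoed back so the guest can match the
 * completion against the fence it submitted.
 */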
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
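
/*
 * Stride is rounded up to a multiple of 4 bytes (32 bits), matching
 * pixman's layout.  For example, a 1024x768 resource in a 32 bpp
 * format works out to a 4096-byte stride and 768 * 4096 = 3 MiB of
 * host memory.
 */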
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
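
/*
 * Blob resources are backed directly by guest pages rather than by a
 * host-side pixman image: the entries are mapped into an iov via
 * virtio_gpu_create_mapping_iov() and then exported to the host as a
 * udmabuf (see virtio_gpu_init_udmabuf()).
 */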
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
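
/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: propagate a dirty rectangle to every
 * scanout the resource is attached to.  The flushed rectangle is
 * intersected with each scanout's rectangle and translated into that
 * scanout's coordinate space before the display update is issued, so
 * each console only repaints the part it actually shows.
 */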
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                console_has_gl(scanout->con)) {
                dpy_gl_update(scanout->con, 0, 0, scanout->width,
                              scanout->height);
            }
        }
        return;
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}
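
/*
 * Common tail of SET_SCANOUT and SET_SCANOUT_BLOB.  After validating
 * the rectangle against the framebuffer, a display surface is only
 * (re)created when the backing pointer, width or height actually
 * changed; the surface wraps the resource's pixels directly, so later
 * transfers become visible without an extra copy.  For blob resources
 * on a GL-capable console the dmabuf path is tried first.
 */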
static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
                return;
            }
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
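
/*
 * The blob variant carries the framebuffer layout in the command
 * itself.  The end of the visible rectangle inside the blob is
 * offset + stride * (height - 1) + bytes_pp * width: full strides for
 * every row but the last, then just the pixels of the last row.  That
 * end must not run past blob_size.
 */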
static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
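
/*
 * Read nr_entries virtio_gpu_mem_entry structs that follow the command
 * header in the request's out-sg and turn them into an iovec of host
 * mappings.  dma_memory_map() may map less than was asked for, so a
 * single guest entry can expand into several iov slots; the iov (and
 * the optional addr array) grow in chunks of 16.  On any mapping
 * failure everything mapped so far is unwound.
 */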
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory"
                              " for element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}
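
/*
 * Virtqueue notifications only schedule a bottom half; the actual work
 * happens later in virtio_gpu_ctrl_bh()/virtio_gpu_cursor_bh(), which
 * keeps command processing out of the notify path.
 */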
static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
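
/*
 * Migration stream layout written below: for each resource its id,
 * width, height, format and iov_cnt, then (addr, iov_len) for every
 * iov entry, then the raw pixel data; a resource id of 0 terminates
 * the list and the scanout vmstate follows.  virtio_gpu_load() has to
 * mirror this exactly.
 */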
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                               DMA_DIRECTION_TO_DEVICE,
                               MEMTXATTRS_UNSPECIFIED);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}
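
/*
 * Realize: blob support requires udmabuf on the host and is (still)
 * mutually exclusive with virgl.  Queue 0 is the control queue,
 * queue 1 the cursor queue; each gets a bottom half for deferred
 * processing.
 */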
void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core.
 * Instead the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;

    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)