virtio-gpu.c

/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);
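
/*
 * Copy the pixel data of @resource_id into the scanout's current cursor.
 * The resource must match the cursor dimensions exactly (or, for blob
 * resources, be at least large enough); otherwise the cursor data is
 * left untouched.
 */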
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}
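
/*
 * VIRTIO_GPU_CMD_MOVE_CURSOR only repositions the cursor, while
 * VIRTIO_GPU_CMD_UPDATE_CURSOR additionally redefines its image and
 * hotspot from the given resource (a resource id of 0 hides the
 * pointer via dpy_mouse_set()).
 */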
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}
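
/*
 * Send @resp back on the control queue.  If the request was fenced
 * (VIRTIO_GPU_FLAG_FENCE), the fence id and context id are echoed in
 * the response header so the guest can match the completion to its
 * command.
 */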
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .width_mm = b->req_state[scanout].width_mm,
        .height_mm = b->req_state[scanout].height_mm,
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
        .refresh_rate = b->req_state[scanout].refresh_rate,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
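
/*
 * Host memory used by a resource: each row is padded to a 32-bit
 * boundary, e.g. a 1024x768 resource at 32 bpp has a 4096-byte stride
 * and therefore consumes 768 * 4096 = 3 MiB of host memory.
 */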
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits() will fail in case it overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
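
/*
 * Blob resources have no pixman image on the host side; they are
 * backed directly by guest memory (VIRTIO_GPU_BLOB_MEM_GUEST) and
 * exported via udmabuf (realize() refuses to enable blob support
 * without udmabuf).
 */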
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
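
/*
 * Copy guest data into the host pixman image.  A transfer that starts
 * at offset zero and covers the full image width is done with a single
 * iov_to_buf(); partial rectangles are copied line by line.
 */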
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);
            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image) *
                   pixman_image_get_height(res->image));
    }
}
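
/*
 * Propagate a flushed rectangle to every scanout showing the resource.
 * For blob resources on a GL-capable console the whole scanout is
 * refreshed via dpy_gl_update(); otherwise the flush region is clipped
 * against each scanout before calling dpy_gfx_update().
 */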
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y &&
                console_has_gl(scanout->con)) {
                dpy_gl_update(scanout->con, 0, 0, scanout->width,
                              scanout->height);
            }
        }
        return;
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_update_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
}
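
/*
 * Common scanout setup for both set_scanout variants: validate the
 * rectangle against the framebuffer, then either hand a blob resource
 * to the dmabuf path or (re)create a display surface over the
 * resource's pixel data for the requested region.
 */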
static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
                return;
            }
        }
        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
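
/*
 * Blob variant of set_scanout: the framebuffer layout (format, stride,
 * offset) comes from the command itself rather than from a pixman
 * image, so the end of the framebuffer is bounds-checked against the
 * blob's size before use.
 */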
static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
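
/*
 * Translate the guest's list of memory entries into an iovec by
 * mapping each entry through the device's DMA address space.  A single
 * entry may map in several pieces; the iovec (and the optional address
 * array) grows in chunks of 16 as needed.  At most 16384 entries are
 * accepted.
 */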
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory"
                              " for element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}
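
/*
 * Dispatch one control command.  Commands that did not already send a
 * response (cmd->finished) get VIRTIO_GPU_RESP_OK_NODATA, or the
 * recorded error, here, unless the renderer is currently blocked.
 */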
void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
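
/*
 * Drain the command queue.  The processing_cmdq flag guards against
 * reentrancy, and draining stops early while the renderer is blocked.
 * Commands left unfinished (fenced) are parked on the fence queue
 * until virtio_gpu_process_fenceq() completes them.
 */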
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}
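
/* Migration state for a single scanout, including its cursor. */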
static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
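
/*
 * The resource list is streamed manually: for each resource a header
 * (id, width, height, format, iov count), the guest addresses and
 * lengths of its backing entries, and the raw pixel data; a resource
 * id of 0 terminates the list, followed by the scanout vmstate.
 */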
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                               DMA_DIRECTION_TO_DEVICE,
                               MEMTXATTRS_UNSPECIFIED);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)