/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                     \
    do {                                                    \
        if (_g->parent_obj.use_virgl_renderer) {            \
            _virgl(__VA_ARGS__);                            \
        } else {                                            \
            _simple(__VA_ARGS__);                           \
        }                                                   \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif
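
/* Copy the cursor image out of a 2D resource into the current QEMU cursor. */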
static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif
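
/*
 * Handle VIRTIO_GPU_CMD_UPDATE_CURSOR and VIRTIO_GPU_CMD_MOVE_CURSOR:
 * "move" only repositions the pointer, "update" also refreshes the cursor
 * image from the referenced resource and redefines the cursor shape.
 */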
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}
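
/*
 * Write a response into the command's in-buffers and complete the
 * virtqueue element.  When the request carried a fence, the fence id
 * and context id are echoed back in the response header.
 */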
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
                         struct virtio_gpu_resp_edid *edid)
{
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
    qemu_edid_info info = {
        .prefx = b->req_state[scanout].width,
        .prefy = b->req_state[scanout].height,
    };

    edid->size = cpu_to_le32(sizeof(edid->edid));
    qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */
    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}
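
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host pixman image for the
 * new resource, subject to the max_hostmem accounting limit.
 */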
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
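
/*
 * Stop scanning out from a resource: clear the scanout's state and, for
 * the primary head, show a "Guest disabled display." message surface.
 */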
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;
    DisplaySurface *ds = NULL;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    if (scanout_id == 0) {
        /* primary head */
        ds = qemu_create_message_surface(scanout->width ?: 640,
                                         scanout->height ?: 480,
                                         "Guest disabled display.");
    }
    dpy_gfx_replace_surface(scanout->con, ds);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}
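
/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy data from the guest-provided
 * backing iovec into the host pixman image.  A full-width transfer at
 * offset 0 is done as one bulk copy; otherwise the rectangle is copied
 * line by line.
 */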
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}
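
/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: for every scanout showing the resource,
 * intersect the flush rectangle with the scanout rectangle and push the
 * resulting damage to the console.
 */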
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}
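
/*
 * VIRTIO_GPU_CMD_SET_SCANOUT: point a scanout (head) at a rectangle of a
 * resource.  A new display surface backed by the resource's pixman image
 * is created whenever the backing pointer or geometry changes.
 */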
static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->parent_obj.enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width < 16 ||
        ss.r.height < 16 ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->parent_obj.scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con,
                                scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}
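
/*
 * Map the guest's backing pages (the virtio_gpu_mem_entry array that
 * follows the request) into host iovecs via the device's DMA address
 * space.  On failure any partial mappings are unwound and -1 is returned.
 */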
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                            a, &len, DMA_DIRECTION_TO_DEVICE);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}
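
/*
 * Dispatch a single control command on the simple (non-virgl) 2D path and
 * send the response unless the handler already finished the command.
 */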
static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}
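
/*
 * Drain queued control commands.  Commands that do not finish
 * synchronously are parked on the fence queue and counted as inflight
 * until they complete later.
 */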
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_gl_unblock(VirtIOGPUBase *b)
{
    VirtIOGPU *g = VIRTIO_GPU(b);

#ifdef CONFIG_VIRGL
    if (g->renderer_reset) {
        g->renderer_reset = false;
        virtio_gpu_virgl_reset(g);
    }
#endif
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
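
/*
 * Migration: write out each simple resource (geometry, backing entry list
 * and pixel contents), terminated by a zero resource id, followed by the
 * scanout state.  virtio_gpu_load() reads the same layout back.
 */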
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     0);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;

#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    } else {
#if defined(CONFIG_VIRGL)
        VIRTIO_GPU_BASE(g)->virtio_config.num_capsets =
            virtio_gpu_virgl_get_num_capsets(g);
#endif
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
    }
#endif

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

#ifdef CONFIG_VIRGL
    if (g->parent_obj.use_virgl_renderer) {
        if (g->parent_obj.renderer_blocked) {
            g->renderer_reset = true;
        } else {
            virtio_gpu_virgl_reset(g);
        }
        g->parent_obj.use_virgl_renderer = false;
    }
#endif

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core.  Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_unblock = virtio_gpu_gl_unblock;
    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    dc->props = virtio_gpu_properties;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)