/*
 * QEMU Apple ParavirtualizedGraphics.framework device
 *
 * Copyright © 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 *
 * ParavirtualizedGraphics.framework is a set of libraries that macOS provides
 * which implements 3d graphics passthrough to the host as well as a
 * proprietary guest communication channel to drive it. This device model
 * implements support to drive that library from within QEMU.
 */
#include "qemu/osdep.h"
#include "qemu/lockable.h"
#include "qemu/cutils.h"
#include "qemu/log.h"
#include "qapi/visitor.h"
#include "qapi/error.h"
#include "block/aio-wait.h"
#include "exec/address-spaces.h"
#include "system/dma.h"
#include "migration/blocker.h"
#include "ui/console.h"
#include "apple-gfx.h"
#include "trace.h"

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <dispatch/dispatch.h>

#import <ParavirtualizedGraphics/ParavirtualizedGraphics.h>

static const AppleGFXDisplayMode apple_gfx_default_modes[] = {
    { 1920, 1080, 60 },
    { 1440, 1080, 60 },
    { 1280, 1024, 60 },
};

static Error *apple_gfx_mig_blocker;
static uint32_t next_pgdisplay_serial_num = 1;

static dispatch_queue_t get_background_queue(void)
{
    return dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
}

/* ------ PGTask and task operations: new/destroy/map/unmap ------ */

/*
 * This implements the type declared in <ParavirtualizedGraphics/PGDevice.h>
 * which is opaque from the framework's point of view. It is used in callbacks
 * in the form of its typedef PGTask_t, which also already exists in the
 * framework headers.
 *
 * A "task" in PVG terminology represents a host-virtual contiguous address
 * range which is reserved in a large chunk on task creation. The mapMemory
 * callback then requests ranges of guest system memory (identified by their
 * GPA) to be mapped into subranges of this reserved address space.
 * This type of operation isn't well-supported by QEMU's memory subsystem,
 * but it is fortunately trivial to achieve with Darwin's mach_vm_remap() call,
 * which allows us to refer to the same backing memory via multiple virtual
 * address ranges. The Mach VM APIs are therefore used throughout for managing
 * task memory.
 */
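
/*
 * Illustrative sketch of the scheme (example values only, not taken from the
 * framework): a task created with length 0x40000 reserves the host-virtual
 * range [task->address, task->address + 0x40000). A subsequent mapMemory
 * request with virtual_offset 0x1000 and a single range
 * { physicalAddress = 0x80000000, physicalLength = 0x2000 } remaps the host
 * pages backing that guest-physical range onto
 * [task->address + 0x1000, task->address + 0x3000), so the framework and the
 * rest of QEMU address the same backing memory via different virtual ranges.
 */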
struct PGTask_s {
    QTAILQ_ENTRY(PGTask_s) node;
    AppleGFXState *s;
    mach_vm_address_t address;
    uint64_t len;
    /*
     * All unique MemoryRegions for which a mapping has been created in this
     * task, and on which we have thus called memory_region_ref(). There are
     * usually very few regions of system RAM in total, so we expect this
     * array to be very short. Therefore, no need for sorting or fancy search
     * algorithms, linear search will do.
     * Protected by AppleGFXState's task_mutex.
     */
    GPtrArray *mapped_regions;
};

static PGTask_t *apple_gfx_new_task(AppleGFXState *s, uint64_t len)
{
    mach_vm_address_t task_mem;
    PGTask_t *task;
    kern_return_t r;

    r = mach_vm_allocate(mach_task_self(), &task_mem, len, VM_FLAGS_ANYWHERE);
    if (r != KERN_SUCCESS) {
        return NULL;
    }

    task = g_new0(PGTask_t, 1);
    task->s = s;
    task->address = task_mem;
    task->len = len;
    task->mapped_regions = g_ptr_array_sized_new(2 /* Usually enough */);

    QEMU_LOCK_GUARD(&s->task_mutex);
    QTAILQ_INSERT_TAIL(&s->tasks, task, node);

    return task;
}

static void apple_gfx_destroy_task(AppleGFXState *s, PGTask_t *task)
{
    GPtrArray *regions = task->mapped_regions;
    MemoryRegion *region;
    size_t i;

    for (i = 0; i < regions->len; ++i) {
        region = g_ptr_array_index(regions, i);
        memory_region_unref(region);
    }
    g_ptr_array_unref(regions);

    mach_vm_deallocate(mach_task_self(), task->address, task->len);

    QEMU_LOCK_GUARD(&s->task_mutex);
    QTAILQ_REMOVE(&s->tasks, task, node);
    g_free(task);
}
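
/*
 * Translates a guest-physical memory range into a directly accessible host
 * pointer. On success, *mapping_in_region is set to the MemoryRegion backing
 * the range so that the caller can take a reference on it for as long as the
 * mapping is in use. Returns NULL if the range is not backed by a single,
 * directly accessible RAM region of at least the requested length.
 */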
void *apple_gfx_host_ptr_for_gpa_range(uint64_t guest_physical,
                                       uint64_t length, bool read_only,
                                       MemoryRegion **mapping_in_region)
{
    MemoryRegion *ram_region;
    char *host_ptr;
    hwaddr ram_region_offset = 0;
    hwaddr ram_region_length = length;

    ram_region = address_space_translate(&address_space_memory,
                                         guest_physical,
                                         &ram_region_offset,
                                         &ram_region_length, !read_only,
                                         MEMTXATTRS_UNSPECIFIED);

    if (!ram_region || ram_region_length < length ||
        !memory_access_is_direct(ram_region, !read_only,
                                 MEMTXATTRS_UNSPECIFIED)) {
        return NULL;
    }

    host_ptr = memory_region_get_ram_ptr(ram_region);
    if (!host_ptr) {
        return NULL;
    }
    host_ptr += ram_region_offset;
    *mapping_in_region = ram_region;
    return host_ptr;
}

static bool apple_gfx_task_map_memory(AppleGFXState *s, PGTask_t *task,
                                      uint64_t virtual_offset,
                                      PGPhysicalMemoryRange_t *ranges,
                                      uint32_t range_count, bool read_only)
{
    kern_return_t r;
    void *source_ptr;
    mach_vm_address_t target;
    vm_prot_t cur_protection, max_protection;
    bool success = true;
    MemoryRegion *region;

    RCU_READ_LOCK_GUARD();
    QEMU_LOCK_GUARD(&s->task_mutex);

    trace_apple_gfx_map_memory(task, range_count, virtual_offset, read_only);
    for (int i = 0; i < range_count; i++) {
        PGPhysicalMemoryRange_t *range = &ranges[i];

        target = task->address + virtual_offset;
        virtual_offset += range->physicalLength;

        trace_apple_gfx_map_memory_range(i, range->physicalAddress,
                                         range->physicalLength);

        region = NULL;
        source_ptr = apple_gfx_host_ptr_for_gpa_range(range->physicalAddress,
                                                      range->physicalLength,
                                                      read_only, &region);
        if (!source_ptr) {
            success = false;
            continue;
        }

        if (!g_ptr_array_find(task->mapped_regions, region, NULL)) {
            g_ptr_array_add(task->mapped_regions, region);
            memory_region_ref(region);
        }

        cur_protection = 0;
        max_protection = 0;
        /* Map guest RAM at range->physicalAddress into PG task memory range */
        r = mach_vm_remap(mach_task_self(),
                          &target, range->physicalLength, vm_page_size - 1,
                          VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
                          mach_task_self(), (mach_vm_address_t)source_ptr,
                          false /* shared mapping, no copy */,
                          &cur_protection, &max_protection,
                          VM_INHERIT_COPY);
        trace_apple_gfx_remap(r, source_ptr, target);
        g_assert(r == KERN_SUCCESS);
    }

    return success;
}

static void apple_gfx_task_unmap_memory(AppleGFXState *s, PGTask_t *task,
                                        uint64_t virtual_offset,
                                        uint64_t length)
{
    kern_return_t r;
    mach_vm_address_t range_address;

    trace_apple_gfx_unmap_memory(task, virtual_offset, length);

    /*
     * Replace task memory range with fresh 0 pages, undoing the mapping
     * from guest RAM.
     */
    range_address = task->address + virtual_offset;
    r = mach_vm_allocate(mach_task_self(), &range_address, length,
                         VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
    g_assert(r == KERN_SUCCESS);
}

/* ------ Rendering and frame management ------ */

static void apple_gfx_render_frame_completed_bh(void *opaque);
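
/*
 * Starts rendering the current guest frame into s->texture. Must be called
 * with the BQL held; the actual frame encoding happens on a background
 * dispatch queue, and completion is reported back to the main AIO context
 * via apple_gfx_render_frame_completed_bh.
 */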
static void apple_gfx_render_new_frame(AppleGFXState *s)
{
    bool managed_texture = s->using_managed_texture_storage;
    uint32_t width = surface_width(s->surface);
    uint32_t height = surface_height(s->surface);
    MTLRegion region = MTLRegionMake2D(0, 0, width, height);
    id<MTLCommandBuffer> command_buffer = [s->mtl_queue commandBuffer];
    id<MTLTexture> texture = s->texture;

    assert(bql_locked());
    [texture retain];
    [command_buffer retain];

    s->rendering_frame_width = width;
    s->rendering_frame_height = height;

    dispatch_async(get_background_queue(), ^{
        /*
         * This is not safe to call from the BQL/BH due to PVG-internal locks
         * causing deadlocks.
         */
        bool r = [s->pgdisp encodeCurrentFrameToCommandBuffer:command_buffer
                                                      texture:texture
                                                       region:region];
        if (!r) {
            [texture release];
            [command_buffer release];
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: encodeCurrentFrameToCommandBuffer:texture:region: "
                          "failed\n", __func__);
            bql_lock();
            --s->pending_frames;
            if (s->pending_frames > 0) {
                apple_gfx_render_new_frame(s);
            }
            bql_unlock();
            return;
        }

        if (managed_texture) {
            /* "Managed" textures exist in both VRAM and RAM and must be synced. */
            id<MTLBlitCommandEncoder> blit = [command_buffer blitCommandEncoder];
            [blit synchronizeResource:texture];
            [blit endEncoding];
        }
        [texture release];
        [command_buffer addCompletedHandler:
            ^(id<MTLCommandBuffer> cb)
            {
                aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                        apple_gfx_render_frame_completed_bh,
                                        s);
            }];
        [command_buffer commit];
        [command_buffer release];
    });
}

static void copy_mtl_texture_to_surface_mem(id<MTLTexture> texture, void *vram)
{
    /*
     * TODO: Skip this entirely on a pure Metal or headless/guest-only
     * rendering path, else use a blit command encoder? Needs careful
     * (double?) buffering design.
     */
    size_t width = texture.width, height = texture.height;
    MTLRegion region = MTLRegionMake2D(0, 0, width, height);
    [texture getBytes:vram
          bytesPerRow:(width * 4)
        bytesPerImage:(width * height * 4)
           fromRegion:region
          mipmapLevel:0
                slice:0];
}

static void apple_gfx_render_frame_completed_bh(void *opaque)
{
    AppleGFXState *s = opaque;

    @autoreleasepool {
        --s->pending_frames;
        assert(s->pending_frames >= 0);

        /* Only update display if mode hasn't changed since we started rendering. */
        if (s->rendering_frame_width == surface_width(s->surface) &&
            s->rendering_frame_height == surface_height(s->surface)) {
            copy_mtl_texture_to_surface_mem(s->texture,
                                            surface_data(s->surface));
            if (s->gfx_update_requested) {
                s->gfx_update_requested = false;
                dpy_gfx_update_full(s->con);
                graphic_hw_update_done(s->con);
                s->new_frame_ready = false;
            } else {
                s->new_frame_ready = true;
            }
        }
        if (s->pending_frames > 0) {
            apple_gfx_render_new_frame(s);
        }
    }
}
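
/*
 * The console core runs in asynchronous update mode (.gfx_update_async = true
 * below), so every gfx_update callback must eventually be answered with a
 * graphic_hw_update_done() call: immediately if a finished frame or no frame
 * at all is pending, or from the frame-completed handler above once the
 * requested update has actually been rendered.
 */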
static void apple_gfx_fb_update_display(void *opaque)
{
    AppleGFXState *s = opaque;

    assert(bql_locked());
    if (s->new_frame_ready) {
        dpy_gfx_update_full(s->con);
        s->new_frame_ready = false;
        graphic_hw_update_done(s->con);
    } else if (s->pending_frames > 0) {
        s->gfx_update_requested = true;
    } else {
        graphic_hw_update_done(s->con);
    }
}

static const GraphicHwOps apple_gfx_fb_ops = {
    .gfx_update = apple_gfx_fb_update_display,
    .gfx_update_async = true,
};

/* ------ Mouse cursor and display mode setting ------ */

static void set_mode(AppleGFXState *s, uint32_t width, uint32_t height)
{
    MTLTextureDescriptor *textureDescriptor;

    if (s->surface &&
        width == surface_width(s->surface) &&
        height == surface_height(s->surface)) {
        return;
    }

    [s->texture release];

    s->surface = qemu_create_displaysurface(width, height);

    @autoreleasepool {
        textureDescriptor =
            [MTLTextureDescriptor
                texture2DDescriptorWithPixelFormat:MTLPixelFormatBGRA8Unorm
                                             width:width
                                            height:height
                                         mipmapped:NO];
        textureDescriptor.usage = s->pgdisp.minimumTextureUsage;
        s->texture = [s->mtl newTextureWithDescriptor:textureDescriptor];
        s->using_managed_texture_storage =
            (s->texture.storageMode == MTLStorageModeManaged);
    }

    dpy_gfx_replace_surface(s->con, s->surface);
}

static void update_cursor(AppleGFXState *s)
{
    assert(bql_locked());
    dpy_mouse_set(s->con, s->pgdisp.cursorPosition.x,
                  s->pgdisp.cursorPosition.y, qatomic_read(&s->cursor_show));
}

static void update_cursor_bh(void *opaque)
{
    AppleGFXState *s = opaque;
    update_cursor(s);
}

typedef struct AppleGFXSetCursorGlyphJob {
    AppleGFXState *s;
    NSBitmapImageRep *glyph;
    PGDisplayCoord_t hotspot;
} AppleGFXSetCursorGlyphJob;

static void set_cursor_glyph(void *opaque)
{
    AppleGFXSetCursorGlyphJob *job = opaque;
    AppleGFXState *s = job->s;
    NSBitmapImageRep *glyph = job->glyph;
    uint32_t bpp = glyph.bitsPerPixel;
    size_t width = glyph.pixelsWide;
    size_t height = glyph.pixelsHigh;
    size_t padding_bytes_per_row = glyph.bytesPerRow - width * 4;
    const uint8_t* px_data = glyph.bitmapData;

    trace_apple_gfx_cursor_set(bpp, width, height);

    if (s->cursor) {
        cursor_unref(s->cursor);
        s->cursor = NULL;
    }

    if (bpp == 32) { /* Shouldn't be anything else, but just to be safe... */
        s->cursor = cursor_alloc(width, height);
        s->cursor->hot_x = job->hotspot.x;
        s->cursor->hot_y = job->hotspot.y;

        uint32_t *dest_px = s->cursor->data;

        for (size_t y = 0; y < height; ++y) {
            for (size_t x = 0; x < width; ++x) {
                /*
                 * NSBitmapImageRep's red & blue channels are swapped
                 * compared to QEMUCursor's.
                 */
                *dest_px =
                    (px_data[0] << 16u) |
                    (px_data[1] <<  8u) |
                    (px_data[2] <<  0u) |
                    (px_data[3] << 24u);
                ++dest_px;
                px_data += 4;
            }
            px_data += padding_bytes_per_row;
        }
        dpy_cursor_define(s->con, s->cursor);
        update_cursor(s);
    }
    [glyph release];

    g_free(job);
}

/* ------ DMA (device reading system memory) ------ */

typedef struct AppleGFXReadMemoryJob {
    QemuSemaphore sem;
    hwaddr physical_address;
    uint64_t length;
    void *dst;
    bool success;
} AppleGFXReadMemoryJob;

static void apple_gfx_do_read_memory(void *opaque)
{
    AppleGFXReadMemoryJob *job = opaque;
    MemTxResult r;

    r = dma_memory_read(&address_space_memory, job->physical_address,
                        job->dst, job->length, MEMTXATTRS_UNSPECIFIED);
    job->success = (r == MEMTX_OK);

    qemu_sem_post(&job->sem);
}

static bool apple_gfx_read_memory(AppleGFXState *s, hwaddr physical_address,
                                  uint64_t length, void *dst)
{
    AppleGFXReadMemoryJob job = {
        .physical_address = physical_address, .length = length, .dst = dst
    };

    trace_apple_gfx_read_memory(physical_address, length, dst);

    /* Performing DMA requires BQL, so do it in a BH. */
    qemu_sem_init(&job.sem, 0);
    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            apple_gfx_do_read_memory, &job);
    qemu_sem_wait(&job.sem);
    qemu_sem_destroy(&job.sem);
    return job.success;
}

/* ------ Memory-mapped device I/O operations ------ */

typedef struct AppleGFXIOJob {
    AppleGFXState *state;
    uint64_t offset;
    uint64_t value;
    bool completed;
} AppleGFXIOJob;

static void apple_gfx_do_read(void *opaque)
{
    AppleGFXIOJob *job = opaque;
    job->value = [job->state->pgdev mmioReadAtOffset:job->offset];
    qatomic_set(&job->completed, true);
    aio_wait_kick();
}

static uint64_t apple_gfx_read(void *opaque, hwaddr offset, unsigned size)
{
    AppleGFXIOJob job = {
        .state = opaque,
        .offset = offset,
        .completed = false,
    };
    dispatch_queue_t queue = get_background_queue();

    dispatch_async_f(queue, &job, apple_gfx_do_read);
    AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed));

    trace_apple_gfx_read(offset, job.value);
    return job.value;
}

static void apple_gfx_do_write(void *opaque)
{
    AppleGFXIOJob *job = opaque;
    [job->state->pgdev mmioWriteAtOffset:job->offset value:job->value];
    qatomic_set(&job->completed, true);
    aio_wait_kick();
}

static void apple_gfx_write(void *opaque, hwaddr offset, uint64_t val,
                            unsigned size)
{
    /*
     * The methods mmioReadAtOffset: and especially mmioWriteAtOffset: can
     * trigger synchronous operations on other dispatch queues, which in turn
     * may call back out on one or more of the callback blocks. For this
     * reason, and as we are holding the BQL, we invoke the I/O methods on a
     * pool thread and handle AIO tasks while we wait. Any work in the
     * callbacks requiring the BQL will in turn schedule BHs which this thread
     * will process while waiting.
     */
    AppleGFXIOJob job = {
        .state = opaque,
        .offset = offset,
        .value = val,
        .completed = false,
    };
    dispatch_queue_t queue = get_background_queue();

    dispatch_async_f(queue, &job, apple_gfx_do_write);
    AIO_WAIT_WHILE(NULL, !qatomic_read(&job.completed));

    trace_apple_gfx_write(offset, val);
}
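
/*
 * Guest accesses may be 4 or 8 bytes wide, but the device implementation
 * only handles 32 bits at a time (.impl.max_access_size = 4); the memory
 * core splits any 64-bit access into two 32-bit mmio calls.
 */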
static const MemoryRegionOps apple_gfx_ops = {
    .read = apple_gfx_read,
    .write = apple_gfx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static size_t apple_gfx_get_default_mmio_range_size(void)
{
    size_t mmio_range_size;
    @autoreleasepool {
        PGDeviceDescriptor *desc = [PGDeviceDescriptor new];
        mmio_range_size = desc.mmioLength;
        [desc release];
    }
    return mmio_range_size;
}

/* ------ Initialisation and startup ------ */

void apple_gfx_common_init(Object *obj, AppleGFXState *s, const char* obj_name)
{
    size_t mmio_range_size = apple_gfx_get_default_mmio_range_size();

    trace_apple_gfx_common_init(obj_name, mmio_range_size);
    memory_region_init_io(&s->iomem_gfx, obj, &apple_gfx_ops, s, obj_name,
                          mmio_range_size);

    /* TODO: PVG framework supports serialising device state: integrate it! */
}

static void apple_gfx_register_task_mapping_handlers(AppleGFXState *s,
                                                     PGDeviceDescriptor *desc)
{
    desc.createTask = ^(uint64_t vmSize, void * _Nullable * _Nonnull baseAddress) {
        PGTask_t *task = apple_gfx_new_task(s, vmSize);
        *baseAddress = (void *)task->address;
        trace_apple_gfx_create_task(vmSize, *baseAddress);
        return task;
    };

    desc.destroyTask = ^(PGTask_t * _Nonnull task) {
        trace_apple_gfx_destroy_task(task, task->mapped_regions->len);

        apple_gfx_destroy_task(s, task);
    };

    desc.mapMemory = ^bool(PGTask_t * _Nonnull task, uint32_t range_count,
                           uint64_t virtual_offset, bool read_only,
                           PGPhysicalMemoryRange_t * _Nonnull ranges) {
        return apple_gfx_task_map_memory(s, task, virtual_offset,
                                         ranges, range_count, read_only);
    };

    desc.unmapMemory = ^bool(PGTask_t * _Nonnull task, uint64_t virtual_offset,
                             uint64_t length) {
        apple_gfx_task_unmap_memory(s, task, virtual_offset, length);
        return true;
    };

    desc.readMemory = ^bool(uint64_t physical_address, uint64_t length,
                            void * _Nonnull dst) {
        return apple_gfx_read_memory(s, physical_address, length, dst);
    };
}

static void new_frame_handler_bh(void *opaque)
{
    AppleGFXState *s = opaque;

    /* Drop frames if guest gets too far ahead. */
    if (s->pending_frames >= 2) {
        return;
    }
    ++s->pending_frames;
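    /*
     * If a frame is still being rendered, the frame-completed handler will
     * kick off rendering of the next one; only start rendering here when
     * no frame was previously in flight.
     */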
    if (s->pending_frames > 1) {
        return;
    }

    @autoreleasepool {
        apple_gfx_render_new_frame(s);
    }
}

static PGDisplayDescriptor *apple_gfx_prepare_display_descriptor(AppleGFXState *s)
{
    PGDisplayDescriptor *disp_desc = [PGDisplayDescriptor new];

    disp_desc.name = @"QEMU display";
    disp_desc.sizeInMillimeters = NSMakeSize(400., 300.); /* A 20" display */
    disp_desc.queue = dispatch_get_main_queue();
    disp_desc.newFrameEventHandler = ^(void) {
        trace_apple_gfx_new_frame();
        aio_bh_schedule_oneshot(qemu_get_aio_context(), new_frame_handler_bh,
                                s);
    };
    disp_desc.modeChangeHandler = ^(PGDisplayCoord_t sizeInPixels,
                                    OSType pixelFormat) {
        trace_apple_gfx_mode_change(sizeInPixels.x, sizeInPixels.y);

        BQL_LOCK_GUARD();
        set_mode(s, sizeInPixels.x, sizeInPixels.y);
    };
    disp_desc.cursorGlyphHandler = ^(NSBitmapImageRep *glyph,
                                     PGDisplayCoord_t hotspot) {
        AppleGFXSetCursorGlyphJob *job = g_malloc0(sizeof(*job));
        job->s = s;
        job->glyph = glyph;
        job->hotspot = hotspot;
        [glyph retain];
        aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                set_cursor_glyph, job);
    };
    disp_desc.cursorShowHandler = ^(BOOL show) {
        trace_apple_gfx_cursor_show(show);
        qatomic_set(&s->cursor_show, show);
        aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                update_cursor_bh, s);
    };
    disp_desc.cursorMoveHandler = ^(void) {
        trace_apple_gfx_cursor_move();
        aio_bh_schedule_oneshot(qemu_get_aio_context(),
                                update_cursor_bh, s);
    };

    return disp_desc;
}

static NSArray<PGDisplayMode *> *apple_gfx_create_display_mode_array(
    const AppleGFXDisplayMode display_modes[], uint32_t display_mode_count)
{
    PGDisplayMode *mode_obj;
    NSMutableArray<PGDisplayMode *> *mode_array =
        [[NSMutableArray alloc] initWithCapacity:display_mode_count];

    for (unsigned i = 0; i < display_mode_count; i++) {
        const AppleGFXDisplayMode *mode = &display_modes[i];
        trace_apple_gfx_display_mode(i, mode->width_px, mode->height_px);
        PGDisplayCoord_t mode_size = { mode->width_px, mode->height_px };

        mode_obj =
            [[PGDisplayMode alloc] initWithSizeInPixels:mode_size
                                        refreshRateInHz:mode->refresh_rate_hz];
        [mode_array addObject:mode_obj];
        [mode_obj release];
    }

    return mode_array;
}

static id<MTLDevice> copy_suitable_metal_device(void)
{
    id<MTLDevice> dev = nil;
    NSArray<id<MTLDevice>> *devs = MTLCopyAllDevices();

    /* Prefer a unified memory GPU. Failing that, pick a non-removable GPU. */
    for (size_t i = 0; i < devs.count; ++i) {
        if (devs[i].hasUnifiedMemory) {
            dev = devs[i];
            break;
        }
        if (!devs[i].removable) {
            dev = devs[i];
        }
    }

    if (dev != nil) {
        [dev retain];
    } else {
        dev = MTLCreateSystemDefaultDevice();
    }
    [devs release];

    return dev;
}

bool apple_gfx_common_realize(AppleGFXState *s, DeviceState *dev,
                              PGDeviceDescriptor *desc, Error **errp)
{
    PGDisplayDescriptor *disp_desc;
    const AppleGFXDisplayMode *display_modes = apple_gfx_default_modes;
    uint32_t num_display_modes = ARRAY_SIZE(apple_gfx_default_modes);
    NSArray<PGDisplayMode *> *mode_array;

    if (apple_gfx_mig_blocker == NULL) {
        error_setg(&apple_gfx_mig_blocker,
                   "Migration state blocked by apple-gfx display device");
        if (migrate_add_blocker(&apple_gfx_mig_blocker, errp) < 0) {
            return false;
        }
    }

    qemu_mutex_init(&s->task_mutex);
    QTAILQ_INIT(&s->tasks);
    s->mtl = copy_suitable_metal_device();
    s->mtl_queue = [s->mtl newCommandQueue];

    desc.device = s->mtl;

    apple_gfx_register_task_mapping_handlers(s, desc);

    s->cursor_show = true;

    s->pgdev = PGNewDeviceWithDescriptor(desc);

    disp_desc = apple_gfx_prepare_display_descriptor(s);
    /*
     * Although the framework supports it, this integration does not currently
     * support multiple virtual displays connected to a single PV graphics
     * device. It is however possible to create more than one instance of the
     * device, each with one display. The macOS guest will ignore these
     * displays if they share the same serial number, so ensure each instance
     * gets a unique one.
     */
    s->pgdisp = [s->pgdev newDisplayWithDescriptor:disp_desc
                                              port:0
                                         serialNum:next_pgdisplay_serial_num++];
    [disp_desc release];

    if (s->display_modes != NULL && s->num_display_modes > 0) {
        trace_apple_gfx_common_realize_modes_property(s->num_display_modes);
        display_modes = s->display_modes;
        num_display_modes = s->num_display_modes;
    }
    s->pgdisp.modeList = mode_array =
        apple_gfx_create_display_mode_array(display_modes, num_display_modes);
    [mode_array release];

    s->con = graphic_console_init(dev, 0, &apple_gfx_fb_ops, s);
    return true;
}

/* ------ Display mode list device property ------ */

static void apple_gfx_get_display_mode(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    Property *prop = opaque;
    AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop);
    /* 3 uint16s (max 5 digits) + 2 separator characters + nul. */
    char buffer[5 * 3 + 2 + 1];
    char *pos = buffer;

    int rc = snprintf(buffer, sizeof(buffer),
                      "%"PRIu16"x%"PRIu16"@%"PRIu16,
                      mode->width_px, mode->height_px,
                      mode->refresh_rate_hz);
    assert(rc < sizeof(buffer));

    visit_type_str(v, name, &pos, errp);
}
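
/*
 * Parses a display mode string of the form "<width>x<height>@<refresh-rate>",
 * e.g. "3840x2160@60", with each component a positive decimal integer that
 * fits in a uint16_t.
 */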
static void apple_gfx_set_display_mode(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    Property *prop = opaque;
    AppleGFXDisplayMode *mode = object_field_prop_ptr(obj, prop);
    const char *endptr;
    g_autofree char *str = NULL;
    int ret;
    int val;

    if (!visit_type_str(v, name, &str, errp)) {
        return;
    }

    endptr = str;

    ret = qemu_strtoi(endptr, &endptr, 10, &val);
    if (ret || val > UINT16_MAX || val <= 0) {
        error_setg(errp, "width in '%s' must be a decimal integer number"
                         " of pixels in the range 1..65535", name);
        return;
    }
    mode->width_px = val;
    if (*endptr != 'x') {
        goto separator_error;
    }

    ret = qemu_strtoi(endptr + 1, &endptr, 10, &val);
    if (ret || val > UINT16_MAX || val <= 0) {
        error_setg(errp, "height in '%s' must be a decimal integer number"
                         " of pixels in the range 1..65535", name);
        return;
    }
    mode->height_px = val;
    if (*endptr != '@') {
        goto separator_error;
    }

    ret = qemu_strtoi(endptr + 1, &endptr, 10, &val);
    if (ret || val > UINT16_MAX || val <= 0) {
        error_setg(errp, "refresh rate in '%s'"
                         " must be a positive decimal integer (Hertz)", name);
        return;
    }
    mode->refresh_rate_hz = val;
    return;

separator_error:
    error_setg(errp,
               "Each display mode takes the format '<width>x<height>@<rate>'");
}

const PropertyInfo qdev_prop_apple_gfx_display_mode = {
    .type = "display_mode",
    .description =
        "Display mode in pixels and Hertz, as <width>x<height>@<refresh-rate> "
        "Example: 3840x2160@60",
    .get = apple_gfx_get_display_mode,
    .set = apple_gfx_set_display_mode,
};