hv.m

// Decompiled by hand (based-ish on a Ghidra decompile) from Hypervisor.framework on macOS 12.0b1
@import Darwin;
#include <Hypervisor/Hypervisor.h>
#include <assert.h>
#include "hv_kernel_structs.h"

static_assert(sizeof(hv_vcpu_exit_t) == 0x20, "hv_vcpu_exit");

#define HV_CALL_VM_GET_CAPABILITIES 0
#define HV_CALL_VM_CREATE 1
#define HV_CALL_VM_DESTROY 2
#define HV_CALL_VM_MAP 3
#define HV_CALL_VM_UNMAP 4
#define HV_CALL_VM_PROTECT 5
#define HV_CALL_VCPU_CREATE 6
#define HV_CALL_VCPU_DESTROY 7
#define HV_CALL_VCPU_SYSREGS_SYNC 8
#define HV_CALL_VCPU_RUN 9
#define HV_CALL_VCPU_RUN_CANCEL 10
#define HV_CALL_VCPU_SET_ADDRESS_SPACE 11
#define HV_CALL_VM_ADDRESS_SPACE_CREATE 12
#define HV_CALL_VM_INVALIDATE_TLB 13
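
// hv_trap enters the kernel through what appears to be a dedicated Mach trap:
// x16 is loaded with -5 (Mach traps use negative numbers in x16 on arm64 XNU)
// and svc 0x80 is issued, with hv_call still in x0 and hv_arg in x1 from the
// AArch64 calling convention. The naked attribute keeps the compiler from
// generating a prologue that would disturb those registers.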
#ifdef USE_EXTERNAL_HV_TRAP
uint64_t hv_trap(unsigned int hv_call, void* hv_arg);
#else
__attribute__((naked)) uint64_t hv_trap(unsigned int hv_call, void* hv_arg) {
  asm volatile("mov x16, #-0x5\n"
               "svc 0x80\n"
               "ret\n");
}
#endif
// type lookup hv_vm_create_t
struct hv_vm_create_kernel_args {
  uint64_t min_ipa;
  uint64_t ipa_size;
  uint32_t granule;
  uint32_t flags;
  uint32_t isa;
};
static_assert(sizeof(struct hv_vm_create_kernel_args) == 0x20, "hv_vm_create_kernel_args size");

const struct hv_vm_create_kernel_args kDefaultVmCreateKernelArgs = {
    .min_ipa = 0,
    .ipa_size = 0,
    .granule = 0,
    .flags = 0,
    .isa = 1,
};

hv_return_t hv_vm_create(hv_vm_config_t config) {
  struct hv_vm_create_kernel_args args = kDefaultVmCreateKernelArgs;
  if (config) {
    // TODO(zhuowei): figure this out?
  }
  return hv_trap(HV_CALL_VM_CREATE, &args);
}
// type lookup hv_vm_map_item_t, although fields are renamed to match userspace args
struct hv_vm_map_kernel_args {
  void* addr;              // 0x0
  hv_ipa_t ipa;            // 0x8
  size_t size;             // 0x10
  hv_memory_flags_t flags; // 0x18
  uint64_t asid;           // 0x20
};
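
// hv_vm_map, hv_vm_unmap, and hv_vm_protect all appear to reuse this single
// kernel argument struct; calls that have no host address or flags just pass
// those fields as zero.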
hv_return_t hv_vm_map(void* addr, hv_ipa_t ipa, size_t size, hv_memory_flags_t flags) {
  struct hv_vm_map_kernel_args args = {
      .addr = addr, .ipa = ipa, .size = size, .flags = flags, .asid = 0};
  return hv_trap(HV_CALL_VM_MAP, &args);
}

hv_return_t hv_vm_unmap(hv_ipa_t ipa, size_t size) {
  struct hv_vm_map_kernel_args args = {
      .addr = nil, .ipa = ipa, .size = size, .flags = 0, .asid = 0};
  return hv_trap(HV_CALL_VM_UNMAP, &args);
}

hv_return_t hv_vm_protect(hv_ipa_t ipa, size_t size, hv_memory_flags_t flags) {
  struct hv_vm_map_kernel_args args = {
      .addr = nil, .ipa = ipa, .size = size, .flags = flags, .asid = 0};
  return hv_trap(HV_CALL_VM_PROTECT, &args);
}
static pthread_mutex_t vcpus_mutex = PTHREAD_MUTEX_INITIALIZER;

struct hv_vcpu_zone {
  arm_guest_rw_context_t rw;
  arm_guest_ro_context_t ro;
};
static_assert(sizeof(struct hv_vcpu_zone) == 0x8000, "hv_vcpu_zone");
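
// Assumption from the struct names: HV_CALL_VCPU_CREATE hands back a pointer
// to a per-vcpu "zone" shared with the kernel, split into a read/write half
// (guest registers and controls that userspace edits directly) and a
// read-only half (exit information and the version magic that the kernel
// writes back).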
struct hv_vcpu_data {
  struct hv_vcpu_zone* vcpu_zone; // 0x0
  // TODO(zhuowei)
  char filler[0xe8 - 0x8];     // 0x8
  uint64_t pending_interrupts; // 0xe8
  hv_vcpu_exit_t exit;         // 0xf0
  char filler2[0x8];           // 0x110
};
static_assert(sizeof(struct hv_vcpu_data) == 0x118, "hv_vcpu_data");

static const size_t kHvMaxVcpus = 0x40;
static struct hv_vcpu_data vcpus[kHvMaxVcpus];

struct hv_vcpu_create_kernel_args {
  uint64_t id;                           // 0x0
  struct hv_vcpu_zone* output_vcpu_zone; // 0x8
};

// ' hyp', 0xe
static const uint64_t kHvVcpuMagic = 0x206879700000000eull;
hv_return_t hv_vcpu_create(hv_vcpu_t* vcpu, hv_vcpu_exit_t** exit, hv_vcpu_config_t config) {
  pthread_mutex_lock(&vcpus_mutex);
  hv_vcpu_t cpuid = 0;
  for (; cpuid < kHvMaxVcpus; cpuid++) {
    if (!vcpus[cpuid].vcpu_zone) {
      break;
    }
  }
  if (cpuid == kHvMaxVcpus) {
    pthread_mutex_unlock(&vcpus_mutex);
    return HV_NO_RESOURCES;
  }
  // TODO(zhuowei): support more than one
  struct hv_vcpu_data* vcpu_data = &vcpus[cpuid];
  struct hv_vcpu_create_kernel_args args = {
      .id = cpuid,
      .output_vcpu_zone = 0,
  };
  kern_return_t err = hv_trap(HV_CALL_VCPU_CREATE, &args);
  if (err) {
    pthread_mutex_unlock(&vcpus_mutex);
    return err;
  }
  printf("vcpu_zone = %p\n", args.output_vcpu_zone);
  if (args.output_vcpu_zone->ro.ver != kHvVcpuMagic) {
    printf("Invalid magic! expected %llx, got %llx\n", kHvVcpuMagic, args.output_vcpu_zone->ro.ver);
    const bool yolo = true;
    if (!yolo) {
      hv_trap(HV_CALL_VCPU_DESTROY, nil);
      pthread_mutex_unlock(&vcpus_mutex);
      return HV_UNSUPPORTED;
    }
    printf("yoloing\n");
  }
  vcpu_data->vcpu_zone = args.output_vcpu_zone;
  *vcpu = cpuid; // TODO(zhuowei)
  *exit = &vcpu_data->exit;
  pthread_mutex_unlock(&vcpus_mutex);
  // TODO(zhuowei): configure regs
  return 0;
}
hv_return_t hv_vcpu_destroy(hv_vcpu_t vcpu) {
  kern_return_t err = hv_trap(HV_CALL_VCPU_DESTROY, nil);
  if (err) {
    return err;
  }
  pthread_mutex_lock(&vcpus_mutex);
  struct hv_vcpu_data* vcpu_data = &vcpus[vcpu];
  vcpu_data->vcpu_zone = nil;
  // TODO(zhuowei): vcpu + 0xe8 = 0???
  vcpu_data->pending_interrupts = 0;
  pthread_mutex_unlock(&vcpus_mutex);
  return 0;
}
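
// Pending IRQ/FIQ requests are only staged in userspace (see
// hv_vcpu_set_pending_interrupt below); they appear to be delivered here by
// OR-ing the HCR_EL2 VI/VF bits into the zone's control image and marking the
// controls dirty so the kernel reloads them on the next run.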
hv_return_t hv_vcpu_run(hv_vcpu_t vcpu) {
  // TODO(zhuowei): update registers
  struct hv_vcpu_data* vcpu_data = &vcpus[vcpu];
  if (vcpu_data->pending_interrupts) {
    vcpu_data->vcpu_zone->rw.controls.hcr_el2 |= vcpu_data->pending_interrupts;
    vcpu_data->vcpu_zone->rw.state_dirty |= 0x4;
  }
  hv_return_t err = hv_trap(HV_CALL_VCPU_RUN, nil);
  if (err) {
    return err;
  }
  printf("exit = %d (esr = %x)\n", vcpu_data->vcpu_zone->ro.exit.vmexit_reason,
         vcpu_data->vcpu_zone->ro.exit.vmexit_esr);
  hv_vcpu_exit_t* exit = &vcpu_data->exit;
  switch (vcpu_data->vcpu_zone->ro.exit.vmexit_reason) {
    case 0: {
      exit->reason = HV_EXIT_REASON_CANCELED;
      break;
    }
    case 1: // hvc call?
    case 6: // memory fault?
    case 8: {
      exit->reason = HV_EXIT_REASON_EXCEPTION;
      exit->exception.syndrome = vcpu_data->vcpu_zone->ro.exit.vmexit_esr;
      exit->exception.virtual_address = vcpu_data->vcpu_zone->ro.exit.vmexit_far;
      exit->exception.physical_address = vcpu_data->vcpu_zone->ro.exit.vmexit_hpfar;
      // TODO(zhuowei): handle registers
      break;
    }
    case 3:
    case 4: {
      exit->reason = HV_EXIT_REASON_VTIMER_ACTIVATED;
      break;
    }
    default: {
      exit->reason = HV_EXIT_REASON_UNKNOWN;
      break;
    }
  }
  return 0;
}
hv_return_t hv_vcpu_get_reg(hv_vcpu_t vcpu, hv_reg_t reg, uint64_t* value) {
  if (reg > HV_REG_CPSR) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  if (reg < HV_REG_FP) {
    *value = vcpu_zone->rw.regs.x[reg];
  } else if (reg == HV_REG_FP) {
    *value = vcpu_zone->rw.regs.fp;
  } else if (reg == HV_REG_LR) {
    *value = vcpu_zone->rw.regs.lr;
  } else if (reg == HV_REG_PC) {
    *value = vcpu_zone->rw.regs.pc;
  } else if (reg == HV_REG_FPCR) {
    *value = vcpu_zone->rw.neon.fpcr;
  } else if (reg == HV_REG_FPSR) {
    *value = vcpu_zone->rw.neon.fpsr;
  } else if (reg == HV_REG_CPSR) {
    *value = vcpu_zone->rw.regs.cpsr;
  }
  return 0;
}
hv_return_t hv_vcpu_set_reg(hv_vcpu_t vcpu, hv_reg_t reg, uint64_t value) {
  if (reg > HV_REG_CPSR) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  if (reg < HV_REG_FP) {
    vcpu_zone->rw.regs.x[reg] = value;
  } else if (reg == HV_REG_FP) {
    vcpu_zone->rw.regs.fp = value;
  } else if (reg == HV_REG_LR) {
    vcpu_zone->rw.regs.lr = value;
  } else if (reg == HV_REG_PC) {
    vcpu_zone->rw.regs.pc = value;
  } else if (reg == HV_REG_FPCR) {
    vcpu_zone->rw.neon.fpcr = value;
  } else if (reg == HV_REG_FPSR) {
    vcpu_zone->rw.neon.fpsr = value;
  } else if (reg == HV_REG_CPSR) {
    vcpu_zone->rw.regs.cpsr = value;
  }
  return 0;
}
hv_return_t hv_vcpu_get_simd_fp_reg(hv_vcpu_t vcpu, hv_simd_fp_reg_t reg,
                                    hv_simd_fp_uchar16_t* value) {
  if (reg > HV_SIMD_FP_REG_Q31) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  *((__uint128_t*)value) = vcpu_zone->rw.neon.q[reg];
  return 0;
}

hv_return_t hv_vcpu_set_simd_fp_reg(hv_vcpu_t vcpu, hv_simd_fp_reg_t reg,
                                    hv_simd_fp_uchar16_t value) {
  if (reg > HV_SIMD_FP_REG_Q31) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  vcpu_zone->rw.neon.q[reg] = *((__uint128_t*)&value);
  return 0;
}
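
// sysreg_offsets.h (generated elsewhere) is assumed to expand to one case per
// HV_SYS_REG_* value, setting o to that register's offset inside
// arm_guest_rw_context_t and f to a nonzero mask when the kernel has to sync
// its copy into the zone before the value is current; the static_assert after
// find_sys_reg spot-checks one of those offsets.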
static bool find_sys_reg(hv_sys_reg_t sys_reg, uint64_t* offset, uint64_t* sync_mask) {
  uint64_t o = 0;
  uint64_t f = 0;
  switch (sys_reg) {
#include "sysreg_offsets.h"
    default:
      return false;
  }
  *offset = o;
  *sync_mask = f;
  return true;
}

static_assert(offsetof(arm_guest_rw_context_t, dbgregs.bp[0].bvr) == 0x450,
              "HV_SYS_REG_DBGBVR0_EL1");
hv_return_t hv_vcpu_get_sys_reg(hv_vcpu_t vcpu, hv_sys_reg_t sys_reg, uint64_t* value) {
  if (sys_reg >= HV_SYS_REG_ID_AA64ISAR0_EL1 && sys_reg <= HV_SYS_REG_ID_AA64MMFR2_EL1) {
    printf("TODO(zhuowei): not implemented\n");
    return HV_BAD_ARGUMENT;
  }
  // TODO(zhuowei): handle the special cases
  uint64_t offset = 0;
  uint64_t sync_mask = 0;
  bool found = find_sys_reg(sys_reg, &offset, &sync_mask);
  if (!found) {
    return HV_BAD_ARGUMENT;
  }
  if (sync_mask) {
    // TODO(zhuowei): HV_CALL_VCPU_SYSREGS_SYNC only when needed
    hv_trap(HV_CALL_VCPU_SYSREGS_SYNC, 0);
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  *value = *(uint64_t*)((char*)(&vcpu_zone->rw) + offset);
  return 0;
}
hv_return_t hv_vcpu_set_sys_reg(hv_vcpu_t vcpu, hv_sys_reg_t sys_reg, uint64_t value) {
  if (sys_reg >= HV_SYS_REG_ID_AA64ISAR0_EL1 && sys_reg <= HV_SYS_REG_ID_AA64MMFR2_EL1) {
    printf("TODO(zhuowei): not implemented\n");
    return HV_BAD_ARGUMENT;
  }
  // TODO(zhuowei): handle the special cases
  uint64_t offset = 0;
  uint64_t sync_mask = 0;
  bool found = find_sys_reg(sys_reg, &offset, &sync_mask);
  if (!found) {
    return HV_BAD_ARGUMENT;
  }
  if (sync_mask) {
    // TODO(zhuowei): HV_CALL_VCPU_SYSREGS_SYNC only when needed
    hv_trap(HV_CALL_VCPU_SYSREGS_SYNC, 0);
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  *(uint64_t*)((char*)(&vcpu_zone->rw) + offset) = value;
  return 0;
}
hv_return_t hv_vcpu_get_vtimer_mask(hv_vcpu_t vcpu, bool* vtimer_is_masked) {
  if (!vtimer_is_masked) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  *vtimer_is_masked = vcpu_zone->rw.controls.timer & 1;
  return 0;
}

hv_return_t hv_vcpu_set_vtimer_mask(hv_vcpu_t vcpu, bool vtimer_is_masked) {
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  vcpu_zone->rw.controls.timer = (vcpu_zone->rw.controls.timer & ~1ull) | vtimer_is_masked;
  return 0;
}

hv_return_t hv_vcpu_get_vtimer_offset(hv_vcpu_t vcpu, uint64_t* vtimer_offset) {
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  *vtimer_offset = vcpu_zone->rw.controls.virtual_timer_offset;
  return 0;
}

hv_return_t hv_vcpu_set_vtimer_offset(hv_vcpu_t vcpu, uint64_t vtimer_offset) {
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  vcpu_zone->rw.controls.virtual_timer_offset = vtimer_offset;
  vcpu_zone->rw.state_dirty |= 0x4;
  return 0;
}
hv_return_t hv_vcpu_set_pending_interrupt(hv_vcpu_t vcpu, hv_interrupt_type_t type, bool pending) {
  struct hv_vcpu_data* vcpu_data = &vcpus[vcpu];
  if (type == HV_INTERRUPT_TYPE_IRQ) {
    // HCR_EL2 VI bit
    if (pending) {
      vcpu_data->pending_interrupts |= 0x80ull;
    } else {
      vcpu_data->pending_interrupts &= ~0x80ull;
    }
    return 0;
  } else if (type == HV_INTERRUPT_TYPE_FIQ) {
    // HCR_EL2 VF bit
    if (pending) {
      vcpu_data->pending_interrupts |= 0x40ull;
    } else {
      vcpu_data->pending_interrupts &= ~0x40ull;
    }
    return 0;
  } else {
    return HV_BAD_ARGUMENT;
  }
}
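
// HV_CALL_VCPU_RUN_CANCEL appears to take a bitmask of vcpu ids directly in
// the trap argument rather than a pointer, hence the cast of mask to void*.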
hv_return_t hv_vcpus_exit(hv_vcpu_t* vcpus, uint32_t vcpu_count) {
  uint64_t mask = 0;
  for (int i = 0; i < vcpu_count; i++) {
    hv_vcpu_t cpu = vcpus[i];
    if (cpu >= kHvMaxVcpus) {
      return HV_BAD_ARGUMENT;
    }
    mask |= (1ul << cpu);
  }
  return hv_trap(HV_CALL_VCPU_RUN_CANCEL, (void*)mask);
}
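
// ---------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the decompiled framework code): shows
// how the reimplemented API above is meant to be called. Compile this file
// with -DHV_M_EXAMPLE (a macro invented for this sketch) and sign with the
// com.apple.security.hypervisor entitlement to get a tiny smoke test that maps
// one zeroed page of guest memory, points PC at it, and runs the vcpu once;
// since the page holds no real code, the run should return almost immediately
// with an exception-type exit.
// ---------------------------------------------------------------------------
#ifdef HV_M_EXAMPLE
int main(void) {
  if (hv_vm_create(nil) != HV_SUCCESS) {
    printf("hv_vm_create failed (missing entitlement?)\n");
    return 1;
  }
  const size_t guest_size = 0x4000;         // one 16 KB page
  const hv_ipa_t guest_ipa = 0x80000000ull; // arbitrary guest physical address
  void* guest_mem = valloc(guest_size);     // page-aligned host backing memory
  memset(guest_mem, 0, guest_size);
  if (hv_vm_map(guest_mem, guest_ipa, guest_size, HV_MEMORY_READ | HV_MEMORY_EXEC) != HV_SUCCESS) {
    printf("hv_vm_map failed\n");
    return 1;
  }
  hv_vcpu_t vcpu;
  hv_vcpu_exit_t* vcpu_exit;
  if (hv_vcpu_create(&vcpu, &vcpu_exit, nil) != HV_SUCCESS) {
    printf("hv_vcpu_create failed\n");
    return 1;
  }
  hv_vcpu_set_reg(vcpu, HV_REG_PC, guest_ipa);
  hv_vcpu_run(vcpu);
  printf("exit reason: %d, syndrome: %llx\n", vcpu_exit->reason, vcpu_exit->exception.syndrome);
  hv_vcpu_destroy(vcpu);
  hv_vm_unmap(guest_ipa, guest_size);
  return 0;
}
#endif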