// Decompiled by hand (based-ish on a Ghidra decompile) from Hypervisor.framework on macOS 12.0b1
@import Darwin;
#include <Hypervisor/Hypervisor.h>
#include <assert.h>
#include "hv_kernel_structs.h"
#if NO_HVF_HEADER
@protocol OS_hv_vcpu_config;
@class NSObject;
typedef kern_return_t hv_return_t;
typedef void* hv_vm_config_t;
typedef uint64_t hv_ipa_t;
typedef uint64_t hv_vcpu_t;
typedef uint64_t hv_exception_syndrome_t;
typedef uint64_t hv_exception_address_t;
typedef uint64_t hv_exit_reason_t;
typedef NSObject<OS_hv_vcpu_config>* hv_vcpu_config_t;
typedef uint64_t hv_memory_flags_t;
#define HV_BAD_ARGUMENT 0xfae94003
#define HV_UNSUPPORTED 0xfae9400f
// from hv_vcpu_types.h
typedef struct hv_vcpu_exit_exception {
  hv_exception_syndrome_t syndrome;
  hv_exception_address_t virtual_address;
  hv_ipa_t physical_address;
} hv_vcpu_exit_exception_t;
typedef struct hv_vcpu_exit {
  hv_exit_reason_t reason;
  hv_vcpu_exit_exception_t exception;
} hv_vcpu_exit_t;
#endif // NO_HVF_HEADER
static_assert(sizeof(hv_vcpu_exit_t) == 0x20, "hv_vcpu_exit");
#define HV_CALL_VM_GET_CAPABILITIES 0
#define HV_CALL_VM_CREATE 1
#define HV_CALL_VM_DESTROY 2
#define HV_CALL_VM_MAP 3
#define HV_CALL_VM_UNMAP 4
#define HV_CALL_VM_PROTECT 5
#define HV_CALL_VCPU_CREATE 6
#define HV_CALL_VCPU_DESTROY 7
#define HV_CALL_VCPU_SYSREGS_SYNC 8
#define HV_CALL_VCPU_RUN 9
#define HV_CALL_VCPU_RUN_CANCEL 10
#define HV_CALL_VCPU_SET_ADDRESS_SPACE 11
#define HV_CALL_VM_ADDRESS_SPACE_CREATE 12
#define HV_CALL_VM_INVALIDATE_TLB 13
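// hv_trap enters the kernel directly: on arm64 xnu, a negative x16 selects a Mach trap
// (-5 is evidently the hv_trap entry) and svc 0x80 performs the system call; the hv_call
// number and argument pointer pass straight through in x0/x1.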
#ifdef USE_EXTERNAL_HV_TRAP
uint64_t hv_trap(unsigned int hv_call, void* hv_arg);
#else
__attribute__((naked)) uint64_t hv_trap(unsigned int hv_call, void* hv_arg) {
  asm volatile("mov x16, #-0x5\n"
               "svc 0x80\n"
               "ret\n");
}
#endif
// type lookup hv_vm_create_t
struct hv_vm_create_kernel_args {
  uint64_t min_ipa;
  uint64_t ipa_size;
  uint32_t granule;
  uint32_t flags;
  uint32_t isa;
};
static_assert(sizeof(struct hv_vm_create_kernel_args) == 0x20, "hv_vm_create_kernel_args size");
const struct hv_vm_create_kernel_args kDefaultVmCreateKernelArgs = {
    .min_ipa = 0,
    .ipa_size = 0,
    .granule = 0,
    .flags = 0,
    .isa = 1,
};
hv_return_t hv_vm_create(hv_vm_config_t config) {
  struct hv_vm_create_kernel_args args = kDefaultVmCreateKernelArgs;
  if (config) {
    // TODO(zhuowei): figure this out?
  }
  return hv_trap(HV_CALL_VM_CREATE, &args);
}
// type lookup hv_vm_map_item_t, although fields are renamed to match userspace args
struct hv_vm_map_kernel_args {
  void* addr;              // 0x0
  hv_ipa_t ipa;            // 0x8
  size_t size;             // 0x10
  hv_memory_flags_t flags; // 0x18
  uint64_t asid;           // 0x20
};
hv_return_t hv_vm_map(void* addr, hv_ipa_t ipa, size_t size, hv_memory_flags_t flags) {
  struct hv_vm_map_kernel_args args = {
      .addr = addr, .ipa = ipa, .size = size, .flags = flags, .asid = 0};
  return hv_trap(HV_CALL_VM_MAP, &args);
}
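// An untested sketch of the remaining memory calls, assuming HV_CALL_VM_UNMAP and
// HV_CALL_VM_PROTECT reuse hv_vm_map's kernel argument layout with the host address
// ignored; only the userspace signatures (which match Hypervisor.h) are confirmed.
hv_return_t hv_vm_unmap(hv_ipa_t ipa, size_t size) {
  struct hv_vm_map_kernel_args args = {
      .addr = NULL, .ipa = ipa, .size = size, .flags = 0, .asid = 0};
  return hv_trap(HV_CALL_VM_UNMAP, &args);
}
hv_return_t hv_vm_protect(hv_ipa_t ipa, size_t size, hv_memory_flags_t flags) {
  struct hv_vm_map_kernel_args args = {
      .addr = NULL, .ipa = ipa, .size = size, .flags = flags, .asid = 0};
  return hv_trap(HV_CALL_VM_PROTECT, &args);
}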
static pthread_mutex_t vcpus_mutex = PTHREAD_MUTEX_INITIALIZER;
struct hv_vcpu_zone {
  arm_guest_rw_context_t rw;
  arm_guest_ro_context_t ro;
};
static_assert(sizeof(struct hv_vcpu_zone) == 0x8000, "hv_vcpu_zone");
struct hv_vcpu_data {
  struct hv_vcpu_zone* vcpu_zone; // 0x0
  // TODO(zhuowei)
  char filler[0xf0 - 0x8]; // 0x8
  hv_vcpu_exit_t exit;     // 0xf0
  char filler2[0x8];       // 0x110
};
static_assert(sizeof(struct hv_vcpu_data) == 0x118, "hv_vcpu_data");
// a file-scope array bound must be an integer constant expression in C, so use an enum
// rather than a static const variable
enum { kHvMaxVcpus = 0x40 };
static struct hv_vcpu_data vcpus[kHvMaxVcpus];
struct hv_vcpu_create_kernel_args {
  uint64_t id;                           // 0x0
  struct hv_vcpu_zone* output_vcpu_zone; // 0x8
};
// ' hyp' in the high 32 bits, version 0xe in the low bits
static const uint64_t kHvVcpuMagic = 0x206879700000000eull;
hv_return_t hv_vcpu_create(hv_vcpu_t* vcpu, hv_vcpu_exit_t** exit, hv_vcpu_config_t config) {
  pthread_mutex_lock(&vcpus_mutex);
  hv_vcpu_t cpuid = 0;
  for (; cpuid < kHvMaxVcpus; cpuid++) {
    if (!vcpus[cpuid].vcpu_zone) {
      break;
    }
  }
  if (cpuid == kHvMaxVcpus) {
    pthread_mutex_unlock(&vcpus_mutex);
    return HV_NO_RESOURCES;
  }
  // TODO(zhuowei): support more than one
  struct hv_vcpu_data* vcpu_data = &vcpus[cpuid];
  struct hv_vcpu_create_kernel_args args = {
      .id = cpuid,
      .output_vcpu_zone = 0,
  };
  kern_return_t err = hv_trap(HV_CALL_VCPU_CREATE, &args);
  if (err) {
    pthread_mutex_unlock(&vcpus_mutex);
    return err;
  }
  printf("vcpu_zone = %p\n", (void*)args.output_vcpu_zone);
  if (args.output_vcpu_zone->ro.ver != kHvVcpuMagic) {
    printf("Invalid magic! expected %llx, got %llx\n", kHvVcpuMagic,
           args.output_vcpu_zone->ro.ver);
    const bool yolo = true;
    if (!yolo) {
      hv_trap(HV_CALL_VCPU_DESTROY, nil);
      pthread_mutex_unlock(&vcpus_mutex);
      return HV_UNSUPPORTED;
    }
    printf("yoloing\n");
  }
  vcpu_data->vcpu_zone = args.output_vcpu_zone;
  *vcpu = cpuid; // TODO(zhuowei)
  *exit = &vcpu_data->exit;
  pthread_mutex_unlock(&vcpus_mutex);
  // TODO(zhuowei): configure regs
  return 0;
}
hv_return_t hv_vcpu_run(hv_vcpu_t vcpu) {
  // TODO(zhuowei): update registers
  struct hv_vcpu_data* vcpu_data = &vcpus[vcpu];
  // no argument: the kernel presumably runs the vcpu bound to the calling thread
  hv_return_t err = hv_trap(HV_CALL_VCPU_RUN, nil);
  if (err) {
    return err;
  }
  printf("exit = %d (esr = %x)\n", vcpu_data->vcpu_zone->ro.exit.vmexit_reason,
         vcpu_data->vcpu_zone->ro.exit.vmexit_esr);
  return 0;
}
hv_return_t hv_vcpu_get_reg(hv_vcpu_t vcpu, hv_reg_t reg, uint64_t* value) {
  if (reg > HV_REG_CPSR) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  if (reg < HV_REG_FP) {
    *value = vcpu_zone->rw.regs.x[reg];
  } else if (reg == HV_REG_FP) {
    *value = vcpu_zone->rw.regs.fp;
  } else if (reg == HV_REG_LR) {
    *value = vcpu_zone->rw.regs.lr;
  } else if (reg == HV_REG_PC) {
    *value = vcpu_zone->rw.regs.pc;
  } else if (reg == HV_REG_FPCR) {
    *value = vcpu_zone->rw.neon.fpcr;
  } else if (reg == HV_REG_FPSR) {
    *value = vcpu_zone->rw.neon.fpsr;
  } else if (reg == HV_REG_CPSR) {
    *value = vcpu_zone->rw.regs.cpsr;
  }
  return 0;
}
hv_return_t hv_vcpu_set_reg(hv_vcpu_t vcpu, hv_reg_t reg, uint64_t value) {
  if (reg > HV_REG_CPSR) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  if (reg < HV_REG_FP) {
    vcpu_zone->rw.regs.x[reg] = value;
  } else if (reg == HV_REG_FP) {
    vcpu_zone->rw.regs.fp = value;
  } else if (reg == HV_REG_LR) {
    vcpu_zone->rw.regs.lr = value;
  } else if (reg == HV_REG_PC) {
    vcpu_zone->rw.regs.pc = value;
  } else if (reg == HV_REG_FPCR) {
    vcpu_zone->rw.neon.fpcr = value;
  } else if (reg == HV_REG_FPSR) {
    vcpu_zone->rw.neon.fpsr = value;
  } else if (reg == HV_REG_CPSR) {
    vcpu_zone->rw.regs.cpsr = value;
  }
  return 0;
}
hv_return_t hv_vcpu_get_simd_fp_reg(hv_vcpu_t vcpu, hv_simd_fp_reg_t reg,
                                    hv_simd_fp_uchar16_t* value) {
  if (reg > HV_SIMD_FP_REG_Q31) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  *((__uint128_t*)value) = vcpu_zone->rw.neon.q[reg];
  return 0;
}
hv_return_t hv_vcpu_set_simd_fp_reg(hv_vcpu_t vcpu, hv_simd_fp_reg_t reg,
                                    hv_simd_fp_uchar16_t value) {
  if (reg > HV_SIMD_FP_REG_Q31) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  vcpu_zone->rw.neon.q[reg] = *((__uint128_t*)&value);
  return 0;
}
// TODO(zhuowei): fill in the sysreg -> vcpu_zone offset table; this stub never matches
static bool find_sys_reg(hv_sys_reg_t sys_reg, uint64_t* offset, uint64_t* sync_mask) {
  return false;
}
static_assert(offsetof(arm_guest_rw_context_t, dbgregs.bp[0].bvr) == 0x450,
              "HV_SYS_REG_DBGBVR0_EL1");
hv_return_t hv_vcpu_set_sys_reg(hv_vcpu_t vcpu, hv_sys_reg_t sys_reg, uint64_t value) {
  if (sys_reg >= HV_SYS_REG_ID_AA64ISAR0_EL1 && sys_reg <= HV_SYS_REG_ID_AA64MMFR2_EL1) {
    printf("TODO(zhuowei): not implemented\n");
    return HV_BAD_ARGUMENT;
  }
  // TODO(zhuowei): handle the special cases
  uint64_t offset = 0;
  uint64_t sync_mask = 0;
  bool found = find_sys_reg(sys_reg, &offset, &sync_mask);
  if (!found) {
    return HV_BAD_ARGUMENT;
  }
  if (sync_mask) {
    // TODO(zhuowei): HV_CALL_VCPU_SYSREGS_SYNC
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  *(uint64_t*)((char*)(&vcpu_zone->rw) + offset) = value;
  return 0;
}
hv_return_t hv_vcpus_exit(hv_vcpu_t* vcpus, uint32_t vcpu_count) {
  uint64_t mask = 0;
  for (uint32_t i = 0; i < vcpu_count; i++) {
    hv_vcpu_t cpu = vcpus[i];
    if (cpu >= kHvMaxVcpus) {
      return HV_BAD_ARGUMENT;
    }
    mask |= (1ul << cpu);
  }
  // the vcpu bitmask is passed by value in the pointer argument
  return hv_trap(HV_CALL_VCPU_RUN_CANCEL, (void*)mask);
}
int main() {
  hv_return_t err = hv_vm_create(nil);
  printf("vm create %x\n", err);
  hv_vcpu_t cpu = 0;
  hv_vcpu_exit_t* exit = nil;
  err = hv_vcpu_create(&cpu, &exit, nil);
  printf("vcpu create %x\n", err);
  err = hv_vcpu_run(cpu);
  printf("run %x\n", err);
}
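// Untested build sketch: this file bypasses the Hypervisor.framework library and traps
// directly, but the kernel presumably still checks the com.apple.security.hypervisor
// entitlement; the entitlements filename below is hypothetical.
//   clang -fmodules hv.m -o hv
//   codesign -s - --entitlements hv.entitlements hv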