// hv.c

// Decompiled by hand (based-ish on a Ghidra decompile) from Hypervisor.framework on macOS 12.0b1
// 06/09/22: updated for macOS 12.5.1
// 15/09/22: added offsets for macOS 11.6.5
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <dispatch/dispatch.h>
#include <mach/vm_types.h>
#include "hv_kernel_structs.h"
#include "hv_vm_types.h"
static_assert(sizeof(hv_vcpu_exit_t) == 0x20, "hv_vcpu_exit");
#define HV_CALL_VM_GET_CAPABILITIES 0
#define HV_CALL_VM_CREATE 1
#define HV_CALL_VM_DESTROY 2
#define HV_CALL_VM_MAP 3
#define HV_CALL_VM_UNMAP 4
#define HV_CALL_VM_PROTECT 5
#define HV_CALL_VCPU_CREATE 6
#define HV_CALL_VCPU_DESTROY 7
#define HV_CALL_VCPU_SYSREGS_SYNC 8
#define HV_CALL_VCPU_RUN 9
#define HV_CALL_VCPU_RUN_CANCEL 10
#define HV_CALL_VCPU_SET_ADDRESS_SPACE 11
#define HV_CALL_VM_ADDRESS_SPACE_CREATE 12
#define HV_CALL_VM_INVALIDATE_TLB 13
#ifdef USE_EXTERNAL_HV_TRAP
uint64_t hv_trap(unsigned int hv_call, void* hv_arg);
#else
// A negative x16 selects a Mach trap on XNU: this issues trap -5 with hv_call
// still in w0 and hv_arg in x1 (the naked function leaves the arguments untouched).
__attribute__((naked)) uint64_t hv_trap(unsigned int hv_call, void* hv_arg) {
  asm volatile("mov x16, #-0x5\n"
               "svc 0x80\n"
               "ret\n");
}
#endif
static uint64_t hv_trap_wrap(unsigned int hv_call, void* hv_arg) {
  uint64_t err = hv_trap(hv_call, hv_arg);
  printf("hv_trap %u %p returned %llx\n", hv_call, hv_arg, err);
  return err;
}
//#define hv_trap hv_trap_wrap
static hv_return_t _hv_get_capabilities(hv_capabilities_t** c) {
  static dispatch_once_t caps_once;
  static hv_capabilities_t caps;
  static hv_return_t status;
  dispatch_once(&caps_once, ^{
    status = hv_trap(HV_CALL_VM_GET_CAPABILITIES, &caps);
  });
  *c = &caps;
  return status;
}
// this is placed at offset 8 of the cpu regs, so I'm labelling the offsets relative to those
struct hv_vcpu_data_feature_regs {
  uint64_t aa64dfr0_el1;                   // 0x8
  uint64_t aa64dfr1_el1;                   // 0x10
  uint64_t aa64isar0_el1;                  // 0x18
  uint64_t aa64isar1_el1;                  // 0x20
  uint64_t aa64mmfr0_el1;                  // 0x28
  uint64_t aa64mmfr1_el1;                  // 0x30
  uint64_t aa64mmfr2_el1;                  // 0x38
  uint64_t aa64pfr0_el1;                   // 0x40
  uint64_t aa64pfr1_el1;                   // 0x48
  uint64_t ctr_el0;                        // 0x50
  uint64_t dczid_el0;                      // 0x58
  uint64_t clidr_el1;                      // 0x60
  uint64_t ccsidr_el1_inst[8];             // 0x68
  uint64_t ccsidr_el1_data_or_unified[8];  // 0xA8
};
// TODO: define names for the flags from aarch64 documents
#define MODIFY_FLAGS_AA64DFR0_EL1(reg) ((reg) & 0xf0f0f000 | 6)
#define MODIFY_FLAGS_AA64DFR1_EL1(reg) ((reg) & 0)
#define MODIFY_FLAGS_AA64ISAR0_EL1(reg) ((reg) & 0xfffffffff0fffff0)
#define MODIFY_FLAGS_AA64ISAR1_EL1(reg) ((reg) & 0xfffffffffff)
#define MODIFY_FLAGS_AA64MMFR0_EL1(reg) ((reg) & 0xf000fff000f0 | 1)
#define MODIFY_FLAGS_AA64MMFR1_EL1(reg) ((reg) & 0xfffff0f0)
#define MODIFY_FLAGS_AA64MMFR2_EL1(reg) ((reg) & 0xf000000000000000 | (((((reg) >> 48) & 0xff) << 48) | ((((reg) >> 32) & 0xff) << 32) | (((reg) & 0xff0ff))))
#define MODIFY_FLAGS_AA64PFR0_EL1(reg) ((reg) & 0xff0f0000f0ff00ff | 0x1100)
#define MODIFY_FLAGS_AA64PFR1_EL1(reg) ((reg) & 0xf0)
#define MODIFY_FLAGS_CTR_EL0(reg) (reg)
#define MODIFY_FLAGS_DCZID_EL0(reg) (reg)
#define MODIFY_FLAGS_CLIDR_EL1(reg) (reg)
static hv_return_t _hv_vcpu_config_get_feature_regs(
    struct hv_vcpu_data_feature_regs* feature_regs) {
  hv_capabilities_t* caps = NULL;
  hv_return_t err = _hv_get_capabilities(&caps);
  if (err) {
    return err;
  }
  feature_regs->aa64dfr0_el1 = MODIFY_FLAGS_AA64DFR0_EL1(ACCESS(caps, id_aa64dfr0_el1));
  feature_regs->aa64dfr1_el1 = MODIFY_FLAGS_AA64DFR1_EL1(ACCESS(caps, id_aa64dfr1_el1));
  feature_regs->aa64isar0_el1 = MODIFY_FLAGS_AA64ISAR0_EL1(ACCESS(caps, id_aa64isar0_el1));
  feature_regs->aa64isar1_el1 = MODIFY_FLAGS_AA64ISAR1_EL1(ACCESS(caps, id_aa64isar1_el1));
  feature_regs->aa64mmfr0_el1 = MODIFY_FLAGS_AA64MMFR0_EL1(ACCESS(caps, id_aa64mmfr0_el1));
  feature_regs->aa64mmfr1_el1 = MODIFY_FLAGS_AA64MMFR1_EL1(ACCESS(caps, id_aa64mmfr1_el1));
  feature_regs->aa64mmfr2_el1 = MODIFY_FLAGS_AA64MMFR2_EL1(ACCESS(caps, id_aa64mmfr2_el1));
  feature_regs->aa64pfr0_el1 = MODIFY_FLAGS_AA64PFR0_EL1(ACCESS(caps, id_aa64pfr0_el1));
  feature_regs->aa64pfr1_el1 = MODIFY_FLAGS_AA64PFR1_EL1(ACCESS(caps, id_aa64pfr1_el1));
  feature_regs->ctr_el0 = MODIFY_FLAGS_CTR_EL0(ACCESS(caps, ctr_el0));
  feature_regs->dczid_el0 = MODIFY_FLAGS_DCZID_EL0(ACCESS(caps, dczid_el0));
  feature_regs->clidr_el1 = MODIFY_FLAGS_CLIDR_EL1(ACCESS(caps, clidr_el1));
  if (get_xnu_version() >= HV_VERSION_XNU_21) {
    static_assert(sizeof(feature_regs->ccsidr_el1_inst) == sizeof(caps->v21.ccsidr_el1_inst), "ccsidr_el1_inst size");
    memcpy(feature_regs->ccsidr_el1_inst, ACCESS(caps, ccsidr_el1_inst), sizeof(feature_regs->ccsidr_el1_inst));
    static_assert(sizeof(feature_regs->ccsidr_el1_data_or_unified) == sizeof(caps->v21.ccsidr_el1_data_or_unified), "ccsidr_el1_data_or_unified size");
    memcpy(feature_regs->ccsidr_el1_data_or_unified, ACCESS(caps, ccsidr_el1_data_or_unified), sizeof(feature_regs->ccsidr_el1_data_or_unified));
  }
  return 0;
}
// type lookup hv_vm_create_t
struct hv_vm_create_kernel_args {
  uint64_t min_ipa;
  uint64_t ipa_size;
  uint32_t granule;
  uint32_t flags;
  uint32_t isa;
};
static_assert(sizeof(struct hv_vm_create_kernel_args) == 0x20, "hv_vm_create_kernel_args size");
const struct hv_vm_create_kernel_args kDefaultVmCreateKernelArgs = {
    .min_ipa = 0,
    .ipa_size = 0,
    .granule = 0,
    .flags = 0,
    .isa = 1,
};
struct hv_vm_config_private {
  char field_0[16];
  uint64_t min_ipa;
  uint64_t ipa_size;
  uint32_t granule;
  uint32_t isa;
};
hv_return_t hv_vm_create(hv_vm_config_t config) {
  struct hv_vm_create_kernel_args args = kDefaultVmCreateKernelArgs;
  struct hv_vm_config_private *_config = (struct hv_vm_config_private *)config;
  if (config) {
    args.min_ipa = _config->min_ipa;
    args.ipa_size = _config->ipa_size;
    args.granule = _config->granule;
    args.isa = _config->isa;
  }
  return hv_trap(HV_CALL_VM_CREATE, &args);
}
// type lookup hv_vm_map_item_t, although fields are renamed to match userspace args
struct hv_vm_map_kernel_args {
  void* addr;               // 0x0
  hv_ipa_t ipa;             // 0x8
  size_t size;              // 0x10
  hv_memory_flags_t flags;  // 0x18
  uint64_t asid;            // 0x20
};
hv_return_t hv_vm_map(void* addr, hv_ipa_t ipa, size_t size, hv_memory_flags_t flags) {
  struct hv_vm_map_kernel_args args = {
      .addr = addr, .ipa = ipa, .size = size, .flags = flags, .asid = 0};
  return hv_trap(HV_CALL_VM_MAP, &args);
}
hv_return_t hv_vm_unmap(hv_ipa_t ipa, size_t size) {
  struct hv_vm_map_kernel_args args = {
      .addr = NULL, .ipa = ipa, .size = size, .flags = 0, .asid = 0};
  return hv_trap(HV_CALL_VM_UNMAP, &args);
}
hv_return_t hv_vm_protect(hv_ipa_t ipa, size_t size, hv_memory_flags_t flags) {
  struct hv_vm_map_kernel_args args = {
      .addr = NULL, .ipa = ipa, .size = size, .flags = flags, .asid = 0};
  return hv_trap(HV_CALL_VM_PROTECT, &args);
}
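// Hypothetical usage sketch (not part of the original framework code): shows the
// intended call order for the wrappers above -- create a VM with the default
// config, then map a page-aligned host buffer into guest IPA space. The buffer
// size/alignment (16 KB) and the IPA 0x10000000 are illustrative assumptions,
// and HV_MEMORY_READ/WRITE/EXEC are assumed to come from the public Hypervisor
// headers.
#if 0
static uint8_t example_guest_ram[0x10000] __attribute__((aligned(0x4000)));
static hv_return_t example_map_guest_ram(void) {
  hv_return_t err = hv_vm_create(NULL);  // NULL config -> kDefaultVmCreateKernelArgs (isa = 1)
  if (err) {
    return err;
  }
  return hv_vm_map(example_guest_ram, 0x10000000, sizeof(example_guest_ram),
                   HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
}
#endif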
static pthread_mutex_t vcpus_mutex = PTHREAD_MUTEX_INITIALIZER;
struct hv_vcpu_zone {
  arm_guest_rw_context_t rw;
  arm_guest_ro_context_t ro;
};
static_assert(sizeof(struct hv_vcpu_zone) == 0x8000, "hv_vcpu_zone");
struct hv_vcpu_data {
  struct hv_vcpu_zone* vcpu_zone;                 // 0x0
  struct hv_vcpu_data_feature_regs feature_regs;  // 0x8
  uint64_t pending_interrupts;                    // 0xe8
  hv_vcpu_exit_t exit;                            // 0xf0
  uint32_t timer_enabled;                         // 0x110
  uint32_t field_114;                             // 0x114
};
static_assert(sizeof(struct hv_vcpu_data) == 0x118, "hv_vcpu_data");
static const size_t kHvMaxVcpus = 0x40;
static struct hv_vcpu_data vcpus[kHvMaxVcpus];
struct hv_vcpu_create_kernel_args {
  uint64_t id;                            // 0x0
  struct hv_vcpu_zone* output_vcpu_zone;  // 0x8
};
struct hv_vcpu_config_private {
  char field_0[16];
  uint64_t vmkeylo_el2;
  uint64_t vmkeyhi_el2;
};
hv_return_t hv_vcpu_create(hv_vcpu_t* vcpu, hv_vcpu_exit_t** exit, hv_vcpu_config_t config) {
  struct hv_vcpu_config_private *_config = (struct hv_vcpu_config_private *)config;
  pthread_mutex_lock(&vcpus_mutex);
  hv_vcpu_t cpuid = 0;
  for (; cpuid < kHvMaxVcpus; cpuid++) {
    if (!vcpus[cpuid].vcpu_zone) {
      break;
    }
  }
  if (cpuid == kHvMaxVcpus) {
    pthread_mutex_unlock(&vcpus_mutex);
    return HV_NO_RESOURCES;
  }
  struct hv_vcpu_data* vcpu_data = &vcpus[cpuid];
  struct hv_vcpu_create_kernel_args args = {
      .id = cpuid,
      .output_vcpu_zone = 0,
  };
  kern_return_t err = hv_trap(HV_CALL_VCPU_CREATE, &args);
  if (err) {
    pthread_mutex_unlock(&vcpus_mutex);
    return err;
  }
  printf("vcpu_zone = %p\n", args.output_vcpu_zone);
  uint64_t expected_magic = get_expected_magic();
  if (args.output_vcpu_zone->ro.ver != expected_magic) {
    printf("Invalid magic! expected %llx, got %llx\n", expected_magic, args.output_vcpu_zone->ro.ver);
#ifndef USE_KERNEL_BYPASS_CHECKS
    hv_trap(HV_CALL_VCPU_DESTROY, NULL);
    pthread_mutex_unlock(&vcpus_mutex);
    return HV_UNSUPPORTED;
#else
    printf("yoloing\n");
#endif
  }
  vcpu_data->vcpu_zone = args.output_vcpu_zone;
  arm_guest_ro_context_t *ro = &vcpu_data->vcpu_zone->ro;
  arm_guest_rw_context_t *rw = &vcpu_data->vcpu_zone->rw;
  *vcpu = cpuid;
  *exit = &vcpu_data->exit;
  pthread_mutex_unlock(&vcpus_mutex);
  // configure regs from HV_CALL_VM_GET_CAPABILITIES
  err = _hv_vcpu_config_get_feature_regs(&vcpu_data->feature_regs);
  if (err) {
    hv_vcpu_destroy(cpuid);
    return err;
  }
  if (config) {
    ACCESS(rw, controls.vmkeylo_el2) = _config->vmkeylo_el2;
    ACCESS(rw, controls.vmkeyhi_el2) = _config->vmkeyhi_el2;
  }
  // Apple traps PMCCNTR_EL0 using this proprietary register, then translates the syndrome.
  // No, I don't know why Apple doesn't just use HDFGRTR_EL2 or MDCR_EL2
  ACCESS(rw, controls.hacr_el2) |= 1ull << 56;
  // TID3: trap the feature regs so we can handle these ourselves
  ACCESS(rw, controls.hcr_el2) |= 0x40000ull;
  // if ro hacr has a bit set, clear rw hcr_el2 TIDCP?!
  if ((ACCESS(ro, controls.hacr_el2) >> 4 & 1) != 0) {
    ACCESS(rw, controls.hcr_el2) &= ~0x100000;
  }
  ACCESS(rw, controls.hcr_el2) |= 0x80000;  // TSC: trap SMC instructions
  ACCESS(rw, state_dirty) |= 0x4;
  return 0;
}
hv_return_t hv_vcpu_destroy(hv_vcpu_t vcpu) {
  kern_return_t err = hv_trap(HV_CALL_VCPU_DESTROY, NULL);
  if (err) {
    return err;
  }
  pthread_mutex_lock(&vcpus_mutex);
  struct hv_vcpu_data* vcpu_data = &vcpus[vcpu];
  vcpu_data->vcpu_zone = NULL;
  vcpu_data->pending_interrupts = 0;
  pthread_mutex_unlock(&vcpus_mutex);
  return 0;
}
static bool deliver_ordinary_exception(struct hv_vcpu_data* vcpu_data, hv_vcpu_exit_t* exit);
static void deliver_uncategorized_exception(struct hv_vcpu_data* vcpu_data);
hv_return_t hv_vcpu_run(hv_vcpu_t vcpu) {
  // update registers
  struct hv_vcpu_data* vcpu_data = &vcpus[vcpu];
  arm_guest_ro_context_t *ro = &vcpu_data->vcpu_zone->ro;
  arm_guest_rw_context_t *rw = &vcpu_data->vcpu_zone->rw;
  bool injected_interrupt = false;
  if (vcpu_data->pending_interrupts) {
    injected_interrupt = true;
    ACCESS(rw, controls.hcr_el2) |= vcpu_data->pending_interrupts;
    ACCESS(rw, state_dirty) |= 0x4;
  }
  vcpu_data->timer_enabled = ACCESS(rw, controls.timer) & 1;
  while (true) {
    hv_return_t err = hv_trap(HV_CALL_VCPU_RUN, NULL);
    if (err) {
      return err;
    }
    bool print_vmexit = false;
    if (print_vmexit) {
      printf("exit = %d (esr = %x far = %llx hpfar = %llx)\n",
             ACCESS(ro, exit.vmexit_reason), ACCESS(ro, exit.vmexit_esr),
             ACCESS(ro, exit.vmexit_far), ACCESS(ro, exit.vmexit_hpfar));
    }
    hv_vcpu_exit_t* exit = &vcpu_data->exit;
    switch (ACCESS(ro, exit.vmexit_reason)) {
      case 0: {
        exit->reason = HV_EXIT_REASON_CANCELED;
        break;
      }
      case 1:  // hvc call?
      case 6:  // memory fault?
      case 8: {
        if (deliver_ordinary_exception(vcpu_data, exit)) {
          continue;
        }
        break;
      }
      case 3:
      case 4: {
        if (!vcpu_data->timer_enabled && ACCESS(rw, banked_sysregs.cntv_ctl_el0) == 5) {
          exit->reason = HV_EXIT_REASON_VTIMER_ACTIVATED;
          // mask vtimer
          ACCESS(rw, controls.timer) |= 1ull;
        } else {
          exit->reason = HV_EXIT_REASON_UNKNOWN;
        }
        break;
      }
      case 2:
      case 11: {
        // keep going!
        continue;
      }
      case 7:
        deliver_uncategorized_exception(vcpu_data);
        continue;
      default: {
        exit->reason = HV_EXIT_REASON_UNKNOWN;
        break;
      }
    }
    if (injected_interrupt) {
      vcpu_data->pending_interrupts = 0;
      ACCESS(rw, controls.hcr_el2) &= ~0xc0ull;
      ACCESS(rw, state_dirty) |= 0x4;
    }
    return 0;
  }
}
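// Hypothetical run-loop sketch (assumption, not original code): create a vCPU,
// point it at guest code previously mapped at an illustrative entry address, and
// run until the guest takes an exception. Everything else (HV_REG_*,
// HV_EXIT_REASON_*, the exit structure fields) is used exactly as defined above.
#if 0
static hv_return_t example_run_guest(void) {
  hv_vcpu_t vcpu;
  hv_vcpu_exit_t* exit;
  hv_return_t err = hv_vcpu_create(&vcpu, &exit, NULL);
  if (err) {
    return err;
  }
  hv_vcpu_set_reg(vcpu, HV_REG_PC, 0x10000000);  // assumed guest entry point
  do {
    err = hv_vcpu_run(vcpu);
  } while (!err && exit->reason == HV_EXIT_REASON_CANCELED);
  if (!err && exit->reason == HV_EXIT_REASON_EXCEPTION) {
    printf("guest exception: esr = %llx far = %llx\n",
           exit->exception.syndrome, exit->exception.virtual_address);
  }
  hv_vcpu_destroy(vcpu);
  return err;
}
#endif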
hv_return_t hv_vcpu_get_reg(hv_vcpu_t vcpu, hv_reg_t reg, uint64_t* value) {
  if (reg > HV_REG_CPSR) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  if (reg < HV_REG_FP) {
    *value = ACCESS(rw, regs.x[reg]);
  } else if (reg == HV_REG_FP) {
    *value = ACCESS(rw, regs.fp);
  } else if (reg == HV_REG_LR) {
    *value = ACCESS(rw, regs.lr);
  } else if (reg == HV_REG_PC) {
    *value = ACCESS(rw, regs.pc);
  } else if (reg == HV_REG_FPCR) {
    *value = ACCESS(rw, neon.fpcr);
  } else if (reg == HV_REG_FPSR) {
    *value = ACCESS(rw, neon.fpsr);
  } else if (reg == HV_REG_CPSR) {
    *value = ACCESS(rw, regs.cpsr);
  }
  return 0;
}
hv_return_t hv_vcpu_set_reg(hv_vcpu_t vcpu, hv_reg_t reg, uint64_t value) {
  if (reg > HV_REG_CPSR) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  if (reg < HV_REG_FP) {
    ACCESS(rw, regs.x[reg]) = value;
  } else if (reg == HV_REG_FP) {
    ACCESS(rw, regs.fp) = value;
  } else if (reg == HV_REG_LR) {
    ACCESS(rw, regs.lr) = value;
  } else if (reg == HV_REG_PC) {
    ACCESS(rw, regs.pc) = value;
  } else if (reg == HV_REG_FPCR) {
    ACCESS(rw, neon.fpcr) = value;
  } else if (reg == HV_REG_FPSR) {
    ACCESS(rw, neon.fpsr) = value;
  } else if (reg == HV_REG_CPSR) {
    ACCESS(rw, regs.cpsr) = value;
  }
  return 0;
}
hv_return_t hv_vcpu_get_simd_fp_reg(hv_vcpu_t vcpu, hv_simd_fp_reg_t reg,
                                    hv_simd_fp_uchar16_t* value) {
  if (reg > HV_SIMD_FP_REG_Q31) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  *((__uint128_t*)value) = ACCESS(rw, neon.q[reg]);
  return 0;
}
hv_return_t hv_vcpu_set_simd_fp_reg(hv_vcpu_t vcpu, hv_simd_fp_reg_t reg,
                                    hv_simd_fp_uchar16_t value) {
  if (reg > HV_SIMD_FP_REG_Q31) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  ACCESS(rw, neon.q[reg]) = *((__uint128_t*)&value);
  return 0;
}
static bool find_sys_reg(hv_sys_reg_t sys_reg, uint64_t* offset, uint64_t* sync_mask) {
  uint64_t o = 0;
  uint64_t f = 0;
  if (get_xnu_version() == HV_VERSION_XNU_20) {
    switch (sys_reg) {
#include "sysreg_offsets_xnu_20.h"
      default:
        return false;
    }
  } else if (get_xnu_version() == HV_VERSION_XNU_21 || get_xnu_version() == HV_VERSION_XNU_22) {
    switch (sys_reg) {
#include "sysreg_offsets_xnu_21_22.h"
      default:
        return false;
    }
  } else {
    return false;
  }
  *offset = o;
  *sync_mask = f;
  return true;
}
// static_assert(offsetof(arm_guest_rw_context_t, dbgregs.bp[0].bvr) == 0x450,
//               "HV_SYS_REG_DBGBVR0_EL1");
hv_return_t hv_vcpu_get_sys_reg(hv_vcpu_t vcpu, hv_sys_reg_t sys_reg, uint64_t* value) {
  hv_return_t err;
  struct hv_vcpu_data* vcpu_data = &vcpus[vcpu];
  struct hv_vcpu_zone* vcpu_zone = vcpu_data->vcpu_zone;
  arm_guest_ro_context_t *ro = &vcpu_zone->ro;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  switch (sys_reg) {
    case HV_SYS_REG_MIDR_EL1:
      *value = ACCESS(rw, controls.vpidr_el2);
      return 0;
    case HV_SYS_REG_MPIDR_EL1:
      *value = ACCESS(rw, controls.vmpidr_el2);
      return 0;
    case HV_SYS_REG_ID_AA64PFR0_EL1:
      *value = vcpu_data->feature_regs.aa64pfr0_el1;
      return 0;
    case HV_SYS_REG_ID_AA64PFR1_EL1:
      *value = vcpu_data->feature_regs.aa64pfr1_el1;
      return 0;
    case HV_SYS_REG_ID_AA64DFR0_EL1:
      *value = vcpu_data->feature_regs.aa64dfr0_el1;
      return 0;
    case HV_SYS_REG_ID_AA64DFR1_EL1:
      *value = vcpu_data->feature_regs.aa64dfr1_el1;
      return 0;
    case HV_SYS_REG_ID_AA64ISAR0_EL1:
      *value = vcpu_data->feature_regs.aa64isar0_el1;
      return 0;
    case HV_SYS_REG_ID_AA64ISAR1_EL1:
      *value = vcpu_data->feature_regs.aa64isar1_el1;
      return 0;
    case HV_SYS_REG_ID_AA64MMFR0_EL1:
      *value = vcpu_data->feature_regs.aa64mmfr0_el1;
      return 0;
    case HV_SYS_REG_ID_AA64MMFR1_EL1:
      *value = vcpu_data->feature_regs.aa64mmfr1_el1;
      return 0;
    case HV_SYS_REG_ID_AA64MMFR2_EL1:
      *value = vcpu_data->feature_regs.aa64mmfr2_el1;
      return 0;
    default:
      break;
  }
  // handle the special cases
  uint64_t offset = 0;
  uint64_t sync_mask = 0;
  bool found = find_sys_reg(sys_reg, &offset, &sync_mask);
  if (!found) {
    printf("invalid get sys reg: %x\n", sys_reg);
    return HV_BAD_ARGUMENT;
  }
  if ((sync_mask != 0) &&
      ((ACCESS(rw, state_dirty) & sync_mask) == 0 && (ACCESS(ro, state_valid) & sync_mask) == 0)) {
    if ((err = hv_trap(HV_CALL_VCPU_SYSREGS_SYNC, 0)) != 0) {
      return err;
    }
  }
  *value = *(uint64_t*)((char*)rw + offset);
  return 0;
}
hv_return_t hv_vcpu_set_sys_reg(hv_vcpu_t vcpu, hv_sys_reg_t sys_reg, uint64_t value) {
  hv_return_t err;
  struct hv_vcpu_data* vcpu_data = &vcpus[vcpu];
  struct hv_vcpu_zone* vcpu_zone = vcpu_data->vcpu_zone;
  arm_guest_ro_context_t *ro = &vcpu_zone->ro;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  switch (sys_reg) {
    case HV_SYS_REG_MIDR_EL1: {
      ACCESS(rw, controls.vpidr_el2) = value;
      ACCESS(rw, state_dirty) |= 0x4;
      return 0;
    }
    case HV_SYS_REG_MPIDR_EL1: {
      ACCESS(rw, controls.vmpidr_el2) = value;
      ACCESS(rw, state_dirty) |= 0x4;
      return 0;
    }
    // the kernel doesn't set these - userspace traps and handles these
    case HV_SYS_REG_ID_AA64PFR0_EL1:
      vcpu_data->feature_regs.aa64pfr0_el1 = value;
      return 0;
    case HV_SYS_REG_ID_AA64PFR1_EL1:
      vcpu_data->feature_regs.aa64pfr1_el1 = value;
      return 0;
    case HV_SYS_REG_ID_AA64DFR0_EL1:
      vcpu_data->feature_regs.aa64dfr0_el1 = value;
      return 0;
    case HV_SYS_REG_ID_AA64DFR1_EL1:
      vcpu_data->feature_regs.aa64dfr1_el1 = value;
      return 0;
    case HV_SYS_REG_ID_AA64ISAR0_EL1:
      vcpu_data->feature_regs.aa64isar0_el1 = value;
      return 0;
    case HV_SYS_REG_ID_AA64ISAR1_EL1:
      vcpu_data->feature_regs.aa64isar1_el1 = value;
      return 0;
    case HV_SYS_REG_ID_AA64MMFR0_EL1:
      vcpu_data->feature_regs.aa64mmfr0_el1 = value;
      return 0;
    case HV_SYS_REG_ID_AA64MMFR1_EL1:
      vcpu_data->feature_regs.aa64mmfr1_el1 = value;
      return 0;
    case HV_SYS_REG_ID_AA64MMFR2_EL1:
      vcpu_data->feature_regs.aa64mmfr2_el1 = value;
      return 0;
    default:
      break;
  }
  // handle the special cases
  uint64_t offset = 0;
  uint64_t sync_mask = 0;
  bool found = find_sys_reg(sys_reg, &offset, &sync_mask);
  if (!found) {
    printf("invalid set sys reg: %x\n", sys_reg);
    return HV_BAD_ARGUMENT;
  }
  if ((sync_mask != 0) && (((ACCESS(ro, state_valid) & sync_mask) == 0))) {
    if ((err = hv_trap(HV_CALL_VCPU_SYSREGS_SYNC, 0)) != 0) {
      return err;
    }
  }
  *(uint64_t*)((char*)rw + offset) = value;
  if (sync_mask != 0) {
    ACCESS(rw, state_dirty) |= sync_mask;
  }
  return 0;
}
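// Hypothetical usage sketch (assumption, not original code): reading a guest
// system register through the accessors above. HV_SYS_REG_MIDR_EL1 is one of the
// registers handled locally; the vcpu handle is assumed to come from
// hv_vcpu_create().
#if 0
static void example_print_midr(hv_vcpu_t vcpu) {
  uint64_t midr = 0;
  if (hv_vcpu_get_sys_reg(vcpu, HV_SYS_REG_MIDR_EL1, &midr) == 0) {
    printf("guest MIDR_EL1 = %llx\n", midr);  // served from controls.vpidr_el2
  }
}
#endif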
hv_return_t hv_vcpu_get_vtimer_mask(hv_vcpu_t vcpu, bool* vtimer_is_masked) {
  if (!vtimer_is_masked) {
    return HV_BAD_ARGUMENT;
  }
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  *vtimer_is_masked = ACCESS(rw, controls.timer) & 1;
  return 0;
}
hv_return_t hv_vcpu_set_vtimer_mask(hv_vcpu_t vcpu, bool vtimer_is_masked) {
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  ACCESS(rw, controls.timer) = (ACCESS(rw, controls.timer) & ~1ull) | vtimer_is_masked;
  return 0;
}
hv_return_t hv_vcpu_get_vtimer_offset(hv_vcpu_t vcpu, uint64_t* vtimer_offset) {
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  *vtimer_offset = ACCESS(rw, controls.virtual_timer_offset);
  return 0;
}
hv_return_t hv_vcpu_set_vtimer_offset(hv_vcpu_t vcpu, uint64_t vtimer_offset) {
  struct hv_vcpu_zone* vcpu_zone = vcpus[vcpu].vcpu_zone;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  ACCESS(rw, controls.virtual_timer_offset) = vtimer_offset;
  ACCESS(rw, state_dirty) |= 0x4;
  return 0;
}
hv_return_t hv_vcpu_set_pending_interrupt(hv_vcpu_t vcpu, hv_interrupt_type_t type, bool pending) {
  struct hv_vcpu_data* vcpu_data = &vcpus[vcpu];
  if (type == HV_INTERRUPT_TYPE_IRQ) {
    // HCR_EL2 VI bit
    if (pending) {
      vcpu_data->pending_interrupts |= 0x80ull;
    } else {
      vcpu_data->pending_interrupts &= ~0x80ull;
    }
    return 0;
  } else if (type == HV_INTERRUPT_TYPE_FIQ) {
    // HCR_EL2 VF bit
    if (pending) {
      vcpu_data->pending_interrupts |= 0x40ull;
    } else {
      vcpu_data->pending_interrupts &= ~0x40ull;
    }
    return 0;
  } else {
    return HV_BAD_ARGUMENT;
  }
}
hv_return_t hv_vcpus_exit(hv_vcpu_t* vcpus, uint32_t vcpu_count) {
  uint64_t mask = 0;
  for (int i = 0; i < vcpu_count; i++) {
    hv_vcpu_t cpu = vcpus[i];
    if (cpu >= kHvMaxVcpus) {
      return HV_BAD_ARGUMENT;
    }
    mask |= (1ul << cpu);
  }
  return hv_trap(HV_CALL_VCPU_RUN_CANCEL, (void*)mask);
}
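// Hypothetical usage sketch (assumption, not original code): from a device
// emulation thread, mark a virtual IRQ pending and kick the vCPU out of
// HV_CALL_VCPU_RUN so hv_vcpu_run() returns and can inject it (via HCR_EL2.VI)
// on the next entry. The vcpu handle is assumed to come from hv_vcpu_create().
#if 0
static void example_inject_irq(hv_vcpu_t vcpu) {
  hv_vcpu_set_pending_interrupt(vcpu, HV_INTERRUPT_TYPE_IRQ, true);
  hv_vcpus_exit(&vcpu, 1);  // current hv_vcpu_run() reports HV_EXIT_REASON_CANCELED
}
#endif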
void sync_and_dirty_banked_state(struct hv_vcpu_zone *vcpu_zone, uint64_t state) {
  arm_guest_ro_context_t *ro = &vcpu_zone->ro;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  if (((ACCESS(ro, state_valid) & state) == 0) && hv_trap(HV_CALL_VCPU_SYSREGS_SYNC, 0) != 0) {
    assert(false);
  }
  ACCESS(rw, state_dirty) = ACCESS(rw, state_dirty) | state;
  return;
}
static bool deliver_msr_trap(struct hv_vcpu_data* vcpu_data, hv_vcpu_exit_t* exit) {
  struct hv_vcpu_zone* vcpu_zone = vcpu_data->vcpu_zone;
  arm_guest_ro_context_t *ro = &vcpu_zone->ro;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  uint64_t esr = ACCESS(ro, exit.vmexit_esr);
  uint32_t reg = (esr >> 5) & 0x1f;     // ISS.Rt: source/destination GPR
  uint32_t sysreg = esr & 0x3ffc1e;     // ISS Op0/Op2/Op1/CRn/CRm fields identify the register
  if ((esr & 0x300000) == 0x200000) {   // Op0 == 2: debug registers
    if ((ACCESS(rw, controls.mdcr_el2) >> 9 & 1) != 0) {
      return false;
    }
    if ((esr & 1) == 0) {               // ISS.Direction == 0: write (MSR)
      switch (sysreg) {
        case 0x200004:
          ACCESS(rw, dbgregs.mdccint_el1) = ACCESS(rw, regs.x[reg]);
          break;
        case 0x240000:
          ACCESS(rw, dbgregs.osdtrrx_el1) = ACCESS(rw, regs.x[reg]);
          break;
        case 0x20c008:
        case 0x240006:
          ACCESS(rw, dbgregs.osdtrtx_el1) = ACCESS(rw, regs.x[reg]);
          break;
        default:
          return false;
      }
    } else {                            // ISS.Direction == 1: read (MRS)
      switch (sysreg) {
        case 0x200004:
          ACCESS(rw, regs.x[reg]) = ACCESS(rw, dbgregs.mdccint_el1);
          break;
        case 0x20c008:
        case 0x20c00a:
        case 0x240000:
          ACCESS(rw, regs.x[reg]) = ACCESS(rw, dbgregs.osdtrrx_el1);
          break;
        case 0x240006:
          ACCESS(rw, regs.x[reg]) = ACCESS(rw, dbgregs.osdtrtx_el1);
          break;
        case 0x20c002:
          ACCESS(rw, regs.x[reg]) = 0;
          break;
        default:
          return false;
      }
    }
  } else {
    if ((esr & 1) == 0) {
      return false;
    }
    switch (sysreg) {
      case 0x300002:
      case 0x300004:
      case 0x300006:
      case 0x320002:
      case 0x320004:
      case 0x320006:
      case 0x340002:
      case 0x340004:
      case 0x340006:
      case 0x360002:
      case 0x360004:
      case 0x380002:
      case 0x380004:
      case 0x3a0002:
      case 0x3a0004:
      case 0x3c0002:
      case 0x3c0004:
      case 0x3e0002:
        ACCESS(rw, regs.x[reg]) = 0;
        break;
      case 0x34000e:
        ACCESS(rw, regs.x[reg]) = vcpu_data->feature_regs.aa64mmfr2_el1;
        break;
      case 0x300008:
        ACCESS(rw, regs.x[reg]) = vcpu_data->feature_regs.aa64pfr0_el1;
        break;
      case 0x30000a:
        ACCESS(rw, regs.x[reg]) = vcpu_data->feature_regs.aa64dfr0_el1;
        break;
      case 0x30000c:
        ACCESS(rw, regs.x[reg]) = vcpu_data->feature_regs.aa64isar0_el1;
        break;
      case 0x30000e:
        ACCESS(rw, regs.x[reg]) = vcpu_data->feature_regs.aa64mmfr0_el1;
        break;
      case 0x320008:
        ACCESS(rw, regs.x[reg]) = vcpu_data->feature_regs.aa64pfr1_el1;
        break;
      case 0x32000a:
        ACCESS(rw, regs.x[reg]) = vcpu_data->feature_regs.aa64dfr1_el1;
        break;
      case 0x32000c:
        ACCESS(rw, regs.x[reg]) = vcpu_data->feature_regs.aa64isar1_el1;
        break;
      case 0x32000e:
        ACCESS(rw, regs.x[reg]) = vcpu_data->feature_regs.aa64mmfr1_el1;
        break;
      default:
        return false;
    }
  }
  ACCESS(rw, regs.pc) += 4;
  return true;
}
// https://github.com/apple-oss-distributions/xnu/blob/e7776783b89a353188416a9a346c6cdb4928faad/pexpert/pexpert/arm64/VMAPPLE.h#L84
static bool deliver_pac_trap(struct hv_vcpu_data* vcpu_data) {
  struct hv_vcpu_zone* vcpu_zone = vcpu_data->vcpu_zone;
  arm_guest_ro_context_t *ro = &vcpu_zone->ro;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  uint64_t esr = ACCESS(ro, exit.vmexit_esr);
  uint32_t uVar6;
  uint64_t uVar9;
  if (((esr & 0xffff) != 0) ||
      ((ACCESS(rw, regs.x[0]) & 0xff000000) != 0xc1000000)) {
    return false;
  }
  uVar6 = ACCESS(rw, regs.x[0]) & 0xffffff;
  if (((ACCESS(ro, controls.hacr_el2) >> 4 & 1) == 0) ||
      (6 < uVar6)) {
    ACCESS(rw, regs.x[0]) = 0xffffffff;
    return true;
  }
  switch (uVar6) {
    default:
      // VMAPPLE_PAC_SET_INITIAL_STATE
      ACCESS(rw, extregs.apctl_el1) = 0x11;
      sync_and_dirty_banked_state(vcpu_zone, 0x2000000000000000);
      ACCESS(rw, extregs.apiakeylo_el1) = 0xfeedfacefeedfacf;
      ACCESS(rw, extregs.apiakeyhi_el1) = 0xfeedfacefeedfad0;
      ACCESS(rw, extregs.apdakeylo_el1) = 0xfeedfacefeedfad1;
      ACCESS(rw, extregs.apdakeyhi_el1) = 0xfeedfacefeedfad2;
      sync_and_dirty_banked_state(vcpu_zone, 0x2000000000000000);
      ACCESS(rw, extregs.apibkeylo_el1) = 0xfeedfacefeedfad5;
      ACCESS(rw, extregs.apibkeyhi_el1) = 0xfeedfacefeedfad6;
      ACCESS(rw, extregs.apdbkeylo_el1) = 0xfeedfacefeedfad7;
      ACCESS(rw, extregs.apdbkeyhi_el1) = 0xfeedfacefeedfad8;
      sync_and_dirty_banked_state(vcpu_zone, 0x2000000000000000);
      ACCESS(rw, extregs.apgakeylo_el1) = 0xfeedfacefeedfad9;
      ACCESS(rw, extregs.apgakeyhi_el1) = 0xfeedfacefeedfada;
      sync_and_dirty_banked_state(vcpu_zone, 0x1000000000000000);
      ACCESS(rw, extregs.kernkeylo_el1) = 0xfeedfacefeedfad3;
      ACCESS(rw, extregs.kernkeyhi_el1) = 0xfeedfacefeedfad4;
      break;
    case 1:
      // VMAPPLE_PAC_GET_DEFAULT_KEYS
      ACCESS(rw, regs.x[1]) = 0xfeedfacefeedfacf;
      ACCESS(rw, regs.x[0]) = 0;
      ACCESS(rw, regs.x[3]) = 0xfeedfacefeedfad3;
      ACCESS(rw, regs.x[2]) = 0xfeedfacefeedfad5;
      ACCESS(rw, regs.x[4]) = 0xfeedfacefeedfad9;
      return true;
    case 2:
      // VMAPPLE_PAC_SET_A_KEYS
      uVar9 = ACCESS(rw, regs.x[1]);
      sync_and_dirty_banked_state(vcpu_zone, 0x2000000000000000);
      ACCESS(rw, extregs.apiakeylo_el1) = uVar9;
      ACCESS(rw, extregs.apiakeyhi_el1) = uVar9 + 1;
      ACCESS(rw, extregs.apdakeylo_el1) = uVar9 + 2;
      ACCESS(rw, extregs.apdakeyhi_el1) = uVar9 + 3;
      break;
    case 3:
      // VMAPPLE_PAC_SET_B_KEYS
      uVar9 = ACCESS(rw, regs.x[1]);
      sync_and_dirty_banked_state(vcpu_zone, 0x2000000000000000);
      ACCESS(rw, extregs.apibkeylo_el1) = uVar9;
      ACCESS(rw, extregs.apibkeyhi_el1) = uVar9 + 1;
      ACCESS(rw, extregs.apdbkeylo_el1) = uVar9 + 2;
      ACCESS(rw, extregs.apdbkeyhi_el1) = uVar9 + 3;
      break;
    case 4:
      // VMAPPLE_PAC_SET_EL0_DIVERSIFIER
      uVar9 = ACCESS(rw, regs.x[1]);
      sync_and_dirty_banked_state(vcpu_zone, 0x1000000000000000);
      ACCESS(rw, extregs.kernkeylo_el1) = uVar9;
      ACCESS(rw, extregs.kernkeyhi_el1) = uVar9 + 1;
      break;
    case 5:
      // VMAPPLE_PAC_SET_EL0_DIVERSIFIER_AT_EL1
      uVar9 = ACCESS(rw, regs.x[2]);
      sync_and_dirty_banked_state(vcpu_zone, 0x1000000000000000);
      ACCESS(rw, extregs.kernkeylo_el1) = uVar9;
      ACCESS(rw, extregs.kernkeyhi_el1) = uVar9 + 1;
      uVar9 = ACCESS(rw, regs.x[1]);
      if (uVar9 == 0) {
        ACCESS(rw, extregs.apctl_el1) = ACCESS(rw, extregs.apctl_el1) & 0xfffffffffffffffd;
      } else if (uVar9 == 1) {
        ACCESS(rw, extregs.apctl_el1) = ACCESS(rw, extregs.apctl_el1) | 2;
      }
      break;
    case 6:
      // sets the generic (APGAKey) authentication key
      uVar9 = ACCESS(rw, regs.x[1]);
      sync_and_dirty_banked_state(vcpu_zone, 0x2000000000000000);
      ACCESS(rw, extregs.apgakeylo_el1) = uVar9;
      ACCESS(rw, extregs.apgakeyhi_el1) = uVar9 + 1;
      break;
  }
  ACCESS(rw, regs.x[0]) = 0;
  return true;
}
static bool deliver_ordinary_exception(struct hv_vcpu_data* vcpu_data, hv_vcpu_exit_t* exit) {
  struct hv_vcpu_zone* vcpu_zone = vcpu_data->vcpu_zone;
  arm_guest_ro_context_t *ro = &vcpu_zone->ro;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  uint64_t esr = ACCESS(ro, exit.vmexit_esr);
  exit->reason = HV_EXIT_REASON_EXCEPTION;
  exit->exception.syndrome = esr;
  exit->exception.virtual_address = ACCESS(ro, exit.vmexit_far);
  exit->exception.physical_address = ACCESS(ro, exit.vmexit_hpfar);
  if ((esr >> 26) == 0x16) {
    // EC 0x16: HVC instruction from AArch64
    return deliver_pac_trap(vcpu_data);
  } else if ((esr >> 26) == 0x3f) {
    // EC 0x3f: not an architectural EC; the kernel passes the raw trapped instruction in vmexit_instr
    if (ACCESS(ro, exit.vmexit_reason) != 8) {
      deliver_uncategorized_exception(vcpu_data);
      return true;
    }
    uint64_t exit_instr = ACCESS(ro, exit.vmexit_instr);
    if (((exit_instr ^ 0xffffffff) & 0x302c00) == 0) {
      if ((ACCESS(ro, controls.hacr_el2) >> 4 & 1) != 0) {
        deliver_uncategorized_exception(vcpu_data);
        return true;
      }
    } else if ((exit_instr & 0x1ff0000) == 0x1c00000) {
      // re-encode as an SMC (EC 0x17) syndrome carrying the imm16
      exit->exception.syndrome = exit_instr & 0xffff | 0x5e000000;
    } else {
      if (((exit_instr & 0x3ffc1e) == 0x3e4000) &&
          ((ACCESS(ro, controls.hacr_el2) >> 4 & 1) != 0)) {
        ACCESS(rw, regs.x[((exit_instr >> 5) & 0x1f)]) = 0x980200;
        ACCESS(rw, regs.pc) += 4;
        return true;
      }
      // re-encode as a trapped MSR/MRS (EC 0x18) syndrome
      exit->exception.syndrome = exit_instr & 0x1ffffff | 0x62000000;
    }
    return false;
  } else if ((esr >> 26) == 0x18) {
    // EC 0x18: trapped MSR/MRS/system register access
    return deliver_msr_trap(vcpu_data, exit);
  }
  return false;
}
static void deliver_uncategorized_exception(struct hv_vcpu_data* vcpu_data) {
  struct hv_vcpu_zone* vcpu_zone = vcpu_data->vcpu_zone;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  uint64_t cpsr, vbar_el1, pc;
  sync_and_dirty_banked_state(vcpu_zone, 1);
  ACCESS(rw, banked_sysregs.elr_el1) = ACCESS(rw, regs.pc);
  ACCESS(rw, banked_sysregs.esr_el1) = 0x2000000;  // EC 0 (unknown reason), IL set
  ACCESS(rw, banked_sysregs.spsr_el1) = ACCESS(rw, regs.cpsr);
  cpsr = ACCESS(rw, regs.cpsr);
  assert((cpsr >> 4 & 1) == 0); // (m & SPSR_MODE_RW_32) == 0
  vbar_el1 = ACCESS(rw, banked_sysregs.vbar_el1);
  // pick the vector table entry: EL1t (+0x0), EL1h (+0x200), or lower EL using AArch64 (+0x400)
  pc = vbar_el1;
  if ((cpsr & 1) != 0) {
    pc = vbar_el1 + 0x200;
  }
  if ((cpsr & 0x1f) < 4) {
    pc = vbar_el1 + 0x400;
  }
  ACCESS(rw, regs.pc) = pc;
  ACCESS(rw, regs.cpsr) = ACCESS(rw, regs.cpsr) & 0xffffffe0;
  ACCESS(rw, regs.cpsr) = ACCESS(rw, regs.cpsr) | 0x3c5;  // EL1h with DAIF masked
}
extern void *_os_object_alloc(const void *cls, size_t size);
hv_vm_config_t hv_vm_config_create(void) {
  struct hv_vm_config_private *_config = _os_object_alloc(NULL, sizeof(struct hv_vm_config_private));
  _config->min_ipa = 0;
  _config->ipa_size = 0;
  _config->granule = 0;
  _config->isa = 1;
  return (hv_vm_config_t)_config;
}
hv_return_t _hv_vm_config_set_isa(hv_vm_config_t config, uint32_t isa) {
  struct hv_vm_config_private *_config = (struct hv_vm_config_private *)config;
  if (config == NULL) {
    return HV_BAD_ARGUMENT;
  }
  _config->isa = isa;
  return 0;
}
hv_return_t _hv_vcpu_get_actlr(hv_vcpu_t vcpu, uint64_t* value) {
  hv_return_t err;
  struct hv_vcpu_data* vcpu_data = &vcpus[vcpu];
  struct hv_vcpu_zone* vcpu_zone = vcpu_data->vcpu_zone;
  arm_guest_ro_context_t *ro = &vcpu_zone->ro;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  const uint64_t sync_mask = 0x1;
  if ((ACCESS(ro, state_valid) & sync_mask) == 0) {
    if ((err = hv_trap(HV_CALL_VCPU_SYSREGS_SYNC, 0)) != 0) {
      return err;
    }
  }
  *value = ACCESS(rw, banked_sysregs.actlr_el1);
  return 0;
}
hv_return_t _hv_vcpu_set_actlr(hv_vcpu_t vcpu, uint64_t value) {
  hv_return_t err;
  struct hv_vcpu_data* vcpu_data = &vcpus[vcpu];
  struct hv_vcpu_zone* vcpu_zone = vcpu_data->vcpu_zone;
  arm_guest_ro_context_t *ro = &vcpu_zone->ro;
  arm_guest_rw_context_t *rw = &vcpu_zone->rw;
  const uint64_t sync_mask = 0x1;
  if ((ACCESS(ro, state_valid) & sync_mask) == 0) {
    if ((err = hv_trap(HV_CALL_VCPU_SYSREGS_SYNC, 0)) != 0) {
      return err;
    }
  }
  ACCESS(rw, banked_sysregs.actlr_el1) = value;
  ACCESS(rw, state_dirty) |= sync_mask;
  return 0;
}