tlb_helper.c

/*
 * ARM TLB (Translation lookaside buffer) helpers.
 *
 * This code is licensed under the GNU GPL v2 or later.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

/* Return true if the translation regime is using LPAE format page tables */
bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    int el = regime_el(env, mmu_idx);

    if (el == 2 || arm_el_is_aa64(env, el)) {
        return true;
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)
        && (regime_tcr(env, mmu_idx) & TTBCR_EAE)) {
        return true;
    }
    return false;
}

/*
 * Returns true if the stage 1 translation regime is using LPAE format page
 * tables. Used when raising alignment exceptions, whose FSR changes depending
 * on whether the long or short descriptor format is in use.
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    mmu_idx = stage_1_mmu_idx(mmu_idx);
    return regime_using_lpae_format(env, mmu_idx);
}

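/*
 * Merge the data-abort syndrome template recorded at translation time
 * with the fault details known only at runtime (ea, s1ptw, is_write, fsc).
 */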
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /*
     * ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el, 0,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /*
         * Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      true);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}

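/*
 * Compute the FSR value to report for a fault, and return the fault
 * status code via *ret_fsc. Whether the long (LPAE) or short descriptor
 * format is used depends on the target exception level and the
 * translation regime in use.
 */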
static uint32_t compute_fsr_fsc(CPUARMState *env, ARMMMUFaultInfo *fi,
                                int target_el, int mmu_idx, uint32_t *ret_fsc)
{
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
    uint32_t fsr, fsc;

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /*
         * LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /*
         * Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }
    *ret_fsc = fsc;
    return fsr;
}

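/*
 * Build the syndrome and FSR for a fault and raise the corresponding
 * prefetch or data abort at the target exception level. Does not return.
 */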
static G_NORETURN
void arm_deliver_fault(ARMCPU *cpu, vaddr addr,
                       MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
        if (arm_is_secure_below_el3(env) && fi->s1ns) {
            env->cp15.hpfar_el2 |= HPFAR_NS;
        }
    }
    same_el = (arm_current_el(env) == target_el);

    fsr = compute_fsr_fsc(env, fi, target_el, mmu_idx, &fsc);

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
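        /* Stores on v6 and later also report WnR, FSR bit 11. */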
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

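/* Raise a prefetch abort with the PC alignment fault syndrome. */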
void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
    int target_el = exception_target_el(env);
    int mmu_idx = cpu_mmu_index(env, true);
    uint32_t fsc;

    env->exception.vaddress = pc;

    /*
     * Note that the fsc is not applicable to this exception,
     * since any syndrome is pcalignment not insn_abort.
     */
    env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc);
    raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
}

#if !defined(CONFIG_USER_ONLY)

/*
 * arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

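/*
 * TLB fill hook: translate the address with get_phys_addr() and install
 * the result in the TLB via tlb_set_page_full(), or (unless probing)
 * deliver the fault.
 */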
bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    GetPhysAddrResult res = {};
    ARMMMUFaultInfo local_fi, *fi;
    int ret;

    /*
     * Allow S1_ptw_translate to see any fault generated here.
     * Since this may recurse, read and clear.
     */
    fi = cpu->env.tlb_fi;
    if (fi) {
        cpu->env.tlb_fi = NULL;
    } else {
        fi = memset(&local_fi, 0, sizeof(local_fi));
    }

    /*
     * Walk the page table and (if the mapping exists) add the page
     * to the TLB. On success, return true. Otherwise, if probing,
     * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
     * register format, and signal the fault.
     */
    ret = get_phys_addr(&cpu->env, address, access_type,
                        core_to_arm_mmu_idx(&cpu->env, mmu_idx),
                        &res, fi);
    if (likely(!ret)) {
        /*
         * Map a single [sub]page. Regions smaller than our declared
         * target page size are handled specially, so for those we
         * pass in the exact addresses.
         */
        if (res.f.lg_page_size >= TARGET_PAGE_BITS) {
            res.f.phys_addr &= TARGET_PAGE_MASK;
            address &= TARGET_PAGE_MASK;
        }

        res.f.pte_attrs = res.cacheattrs.attrs;
        res.f.shareability = res.cacheattrs.shareability;

        tlb_set_page_full(cs, mmu_idx, address, &res.f);
        return true;
    } else if (probe) {
        return false;
    } else {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr, true);
        arm_deliver_fault(cpu, address, access_type, mmu_idx, fi);
    }
}

#else

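/*
 * User-mode emulation: report a SIGSEGV address to the guest as a
 * level 3 translation or permission fault.
 */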
void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr,
                            MMUAccessType access_type,
                            bool maperr, uintptr_t ra)
{
    ARMMMUFaultInfo fi = {
        .type = maperr ? ARMFault_Translation : ARMFault_Permission,
        .level = 3,
    };
    ARMCPU *cpu = ARM_CPU(cs);

    /*
     * We report both ESR and FAR to signal handlers.
     * For now, it's easiest to deliver the fault normally.
     */
    cpu_restore_state(cs, ra, true);
    arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi);
}

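/*
 * User-mode emulation: report a SIGBUS address via the same path as a
 * system-mode alignment fault.
 */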
void arm_cpu_record_sigbus(CPUState *cs, vaddr addr,
                           MMUAccessType access_type, uintptr_t ra)
{
    arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra);
}

#endif /* !defined(CONFIG_USER_ONLY) */