/*
 * QEMU ARM CPU -- internal functions and types
 *
 * Copyright (c) 2014 Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This header defines functions, types, etc which need to be shared
 * between different source files within target/arm/ but which are
 * private to it and not required by the rest of QEMU.
 */

#ifndef TARGET_ARM_INTERNALS_H
#define TARGET_ARM_INTERNALS_H

#include "hw/registerfields.h"
#include "tcg/tcg-gvec-desc.h"
#include "syndrome.h"

/* register banks for CPU modes */
#define BANK_USRSYS 0
#define BANK_SVC 1
#define BANK_ABT 2
#define BANK_UND 3
#define BANK_IRQ 4
#define BANK_FIQ 5
#define BANK_HYP 6
#define BANK_MON 7

static inline bool excp_is_internal(int excp)
{
    /* Return true if this exception number represents a QEMU-internal
     * exception that will not be passed to the guest.
     */
    return excp == EXCP_INTERRUPT
        || excp == EXCP_HLT
        || excp == EXCP_DEBUG
        || excp == EXCP_HALTED
        || excp == EXCP_EXCEPTION_EXIT
        || excp == EXCP_KERNEL_TRAP
        || excp == EXCP_SEMIHOST;
}

/* Scale factor for generic timers, ie number of ns per tick.
 * This gives a 62.5MHz timer.
 */
#define GTIMER_SCALE 16

/* Bit definitions for the v7M CONTROL register */
FIELD(V7M_CONTROL, NPRIV, 0, 1)
FIELD(V7M_CONTROL, SPSEL, 1, 1)
FIELD(V7M_CONTROL, FPCA, 2, 1)
FIELD(V7M_CONTROL, SFPA, 3, 1)

/* Bit definitions for v7M exception return payload */
FIELD(V7M_EXCRET, ES, 0, 1)
FIELD(V7M_EXCRET, RES0, 1, 1)
FIELD(V7M_EXCRET, SPSEL, 2, 1)
FIELD(V7M_EXCRET, MODE, 3, 1)
FIELD(V7M_EXCRET, FTYPE, 4, 1)
FIELD(V7M_EXCRET, DCRS, 5, 1)
FIELD(V7M_EXCRET, S, 6, 1)
FIELD(V7M_EXCRET, RES1, 7, 25) /* including the must-be-1 prefix */

/* Minimum value which is a magic number for exception return */
#define EXC_RETURN_MIN_MAGIC 0xff000000
/* Minimum number which is a magic number for function or exception return
 * when using v8M security extension
 */
#define FNC_RETURN_MIN_MAGIC 0xfefffffe

/* We use a few fake FSR values for internal purposes in M profile.
 * M profile cores don't have A/R format FSRs, but currently our
 * get_phys_addr() code assumes A/R profile and reports failures via
 * an A/R format FSR value. We then translate that into the proper
 * M profile exception and FSR status bit in arm_v7m_cpu_do_interrupt().
 * Mostly the FSR values we use for this are those defined for v7PMSA,
 * since we share some of that codepath. A few kinds of fault are
 * only for M profile and have no A/R equivalent, though, so we have
 * to pick a value from the reserved range (which we never otherwise
 * generate) to use for these.
 * These values will never be visible to the guest.
 */
#define M_FAKE_FSR_NSC_EXEC 0xf /* NS executing in S&NSC memory */
#define M_FAKE_FSR_SFAULT 0xe /* SecureFault INVTRAN, INVEP or AUVIOL */

/**
 * raise_exception: Raise the specified exception.
 * Raise a guest exception with the specified value, syndrome register
 * and target exception level. This should be called from helper functions,
 * and never returns because we will longjump back up to the CPU main loop.
 */
void QEMU_NORETURN raise_exception(CPUARMState *env, uint32_t excp,
                                   uint32_t syndrome, uint32_t target_el);

/*
 * Similarly, but also use unwinding to restore cpu state.
 */
void QEMU_NORETURN raise_exception_ra(CPUARMState *env, uint32_t excp,
                                      uint32_t syndrome, uint32_t target_el,
                                      uintptr_t ra);

/*
 * For AArch64, map a given EL to an index in the banked_spsr array.
 * Note that this mapping and the AArch32 mapping defined in bank_number()
 * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
 * mandated mapping between each other.
 */
static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
{
    static const unsigned int map[4] = {
        [1] = BANK_SVC, /* EL1. */
        [2] = BANK_HYP, /* EL2. */
        [3] = BANK_MON, /* EL3. */
    };
    assert(el >= 1 && el <= 3);
    return map[el];
}

/* Map CPU modes onto saved register banks. */
static inline int bank_number(int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return BANK_USRSYS;
    case ARM_CPU_MODE_SVC:
        return BANK_SVC;
    case ARM_CPU_MODE_ABT:
        return BANK_ABT;
    case ARM_CPU_MODE_UND:
        return BANK_UND;
    case ARM_CPU_MODE_IRQ:
        return BANK_IRQ;
    case ARM_CPU_MODE_FIQ:
        return BANK_FIQ;
    case ARM_CPU_MODE_HYP:
        return BANK_HYP;
    case ARM_CPU_MODE_MON:
        return BANK_MON;
    }
    g_assert_not_reached();
}

/**
 * r14_bank_number: Map CPU mode onto register bank for r14
 *
 * Given an AArch32 CPU mode, return the index into the saved register
 * banks to use for the R14 (LR) in that mode. This is the same as
 * bank_number(), except for the special case of Hyp mode, where
 * R14 is shared with USR and SYS, unlike its R13 and SPSR.
 * This should be used as the index into env->banked_r14[], and
 * bank_number() used for the index into env->banked_r13[] and
 * env->banked_spsr[].
 */
static inline int r14_bank_number(int mode)
{
    return (mode == ARM_CPU_MODE_HYP) ? BANK_USRSYS : bank_number(mode);
}

void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);

#ifdef CONFIG_TCG
void arm_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
#endif /* CONFIG_TCG */

/**
 * aarch64_sve_zcr_get_valid_len:
 * @cpu: cpu context
 * @start_len: maximum len to consider
 *
 * Return the maximum supported sve vector length <= @start_len.
 * Note that both @start_len and the return value are in units
 * of ZCR_ELx.LEN, so the vector bit length is (x + 1) * 128.
 */
uint32_t aarch64_sve_zcr_get_valid_len(ARMCPU *cpu, uint32_t start_len);

enum arm_fprounding {
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
    FPROUNDING_ZERO,
    FPROUNDING_TIEAWAY,
    FPROUNDING_ODD
};

int arm_rmode_to_sf(int rmode);

static inline void aarch64_save_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->sp_el[el] = env->xregs[31];
    } else {
        env->sp_el[0] = env->xregs[31];
    }
}

static inline void aarch64_restore_sp(CPUARMState *env, int el)
{
    if (env->pstate & PSTATE_SP) {
        env->xregs[31] = env->sp_el[el];
    } else {
        env->xregs[31] = env->sp_el[0];
    }
}

static inline void update_spsel(CPUARMState *env, uint32_t imm)
{
    unsigned int cur_el = arm_current_el(env);
    /* Update PSTATE SPSel bit; this requires us to update the
     * working stack pointer in xregs[31].
     */
    if (!((imm ^ env->pstate) & PSTATE_SP)) {
        return;
    }
    aarch64_save_sp(env, cur_el);
    env->pstate = deposit32(env->pstate, 0, 1, imm);

    /* We rely on illegal updates to SPsel from EL0 to get trapped
     * at translation time.
     */
    assert(cur_el >= 1 && cur_el <= 3);
    aarch64_restore_sp(env, cur_el);
}

/*
 * arm_pamax
 * @cpu: ARMCPU
 *
 * Returns the implementation defined bit-width of physical addresses.
 * The ARMv8 reference manuals refer to this as PAMax().
 */
static inline unsigned int arm_pamax(ARMCPU *cpu)
{
    static const unsigned int pamax_map[] = {
        [0] = 32,
        [1] = 36,
        [2] = 40,
        [3] = 42,
        [4] = 44,
        [5] = 48,
    };
    unsigned int parange =
        FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);

    /* id_aa64mmfr0 is a read-only register so values outside of the
     * supported mappings can be considered an implementation error. */
    assert(parange < ARRAY_SIZE(pamax_map));
    return pamax_map[parange];
}

/* Return true if extended addresses are enabled.
 * This is always the case if our translation regime is 64 bit,
 * but depends on TTBCR.EAE for 32 bit.
 */
static inline bool extended_addresses_enabled(CPUARMState *env)
{
    TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
    return arm_el_is_aa64(env, 1) ||
           (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
}

/* Update a QEMU watchpoint based on the information the guest has set in the
 * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
 */
void hw_watchpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU watchpoints for every guest watchpoint. This does a
 * complete delete-and-reinstate of the QEMU watchpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_watchpoint_update_all(ARMCPU *cpu);
/* Update a QEMU breakpoint based on the information the guest has set in the
 * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
 */
void hw_breakpoint_update(ARMCPU *cpu, int n);
/* Update the QEMU breakpoints for every guest breakpoint. This does a
 * complete delete-and-reinstate of the QEMU breakpoint list and so is
 * suitable for use after migration or on reset.
 */
void hw_breakpoint_update_all(ARMCPU *cpu);

/* Callback function for checking if a breakpoint should trigger. */
bool arm_debug_check_breakpoint(CPUState *cs);

/* Callback function for checking if a watchpoint should trigger. */
bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);

/* Adjust addresses (in BE32 mode) before testing against watchpoint
 * addresses.
 */
vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len);

/* Callback function for when a watchpoint or breakpoint triggers. */
void arm_debug_excp_handler(CPUState *cs);

#if defined(CONFIG_USER_ONLY) || !defined(CONFIG_TCG)
static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
    return false;
}
static inline void arm_handle_psci_call(ARMCPU *cpu)
{
    g_assert_not_reached();
}
#else
/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
/* Actually handle a PSCI call */
void arm_handle_psci_call(ARMCPU *cpu);
#endif

/**
 * arm_clear_exclusive: clear the exclusive monitor
 * @env: CPU env
 * Clear the CPU's exclusive monitor, like the guest CLREX instruction.
 */
static inline void arm_clear_exclusive(CPUARMState *env)
{
    env->exclusive_addr = -1;
}

/**
 * ARMFaultType: type of an ARM MMU fault
 * This corresponds to the v8A pseudocode's Fault enumeration,
 * with extensions for QEMU internal conditions.
 */
typedef enum ARMFaultType {
    ARMFault_None,
    ARMFault_AccessFlag,
    ARMFault_Alignment,
    ARMFault_Background,
    ARMFault_Domain,
    ARMFault_Permission,
    ARMFault_Translation,
    ARMFault_AddressSize,
    ARMFault_SyncExternal,
    ARMFault_SyncExternalOnWalk,
    ARMFault_SyncParity,
    ARMFault_SyncParityOnWalk,
    ARMFault_AsyncParity,
    ARMFault_AsyncExternal,
    ARMFault_Debug,
    ARMFault_TLBConflict,
    ARMFault_Lockdown,
    ARMFault_Exclusive,
    ARMFault_ICacheMaint,
    ARMFault_QEMU_NSCExec, /* v8M: NS executing in S&NSC memory */
    ARMFault_QEMU_SFault, /* v8M: SecureFault INVTRAN, INVEP or AUVIOL */
} ARMFaultType;

/**
 * ARMMMUFaultInfo: Information describing an ARM MMU Fault
 * @type: Type of fault
 * @level: Table walk level (for translation, access flag and permission faults)
 * @domain: Domain of the fault address (for non-LPAE CPUs only)
 * @s2addr: Address that caused a fault at stage 2
 * @stage2: True if we faulted at stage 2
 * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
 * @s1ns: True if we faulted on a non-secure IPA while in secure state
 * @ea: True if we should set the EA (external abort type) bit in syndrome
 */
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
struct ARMMMUFaultInfo {
    ARMFaultType type;
    target_ulong s2addr;
    int level;
    int domain;
    bool stage2;
    bool s1ptw;
    bool s1ns;
    bool ea;
};

/**
 * arm_fi_to_sfsc: Convert fault info struct to short-format FSC
 * Compare pseudocode EncodeSDFSC(), though unlike that function
 * we set up a whole FSR-format code including domain field and
 * putting the high bit of the FSC into bit 10.
 */
static inline uint32_t arm_fi_to_sfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AccessFlag:
        fsc = fi->level == 1 ? 0x3 : 0x6;
        break;
    case ARMFault_Alignment:
        fsc = 0x1;
        break;
    case ARMFault_Permission:
        fsc = fi->level == 1 ? 0xd : 0xf;
        break;
    case ARMFault_Domain:
        fsc = fi->level == 1 ? 0x9 : 0xb;
        break;
    case ARMFault_Translation:
        fsc = fi->level == 1 ? 0x5 : 0x7;
        break;
    case ARMFault_SyncExternal:
        fsc = 0x8 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = fi->level == 1 ? 0xc : 0xe;
        fsc |= (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x409;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = fi->level == 1 ? 0x40c : 0x40e;
        break;
    case ARMFault_AsyncParity:
        fsc = 0x408;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x406 | (fi->ea << 12);
        break;
    case ARMFault_Debug:
        fsc = 0x2;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x400;
        break;
    case ARMFault_Lockdown:
        fsc = 0x404;
        break;
    case ARMFault_Exclusive:
        fsc = 0x405;
        break;
    case ARMFault_ICacheMaint:
        fsc = 0x4;
        break;
    case ARMFault_Background:
        fsc = 0x0;
        break;
    case ARMFault_QEMU_NSCExec:
        fsc = M_FAKE_FSR_NSC_EXEC;
        break;
    case ARMFault_QEMU_SFault:
        fsc = M_FAKE_FSR_SFAULT;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * short-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= (fi->domain << 4);
    return fsc;
}
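
/*
 * Worked example (for illustration only; not used by any code): a level-2
 * translation fault in domain 1 encodes as the short-format FSC 0x7 with
 * the domain number placed in bits [7:4]:
 *
 *   ARMMMUFaultInfo fi = {
 *       .type = ARMFault_Translation, .level = 2, .domain = 1,
 *   };
 *   uint32_t fsc = arm_fi_to_sfsc(&fi);   // 0x7 | (1 << 4) == 0x17
 */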

/**
 * arm_fi_to_lfsc: Convert fault info struct to long-format FSC
 * Compare pseudocode EncodeLDFSC(), though unlike that function
 * we fill in also the LPAE bit 9 of a DFSR format.
 */
static inline uint32_t arm_fi_to_lfsc(ARMMMUFaultInfo *fi)
{
    uint32_t fsc;

    switch (fi->type) {
    case ARMFault_None:
        return 0;
    case ARMFault_AddressSize:
        fsc = fi->level & 3;
        break;
    case ARMFault_AccessFlag:
        fsc = (fi->level & 3) | (0x2 << 2);
        break;
    case ARMFault_Permission:
        fsc = (fi->level & 3) | (0x3 << 2);
        break;
    case ARMFault_Translation:
        fsc = (fi->level & 3) | (0x1 << 2);
        break;
    case ARMFault_SyncExternal:
        fsc = 0x10 | (fi->ea << 12);
        break;
    case ARMFault_SyncExternalOnWalk:
        fsc = (fi->level & 3) | (0x5 << 2) | (fi->ea << 12);
        break;
    case ARMFault_SyncParity:
        fsc = 0x18;
        break;
    case ARMFault_SyncParityOnWalk:
        fsc = (fi->level & 3) | (0x7 << 2);
        break;
    case ARMFault_AsyncParity:
        fsc = 0x19;
        break;
    case ARMFault_AsyncExternal:
        fsc = 0x11 | (fi->ea << 12);
        break;
    case ARMFault_Alignment:
        fsc = 0x21;
        break;
    case ARMFault_Debug:
        fsc = 0x22;
        break;
    case ARMFault_TLBConflict:
        fsc = 0x30;
        break;
    case ARMFault_Lockdown:
        fsc = 0x34;
        break;
    case ARMFault_Exclusive:
        fsc = 0x35;
        break;
    default:
        /* Other faults can't occur in a context that requires a
         * long-format status code.
         */
        g_assert_not_reached();
    }

    fsc |= 1 << 9;
    return fsc;
}
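
/*
 * Worked example (for illustration only; not used by any code): a level-3
 * permission fault maps to the long-descriptor status code 0xf, and
 * arm_fi_to_lfsc() also sets the DFSR LPAE bit (bit 9):
 *
 *   ARMMMUFaultInfo fi = { .type = ARMFault_Permission, .level = 3 };
 *   uint32_t fsc = arm_fi_to_lfsc(&fi);   // 0xf | (1 << 9) == 0x20f
 */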

static inline bool arm_extabort_type(MemTxResult result)
{
    /* The EA bit in syndromes and fault status registers is an
     * IMPDEF classification of external aborts. ARM implementations
     * usually use this to indicate AXI bus Decode error (0) or
     * Slave error (1); in QEMU we follow that.
     */
    return result != MEMTX_DECODE_ERROR;
}

bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr);

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    } else {
        return mmu_idx | ARM_MMU_IDX_A;
    }
}

static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx)
{
    /* AArch64 is always a-profile. */
    return mmu_idx | ARM_MMU_IDX_A;
}

int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx);

/*
 * Return the MMU index for a v7M CPU with all relevant information
 * manually specified.
 */
ARMMMUIdx arm_v7m_mmu_idx_all(CPUARMState *env,
                              bool secstate, bool priv, bool negpri);

/*
 * Return the MMU index for a v7M CPU in the specified security and
 * privilege state.
 */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate_and_priv(CPUARMState *env,
                                                bool secstate, bool priv);

/* Return the MMU index for a v7M CPU in the specified security state */
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate);

/* Return true if the stage 1 translation regime is using LPAE format page
 * tables
 */
bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr) QEMU_NORETURN;

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr);

/* Call any registered EL change hooks */
static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}
static inline void arm_call_el_change_hook(ARMCPU *cpu)
{
    ARMELChangeHook *hook, *next;
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        hook->hook(cpu, hook->opaque);
    }
}

/* Return true if this address translation regime has two ranges. */
static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return true if this address translation regime is secure */
static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_E2:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
        return false;
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return true;
    default:
        g_assert_not_reached();
    }
}

static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_SE20_2_PAN:
        return true;
    default:
        return false;
    }
}

/* Return the exception level which controls this address translation regime */
static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_SE20_0:
    case ARMMMUIdx_SE20_2:
    case ARMMMUIdx_SE20_2_PAN:
    case ARMMMUIdx_E20_0:
    case ARMMMUIdx_E20_2:
    case ARMMMUIdx_E20_2_PAN:
    case ARMMMUIdx_Stage2:
    case ARMMMUIdx_Stage2_S:
    case ARMMMUIdx_SE2:
    case ARMMMUIdx_E2:
        return 2;
    case ARMMMUIdx_SE3:
        return 3;
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_Stage1_SE0:
        return arm_el_is_aa64(env, 3) ? 1 : 3;
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
    case ARMMMUIdx_MPrivNegPri:
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPriv:
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MSPrivNegPri:
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPriv:
    case ARMMMUIdx_MSUser:
        return 1;
    default:
        g_assert_not_reached();
    }
}

/* Return the TCR controlling this translation regime */
static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    if (mmu_idx == ARMMMUIdx_Stage2) {
        return &env->cp15.vtcr_el2;
    }
    if (mmu_idx == ARMMMUIdx_Stage2_S) {
        /*
         * Note: Secure stage 2 nominally shares fields from VTCR_EL2, but
         * those are not currently used by QEMU, so just return VSTCR_EL2.
         */
        return &env->cp15.vstcr_el2;
    }
    return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
}

/* Return the FSR value for a debug exception (watchpoint, hardware
 * breakpoint or BKPT insn) targeting the specified exception level.
 */
static inline uint32_t arm_debug_exception_fsr(CPUARMState *env)
{
    ARMMMUFaultInfo fi = { .type = ARMFault_Debug };
    int target_el = arm_debug_target_el(env);
    bool using_lpae = false;

    if (target_el == 2 || arm_el_is_aa64(env, target_el)) {
        using_lpae = true;
    } else {
        if (arm_feature(env, ARM_FEATURE_LPAE) &&
            (env->cp15.tcr_el[target_el].raw_tcr & TTBCR_EAE)) {
            using_lpae = true;
        }
    }

    if (using_lpae) {
        return arm_fi_to_lfsc(&fi);
    } else {
        return arm_fi_to_sfsc(&fi);
    }
}
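
/*
 * For reference (derived from the two encoders above; illustration only):
 * a debug exception therefore reports 0x222 when the target regime uses
 * the long-descriptor format (FSC 0x22 plus the LPAE bit), and plain 0x2
 * in the short format.
 */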

/**
 * arm_num_brps: Return number of implemented breakpoints.
 * Note that the ID register BRPS field is "number of bps - 1",
 * and we return the actual number of breakpoints.
 */
static inline int arm_num_brps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1;
    }
}

/**
 * arm_num_wrps: Return number of implemented watchpoints.
 * Note that the ID register WRPS field is "number of wps - 1",
 * and we return the actual number of watchpoints.
 */
static inline int arm_num_wrps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1;
    }
}

/**
 * arm_num_ctx_cmps: Return number of implemented context comparators.
 * Note that the ID register CTX_CMPS field is "number of cmps - 1",
 * and we return the actual number of comparators.
 */
static inline int arm_num_ctx_cmps(ARMCPU *cpu)
{
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1;
    } else {
        return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1;
    }
}

/**
 * v7m_using_psp: Return true if using process stack pointer
 * Return true if the CPU is currently using the process stack
 * pointer, or false if it is using the main stack pointer.
 */
static inline bool v7m_using_psp(CPUARMState *env)
{
    /* Handler mode always uses the main stack; for thread mode
     * the CONTROL.SPSEL bit determines the answer.
     * Note that in v7M it is not possible to be in Handler mode with
     * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
     */
    return !arm_v7m_is_handler_mode(env) &&
        env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
}

/**
 * v7m_sp_limit: Return SP limit for current CPU state
 * Return the SP limit value for the current CPU security state
 * and stack pointer.
 */
static inline uint32_t v7m_sp_limit(CPUARMState *env)
{
    if (v7m_using_psp(env)) {
        return env->v7m.psplim[env->v7m.secure];
    } else {
        return env->v7m.msplim[env->v7m.secure];
    }
}

/**
 * v7m_cpacr_pass:
 * Return true if the v7M CPACR permits access to the FPU for the specified
 * security state and privilege level.
 */
static inline bool v7m_cpacr_pass(CPUARMState *env,
                                  bool is_secure, bool is_priv)
{
    switch (extract32(env->v7m.cpacr[is_secure], 20, 2)) {
    case 0:
    case 2: /* UNPREDICTABLE: we treat like 0 */
        return false;
    case 1:
        return is_priv;
    case 3:
        return true;
    default:
        g_assert_not_reached();
    }
}
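
/*
 * For reference (illustration only): the two bits extracted above at
 * [21:20] are the CPACR.CP10 access field, so e.g. a value of 0b01
 * ("privileged access only") makes v7m_cpacr_pass() return true for
 * privileged code and false for unprivileged code.
 */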

/**
 * aarch32_mode_name(): Return name of the AArch32 CPU mode
 * @psr: Program Status Register indicating CPU mode
 *
 * Returns, for debug logging purposes, a printable representation
 * of the AArch32 CPU mode ("svc", "usr", etc) as indicated by
 * the low bits of the specified PSR.
 */
static inline const char *aarch32_mode_name(uint32_t psr)
{
    static const char cpu_mode_names[16][4] = {
        "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
        "???", "???", "hyp", "und", "???", "???", "???", "sys"
    };

    return cpu_mode_names[psr & 0xf];
}
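
/*
 * Example (illustration only): the defined AArch32 modes all have M[4]
 * set, so only the low four PSR bits index the table above;
 * aarch32_mode_name(0x13) returns "svc" and aarch32_mode_name(0x10)
 * returns "usr".
 */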

/**
 * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
 * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_virq(ARMCPU *cpu);

/**
 * arm_cpu_update_vfiq: Update CPU_INTERRUPT_VFIQ bit in cs->interrupt_request
 *
 * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
 * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
 * Must be called with the iothread lock held.
 */
void arm_cpu_update_vfiq(ARMCPU *cpu);

/**
 * arm_mmu_idx_el:
 * @env: The cpu environment
 * @el: The EL to use.
 *
 * Return the full ARMMMUIdx for the translation regime for EL.
 */
ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el);

/**
 * arm_mmu_idx:
 * @env: The cpu environment
 *
 * Return the full ARMMMUIdx for the current translation regime.
 */
ARMMMUIdx arm_mmu_idx(CPUARMState *env);

/**
 * arm_stage1_mmu_idx:
 * @env: The cpu environment
 *
 * Return the ARMMMUIdx for the stage1 traversal for the current regime.
 */
#ifdef CONFIG_USER_ONLY
static inline ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env)
{
    return ARMMMUIdx_Stage1_E0;
}
#else
ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env);
#endif

/**
 * arm_mmu_idx_is_stage1_of_2:
 * @mmu_idx: The ARMMMUIdx to test
 *
 * Return true if @mmu_idx is a NOTLB mmu_idx that is the
 * first stage of a two stage regime.
 */
static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx) {
    case ARMMMUIdx_Stage1_E0:
    case ARMMMUIdx_Stage1_E1:
    case ARMMMUIdx_Stage1_E1_PAN:
    case ARMMMUIdx_Stage1_SE0:
    case ARMMMUIdx_Stage1_SE1:
    case ARMMMUIdx_Stage1_SE1_PAN:
        return true;
    default:
        return false;
    }
}

static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features,
                                               const ARMISARegisters *id)
{
    uint32_t valid = CPSR_M | CPSR_AIF | CPSR_IL | CPSR_NZCV;

    if ((features >> ARM_FEATURE_V4T) & 1) {
        valid |= CPSR_T;
    }
    if ((features >> ARM_FEATURE_V5) & 1) {
        valid |= CPSR_Q; /* V5TE in reality */
    }
    if ((features >> ARM_FEATURE_V6) & 1) {
        valid |= CPSR_E | CPSR_GE;
    }
    if ((features >> ARM_FEATURE_THUMB2) & 1) {
        valid |= CPSR_IT;
    }
    if (isar_feature_aa32_jazelle(id)) {
        valid |= CPSR_J;
    }
    if (isar_feature_aa32_pan(id)) {
        valid |= CPSR_PAN;
    }
    if (isar_feature_aa32_dit(id)) {
        valid |= CPSR_DIT;
    }
    if (isar_feature_aa32_ssbs(id)) {
        valid |= CPSR_SSBS;
    }

    return valid;
}

static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
{
    uint32_t valid;

    valid = PSTATE_M | PSTATE_DAIF | PSTATE_IL | PSTATE_SS | PSTATE_NZCV;
    if (isar_feature_aa64_bti(id)) {
        valid |= PSTATE_BTYPE;
    }
    if (isar_feature_aa64_pan(id)) {
        valid |= PSTATE_PAN;
    }
    if (isar_feature_aa64_uao(id)) {
        valid |= PSTATE_UAO;
    }
    if (isar_feature_aa64_dit(id)) {
        valid |= PSTATE_DIT;
    }
    if (isar_feature_aa64_ssbs(id)) {
        valid |= PSTATE_SSBS;
    }
    if (isar_feature_aa64_mte(id)) {
        valid |= PSTATE_TCO;
    }

    return valid;
}

/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.
 */
typedef struct ARMVAParameters {
    unsigned tsz    : 8;
    unsigned select : 1;
    bool tbi        : 1;
    bool epd        : 1;
    bool hpd        : 1;
    bool using16k   : 1;
    bool using64k   : 1;
} ARMVAParameters;

ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data);

static inline int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /*
     * No such thing as secure EL1 if EL3 is aarch32,
     * so update the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

/* Determine if allocation tags are available. */
static inline bool allocation_tag_access_enabled(CPUARMState *env, int el,
                                                 uint64_t sctlr)
{
    if (el < 3
        && arm_feature(env, ARM_FEATURE_EL3)
        && !(env->cp15.scr_el3 & SCR_ATA)) {
        return false;
    }
    if (el < 2 && arm_feature(env, ARM_FEATURE_EL2)) {
        uint64_t hcr = arm_hcr_el2_eff(env);
        if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) {
            return false;
        }
    }
    sctlr &= (el == 0 ? SCTLR_ATA0 : SCTLR_ATA);
    return sctlr != 0;
}

#ifndef CONFIG_USER_ONLY
/* Security attributes for an address, as returned by v8m_security_lookup. */
typedef struct V8M_SAttributes {
    bool subpage; /* true if these attrs don't cover the whole TARGET_PAGE */
    bool ns;
    bool nsc;
    uint8_t sregion;
    bool srvalid;
    uint8_t iregion;
    bool irvalid;
} V8M_SAttributes;

void v8m_security_lookup(CPUARMState *env, uint32_t address,
                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
                         V8M_SAttributes *sattrs);

bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                       MMUAccessType access_type, ARMMMUIdx mmu_idx,
                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
                       int *prot, bool *is_subpage,
                       ARMMMUFaultInfo *fi, uint32_t *mregion);

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
    unsigned int attrs:8; /* as in the MAIR register encoding */
    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

bool get_phys_addr(CPUARMState *env, target_ulong address,
                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                   target_ulong *page_size,
                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
    __attribute__((nonnull));

void arm_log_exception(int idx);

#endif /* !CONFIG_USER_ONLY */

/*
 * The log2 of the words in the tag block, for GMID_EL1.BS.
 * This is the maximum, 256 bytes, which manipulates 64 bits of tags.
 */
#define GMID_EL1_BS 6

/* We associate one allocation tag per 16 bytes, the minimum. */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)

/*
 * SVE predicates are 1/8 the size of SVE vectors, and cannot use
 * the same simd_desc() encoding due to restrictions on size.
 * Use these instead.
 */
FIELD(PREDDESC, OPRSZ, 0, 6)
FIELD(PREDDESC, ESZ, 6, 2)
FIELD(PREDDESC, DATA, 8, 24)

/*
 * The SVE simd_data field, for memory ops, contains either
 * rd (5 bits) or a shift count (2 bits).
 */
#define SVE_MTEDESC_SHIFT 5

/* Bits within a descriptor passed to the helper_mte_check* functions. */
FIELD(MTEDESC, MIDX, 0, 4)
FIELD(MTEDESC, TBI, 4, 2)
FIELD(MTEDESC, TCMA, 6, 2)
FIELD(MTEDESC, WRITE, 8, 1)
FIELD(MTEDESC, SIZEM1, 9, SIMD_DATA_BITS - 9) /* size - 1 */

bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr);
uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra);

static inline int allocation_tag_from_addr(uint64_t ptr)
{
    return extract64(ptr, 56, 4);
}

static inline uint64_t address_with_allocation_tag(uint64_t ptr, int rtag)
{
    return deposit64(ptr, 56, 4, rtag);
}
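
/*
 * Worked example (illustration only): the allocation tag lives in
 * address bits [59:56], so
 *
 *   address_with_allocation_tag(0x0000ffff12345678ull, 0xa)
 *       == 0x0a00ffff12345678ull
 *   allocation_tag_from_addr(0x0a00ffff12345678ull) == 0xa
 */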

/* Return true if tbi bits mean that the access is checked. */
static inline bool tbi_check(uint32_t desc, int bit55)
{
    return (desc >> (R_MTEDESC_TBI_SHIFT + bit55)) & 1;
}

/* Return true if tcma bits mean that the access is unchecked. */
static inline bool tcma_check(uint32_t desc, int bit55, int ptr_tag)
{
    /*
     * We had extracted bit55 and ptr_tag for other reasons, so fold
     * (ptr<59:55> == 00000 || ptr<59:55> == 11111) into a single test.
     */
    bool match = ((ptr_tag + bit55) & 0xf) == 0;
    bool tcma = (desc >> (R_MTEDESC_TCMA_SHIFT + bit55)) & 1;
    return tcma && match;
}
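
/*
 * Worked example (illustration only), assuming a descriptor whose
 * MTEDESC.TCMA field is 0b10, i.e. the bit selected for bit55 == 1 is set:
 *
 *   tcma_check(desc, 1, 0xf)  -> true   ((0xf + 1) & 0xf == 0, tag matches)
 *   tcma_check(desc, 1, 0x7)  -> false  (tag does not match)
 */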

/*
 * For TBI, ideally, we would do nothing. Proper behaviour on fault is
 * for the tag to be present in the FAR_ELx register. But for user-only
 * mode, we do not have a TLB with which to implement this, so we must
 * remove the top byte.
 */
static inline uint64_t useronly_clean_ptr(uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    /* TBI0 is known to be enabled, while TBI1 is disabled. */
    ptr &= sextract64(ptr, 0, 56);
#endif
    return ptr;
}
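
/*
 * Note on the masking trick above (illustration only): sextract64(ptr, 0, 56)
 * sign-extends from bit 55, so when bit 55 is 0 (the TBI0 half of the address
 * space) the AND clears the top byte, and when bit 55 is 1 (TBI1, disabled
 * here) the AND leaves the pointer unchanged.
 */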

static inline uint64_t useronly_maybe_clean_ptr(uint32_t desc, uint64_t ptr)
{
#ifdef CONFIG_USER_ONLY
    int64_t clean_ptr = sextract64(ptr, 0, 56);
    if (tbi_check(desc, clean_ptr < 0)) {
        ptr = clean_ptr;
    }
#endif
    return ptr;
}

/* Values for M-profile PSR.ECI for MVE insns */
enum MVEECIState {
    ECI_NONE = 0, /* No completed beats */
    ECI_A0 = 1, /* Completed: A0 */
    ECI_A0A1 = 2, /* Completed: A0, A1 */
    /* 3 is reserved */
    ECI_A0A1A2 = 4, /* Completed: A0, A1, A2 */
    ECI_A0A1A2B0 = 5, /* Completed: A0, A1, A2, B0 */
    /* All other values reserved */
};

/* Definitions for the PMU registers */
#define PMCRN_MASK  0xf800
#define PMCRN_SHIFT 11
#define PMCRLC  0x40
#define PMCRDP  0x20
#define PMCRX   0x10
#define PMCRD   0x8
#define PMCRC   0x4
#define PMCRP   0x2
#define PMCRE   0x1
/*
 * Mask of PMCR bits writeable by guest (not including WO bits like C, P,
 * which can be written as 1 to trigger behaviour but which stay RAZ).
 */
#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

#define PMXEVTYPER_P          0x80000000
#define PMXEVTYPER_U          0x40000000
#define PMXEVTYPER_NSK        0x20000000
#define PMXEVTYPER_NSU        0x10000000
#define PMXEVTYPER_NSH        0x08000000
#define PMXEVTYPER_M          0x04000000
#define PMXEVTYPER_MT         0x02000000
#define PMXEVTYPER_EVTCOUNT   0x0000ffff
#define PMXEVTYPER_MASK       (PMXEVTYPER_P | PMXEVTYPER_U | PMXEVTYPER_NSK | \
                               PMXEVTYPER_NSU | PMXEVTYPER_NSH | \
                               PMXEVTYPER_M | PMXEVTYPER_MT | \
                               PMXEVTYPER_EVTCOUNT)

#define PMCCFILTR             0xf8000000
#define PMCCFILTR_M           PMXEVTYPER_M
#define PMCCFILTR_EL0         (PMCCFILTR | PMCCFILTR_M)

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
static inline uint64_t pmu_counter_mask(CPUARMState *env)
{
    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
}
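
/*
 * Worked example (illustration only): a CPU reporting PMCR.N == 4 has four
 * event counters plus the cycle counter, so pmu_num_counters() returns 4
 * and pmu_counter_mask() returns (1 << 31) | 0xf == 0x8000000f
 * (bit 31 being the cycle counter).
 */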

#ifdef TARGET_AARCH64
int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg);
int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg);
int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
#endif

#endif /* TARGET_ARM_INTERNALS_H */