2
0

armv7m_nvic.c 92 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757
  1. /*
  2. * ARM Nested Vectored Interrupt Controller
  3. *
  4. * Copyright (c) 2006-2007 CodeSourcery.
  5. * Written by Paul Brook
  6. *
  7. * This code is licensed under the GPL.
  8. *
  9. * The ARMv7M System controller is fairly tightly tied in with the
  10. * NVIC. Much of that is also implemented here.
  11. */
  12. #include "qemu/osdep.h"
  13. #include "qapi/error.h"
  14. #include "hw/sysbus.h"
  15. #include "migration/vmstate.h"
  16. #include "qemu/timer.h"
  17. #include "hw/intc/armv7m_nvic.h"
  18. #include "hw/irq.h"
  19. #include "hw/qdev-properties.h"
  20. #include "system/tcg.h"
  21. #include "system/runstate.h"
  22. #include "target/arm/cpu.h"
  23. #include "target/arm/cpu-features.h"
  24. #include "exec/cputlb.h"
  25. #include "exec/memop.h"
  26. #include "qemu/log.h"
  27. #include "qemu/module.h"
  28. #include "trace.h"
  29. /* IRQ number counting:
  30. *
  31. * the num-irq property counts the number of external IRQ lines
  32. *
  33. * NVICState::num_irq counts the total number of exceptions
  34. * (external IRQs, the 15 internal exceptions including reset,
  35. * and one for the unused exception number 0).
  36. *
  37. * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines.
  38. *
  39. * NVIC_MAX_VECTORS is the highest permitted number of exceptions.
  40. *
  41. * Iterating through all exceptions should typically be done with
  42. * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0.
  43. *
  44. * The external qemu_irq lines are the NVIC's external IRQ lines,
  45. * so line 0 is exception 16.
  46. *
  47. * In the terminology of the architecture manual, "interrupts" are
  48. * a subcategory of exception referring to the external interrupts
  49. * (which are exception numbers NVIC_FIRST_IRQ and upward).
  50. * For historical reasons QEMU tends to use "interrupt" and
  51. * "exception" more or less interchangeably.
  52. */
  53. #define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
  54. #define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)
  55. /* Effective running priority of the CPU when no exception is active
  56. * (higher than the highest possible priority value)
  57. */
  58. #define NVIC_NOEXC_PRIO 0x100
  59. /* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
  60. #define NVIC_NS_PRIO_LIMIT 0x80
/*
 * ID register bytes exposed through the NVIC's memory-mapped ID space.
 * NOTE(review): these look like the standard peripheral/component ID
 * values documented for the Cortex-M3 NVIC — confirm against the TRM.
 */
static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};
  64. static void signal_sysresetreq(NVICState *s)
  65. {
  66. if (qemu_irq_is_connected(s->sysresetreq)) {
  67. qemu_irq_pulse(s->sysresetreq);
  68. } else {
  69. /*
  70. * Default behaviour if the SoC doesn't need to wire up
  71. * SYSRESETREQ (eg to a system reset controller of some kind):
  72. * perform a system reset via the usual QEMU API.
  73. */
  74. qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
  75. }
  76. }
/*
 * Return the group priority of the current pending interrupt,
 * or NVIC_NOEXC_PRIO if no interrupt is pending. The cached value
 * is maintained by nvic_recompute_state(), so this is a pure read.
 */
static int nvic_pending_prio(NVICState *s)
{
    /* return the group priority of the current pending interrupt,
     * or NVIC_NOEXC_PRIO if no interrupt is pending
     */
    return s->vectpending_prio;
}
  84. /* Return the value of the ISCR RETTOBASE bit:
  85. * 1 if there is exactly one active exception
  86. * 0 if there is more than one active exception
  87. * UNKNOWN if there are no active exceptions (we choose 1,
  88. * which matches the choice Cortex-M3 is documented as making).
  89. *
  90. * NB: some versions of the documentation talk about this
  91. * counting "active exceptions other than the one shown by IPSR";
  92. * this is only different in the obscure corner case where guest
  93. * code has manually deactivated an exception and is about
  94. * to fail an exception-return integrity check. The definition
  95. * above is the one from the v8M ARM ARM and is also in line
  96. * with the behaviour documented for the Cortex-M3.
  97. */
  98. static bool nvic_rettobase(NVICState *s)
  99. {
  100. int irq, nhand = 0;
  101. bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
  102. for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
  103. if (s->vectors[irq].active ||
  104. (check_sec && irq < NVIC_INTERNAL_VECTORS &&
  105. s->sec_vectors[irq].active)) {
  106. nhand++;
  107. if (nhand == 2) {
  108. return 0;
  109. }
  110. }
  111. }
  112. return 1;
  113. }
  114. /* Return the value of the ISCR ISRPENDING bit:
  115. * 1 if an external interrupt is pending
  116. * 0 if no external interrupt is pending
  117. */
  118. static bool nvic_isrpending(NVICState *s)
  119. {
  120. int irq;
  121. /*
  122. * We can shortcut if the highest priority pending interrupt
  123. * happens to be external; if not we need to check the whole
  124. * vectors[] array.
  125. */
  126. if (s->vectpending > NVIC_FIRST_IRQ) {
  127. return true;
  128. }
  129. for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
  130. if (s->vectors[irq].pending) {
  131. return true;
  132. }
  133. }
  134. return false;
  135. }
  136. static bool exc_is_banked(int exc)
  137. {
  138. /* Return true if this is one of the limited set of exceptions which
  139. * are banked (and thus have state in sec_vectors[])
  140. */
  141. return exc == ARMV7M_EXCP_HARD ||
  142. exc == ARMV7M_EXCP_MEM ||
  143. exc == ARMV7M_EXCP_USAGE ||
  144. exc == ARMV7M_EXCP_SVC ||
  145. exc == ARMV7M_EXCP_PENDSV ||
  146. exc == ARMV7M_EXCP_SYSTICK;
  147. }
/* Return a mask word which clears the subpriority bits from
 * a priority value for an M-profile exception, leaving only
 * the group priority.
 */
static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure)
{
    /* prigroup[] is banked by security state, hence the 'secure' index */
    return ~0U << (s->prigroup[secure] + 1);
}
  156. static bool exc_targets_secure(NVICState *s, int exc)
  157. {
  158. /* Return true if this non-banked exception targets Secure state. */
  159. if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
  160. return false;
  161. }
  162. if (exc >= NVIC_FIRST_IRQ) {
  163. return !s->itns[exc];
  164. }
  165. /* Function shouldn't be called for banked exceptions. */
  166. assert(!exc_is_banked(exc));
  167. switch (exc) {
  168. case ARMV7M_EXCP_NMI:
  169. case ARMV7M_EXCP_BUS:
  170. return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
  171. case ARMV7M_EXCP_SECURE:
  172. return true;
  173. case ARMV7M_EXCP_DEBUG:
  174. /* TODO: controlled by DEMCR.SDME, which we don't yet implement */
  175. return false;
  176. default:
  177. /* reset, and reserved (unused) low exception numbers.
  178. * We'll get called by code that loops through all the exception
  179. * numbers, but it doesn't matter what we return here as these
  180. * non-existent exceptions will never be pended or active.
  181. */
  182. return true;
  183. }
  184. }
  185. static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
  186. {
  187. /* Return the group priority for this exception, given its raw
  188. * (group-and-subgroup) priority value and whether it is targeting
  189. * secure state or not.
  190. */
  191. if (rawprio < 0) {
  192. return rawprio;
  193. }
  194. rawprio &= nvic_gprio_mask(s, targets_secure);
  195. /* AIRCR.PRIS causes us to squash all NS priorities into the
  196. * lower half of the total range
  197. */
  198. if (!targets_secure &&
  199. (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
  200. rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
  201. }
  202. return rawprio;
  203. }
/* Recompute vectpending and exception_prio for a CPU which implements
 * the Security extension
 */
static void nvic_recompute_state_secure(NVICState *s)
{
    int i, bank;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;
    bool pending_is_s_banked = false;
    int pend_subprio = 0;

    /* R_CQRV: precedence is by:
     * - lowest group priority; if both the same then
     * - lowest subpriority; if both the same then
     * - lowest exception number; if both the same (ie banked) then
     * - secure exception takes precedence
     * Compare pseudocode RawExecutionPriority.
     * Annoyingly, now we have two prigroup values (for S and NS)
     * we can't do the loop comparison on raw priority values.
     */
    for (i = 1; i < s->num_irq; i++) {
        /* Visit the Secure bank first: with the strict '<' comparisons
         * below, a tie on (group prio, subprio, exception number) is
         * then won by the Secure version, as the architecture requires.
         */
        for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
            VecInfo *vec;
            int prio, subprio;
            bool targets_secure;

            if (bank == M_REG_S) {
                if (!exc_is_banked(i)) {
                    /* Non-banked exceptions have state only in vectors[] */
                    continue;
                }
                vec = &s->sec_vectors[i];
                targets_secure = true;
            } else {
                vec = &s->vectors[i];
                targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
            }

            prio = exc_group_prio(s, vec->prio, targets_secure);
            /* Subpriority: the raw priority bits below the group mask */
            subprio = vec->prio & ~nvic_gprio_mask(s, targets_secure);
            if (vec->enabled && vec->pending &&
                ((prio < pend_prio) ||
                 (prio == pend_prio && prio >= 0 && subprio < pend_subprio))) {
                pend_prio = prio;
                pend_subprio = subprio;
                pend_irq = i;
                pending_is_s_banked = (bank == M_REG_S);
            }
            if (vec->active && prio < active_prio) {
                active_prio = prio;
            }
        }
    }

    s->vectpending_is_s_banked = pending_is_s_banked;
    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state_secure(s->vectpending,
                                      s->vectpending_is_s_banked,
                                      s->vectpending_prio,
                                      s->exception_prio);
}
  263. /* Recompute vectpending and exception_prio */
  264. static void nvic_recompute_state(NVICState *s)
  265. {
  266. int i;
  267. int pend_prio = NVIC_NOEXC_PRIO;
  268. int active_prio = NVIC_NOEXC_PRIO;
  269. int pend_irq = 0;
  270. /* In theory we could write one function that handled both
  271. * the "security extension present" and "not present"; however
  272. * the security related changes significantly complicate the
  273. * recomputation just by themselves and mixing both cases together
  274. * would be even worse, so we retain a separate non-secure-only
  275. * version for CPUs which don't implement the security extension.
  276. */
  277. if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
  278. nvic_recompute_state_secure(s);
  279. return;
  280. }
  281. for (i = 1; i < s->num_irq; i++) {
  282. VecInfo *vec = &s->vectors[i];
  283. if (vec->enabled && vec->pending && vec->prio < pend_prio) {
  284. pend_prio = vec->prio;
  285. pend_irq = i;
  286. }
  287. if (vec->active && vec->prio < active_prio) {
  288. active_prio = vec->prio;
  289. }
  290. }
  291. if (active_prio > 0) {
  292. active_prio &= nvic_gprio_mask(s, false);
  293. }
  294. if (pend_prio > 0) {
  295. pend_prio &= nvic_gprio_mask(s, false);
  296. }
  297. s->vectpending = pend_irq;
  298. s->vectpending_prio = pend_prio;
  299. s->exception_prio = active_prio;
  300. trace_nvic_recompute_state(s->vectpending,
  301. s->vectpending_prio,
  302. s->exception_prio);
  303. }
/* Return the current execution priority of the CPU
 * (equivalent to the pseudocode ExecutionPriority function).
 * This is a value between -2 (NMI priority) and NVIC_NOEXC_PRIO.
 */
static inline int nvic_exec_prio(NVICState *s)
{
    CPUARMState *env = &s->cpu->env;
    int running = NVIC_NOEXC_PRIO;

    /* BASEPRI in each bank masks at its group priority; 'running' ends
     * up as the numerically lowest (most restrictive) of the two.
     */
    if (env->v7m.basepri[M_REG_NS] > 0) {
        running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
    }

    if (env->v7m.basepri[M_REG_S] > 0) {
        int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
        if (running > basepri) {
            running = basepri;
        }
    }

    /* PRIMASK_NS raises execution priority to 0, except that with
     * AIRCR.PRIS set the NS mask can only reach NVIC_NS_PRIO_LIMIT.
     */
    if (env->v7m.primask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
            if (running > NVIC_NS_PRIO_LIMIT) {
                running = NVIC_NS_PRIO_LIMIT;
            }
        } else {
            running = 0;
        }
    }

    if (env->v7m.primask[M_REG_S]) {
        running = 0;
    }

    /* FAULTMASK_NS: with BFHFNMINS set it masks down to -1; otherwise
     * it behaves like PRIMASK_NS (including the PRIS limiting above).
     */
    if (env->v7m.faultmask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
            running = -1;
        } else {
            if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
                if (running > NVIC_NS_PRIO_LIMIT) {
                    running = NVIC_NS_PRIO_LIMIT;
                }
            } else {
                running = 0;
            }
        }
    }

    /* FAULTMASK_S masks to -3 or -1 depending on whether secure NMI/HF
     * priorities are in use (BFHFNMINS).
     */
    if (env->v7m.faultmask[M_REG_S]) {
        running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
    }

    /* consider priority of active handler */
    return MIN(running, s->exception_prio);
}
  352. bool armv7m_nvic_neg_prio_requested(NVICState *s, bool secure)
  353. {
  354. /* Return true if the requested execution priority is negative
  355. * for the specified security state, ie that security state
  356. * has an active NMI or HardFault or has set its FAULTMASK.
  357. * Note that this is not the same as whether the execution
  358. * priority is actually negative (for instance AIRCR.PRIS may
  359. * mean we don't allow FAULTMASK_NS to actually make the execution
  360. * priority negative). Compare pseudocode IsReqExcPriNeg().
  361. */
  362. if (s->cpu->env.v7m.faultmask[secure]) {
  363. return true;
  364. }
  365. if (secure ? s->sec_vectors[ARMV7M_EXCP_HARD].active :
  366. s->vectors[ARMV7M_EXCP_HARD].active) {
  367. return true;
  368. }
  369. if (s->vectors[ARMV7M_EXCP_NMI].active &&
  370. exc_targets_secure(s, ARMV7M_EXCP_NMI) == secure) {
  371. return true;
  372. }
  373. return false;
  374. }
/*
 * Return true if the pending exception's group priority is strictly
 * higher (numerically lower) than the current execution priority,
 * ie the pending exception would preempt.
 */
bool armv7m_nvic_can_take_pending_exception(NVICState *s)
{
    return nvic_exec_prio(s) > nvic_pending_prio(s);
}
/*
 * Return the cached raw execution priority (the priority of the
 * highest-priority active exception, ignoring PRIMASK/FAULTMASK/BASEPRI);
 * NVIC_NOEXC_PRIO if no exception is active.
 */
int armv7m_nvic_raw_execution_priority(NVICState *s)
{
    return s->exception_prio;
}
  383. /* caller must call nvic_irq_update() after this.
  384. * secure indicates the bank to use for banked exceptions (we assert if
  385. * we are passed secure=true for a non-banked exception).
  386. */
  387. static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
  388. {
  389. assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
  390. assert(irq < s->num_irq);
  391. prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits);
  392. if (secure) {
  393. assert(exc_is_banked(irq));
  394. s->sec_vectors[irq].prio = prio;
  395. } else {
  396. s->vectors[irq].prio = prio;
  397. }
  398. trace_nvic_set_prio(irq, secure, prio);
  399. }
  400. /* Return the current raw priority register value.
  401. * secure indicates the bank to use for banked exceptions (we assert if
  402. * we are passed secure=true for a non-banked exception).
  403. */
  404. static int get_prio(NVICState *s, unsigned irq, bool secure)
  405. {
  406. assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
  407. assert(irq < s->num_irq);
  408. if (secure) {
  409. assert(exc_is_banked(irq));
  410. return s->sec_vectors[irq].prio;
  411. } else {
  412. return s->vectors[irq].prio;
  413. }
  414. }
/* Recompute state and assert irq line accordingly.
 * Must be called after changes to:
 * vec->active, vec->enabled, vec->pending or vec->prio for any vector
 * prigroup
 */
static void nvic_irq_update(NVICState *s)
{
    int lvl;
    int pend_prio;

    nvic_recompute_state(s);
    pend_prio = nvic_pending_prio(s);

    /* Raise NVIC output if this IRQ would be taken, except that we
     * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which
     * will be checked for in arm_v7m_cpu_exec_interrupt()); changes
     * to those CPU registers don't cause us to recalculate the NVIC
     * pending info.
     */
    lvl = (pend_prio < s->exception_prio);
    trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
    /* excpout is the NVIC's interrupt-request line to the CPU */
    qemu_set_irq(s->excpout, lvl);
}
  436. /**
  437. * armv7m_nvic_clear_pending: mark the specified exception as not pending
  438. * @opaque: the NVIC
  439. * @irq: the exception number to mark as not pending
  440. * @secure: false for non-banked exceptions or for the nonsecure
  441. * version of a banked exception, true for the secure version of a banked
  442. * exception.
  443. *
  444. * Marks the specified exception as not pending. Note that we will assert()
  445. * if @secure is true and @irq does not specify one of the fixed set
  446. * of architecturally banked exceptions.
  447. */
  448. static void armv7m_nvic_clear_pending(NVICState *s, int irq, bool secure)
  449. {
  450. VecInfo *vec;
  451. assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
  452. if (secure) {
  453. assert(exc_is_banked(irq));
  454. vec = &s->sec_vectors[irq];
  455. } else {
  456. vec = &s->vectors[irq];
  457. }
  458. trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
  459. if (vec->pending) {
  460. vec->pending = 0;
  461. nvic_irq_update(s);
  462. }
  463. }
/* Pend an exception, including possibly escalating it to HardFault.
 *
 * This function handles both "normal" pending of interrupts and
 * exceptions, and also derived exceptions (ones which occur as
 * a result of trying to take some other exception).
 *
 * If derived == true, the caller guarantees that we are part way through
 * trying to take an exception (but have not yet called
 * armv7m_nvic_acknowledge_irq() to make it active), and so:
 * - s->vectpending is the "original exception" we were trying to take
 * - irq is the "derived exception"
 * - nvic_exec_prio(s) gives the priority before exception entry
 * Here we handle the prioritization logic which the pseudocode puts
 * in the DerivedLateArrival() function.
 */
static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
                                       bool derived)
{
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    trace_nvic_set_pending(irq, secure, targets_secure,
                           derived, vec->enabled, vec->prio);

    if (derived) {
        /* Derived exceptions are always synchronous. */
        assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV);

        if (irq == ARMV7M_EXCP_DEBUG &&
            exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) {
            /* DebugMonitorFault, but its priority is lower than the
             * preempted exception priority: just ignore it.
             */
            return;
        }

        if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) {
            /* If this is a terminal exception (one which means we cannot
             * take the original exception, like a failure to read its
             * vector table entry), then we must take the derived exception.
             * If the derived exception can't take priority over the
             * original exception, then we go into Lockup.
             *
             * For QEMU, we rely on the fact that a derived exception is
             * terminal if and only if it's reported to us as HardFault,
             * which saves having to have an extra argument is_terminal
             * that we'd only use in one place.
             */
            cpu_abort(CPU(s->cpu),
                      "Lockup: can't take terminal derived exception "
                      "(original exception priority %d)\n",
                      s->vectpending_prio);
        }

        /* We now continue with the same code as for a normal pending
         * exception, which will cause us to pend the derived exception.
         * We'll then take either the original or the derived exception
         * based on which is higher priority by the usual mechanism
         * for selecting the highest priority pending interrupt.
         */
    }

    /* Only the synchronous fault exceptions are escalation candidates */
    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /* If a synchronous exception is pending then it may be
         * escalated to HardFault if:
         *  * it is equal or lower priority to current execution
         *  * it is disabled
         * (ie we need to take it immediately but we can't do so).
         * Asynchronous exceptions (and interrupts) simply remain pending.
         *
         * For QEMU, we don't have any imprecise (asynchronous) faults,
         * so we can assume that PREFETCH_ABORT and DATA_ABORT are always
         * synchronous.
         * Debug exceptions are awkward because only Debug exceptions
         * resulting from the BKPT instruction should be escalated,
         * but we don't currently implement any Debug exceptions other
         * than those that result from BKPT, so we treat all debug exceptions
         * as needing escalation.
         *
         * This all means we can identify whether to escalate based only on
         * the exception number and don't (yet) need the caller to explicitly
         * tell us whether this exception is synchronous or not.
         */
        int running = nvic_exec_prio(s);
        bool escalate = false;

        if (exc_group_prio(s, vec->prio, secure) >= running) {
            trace_nvic_escalate_prio(irq, vec->prio, running);
            escalate = true;
        } else if (!vec->enabled) {
            trace_nvic_escalate_disabled(irq);
            escalate = true;
        }

        if (escalate) {

            /* We need to escalate this exception to a synchronous HardFault.
             * If BFHFNMINS is set then we escalate to the banked HF for
             * the target security state of the original exception; otherwise
             * we take a Secure HardFault.
             */
            irq = ARMV7M_EXCP_HARD;
            if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
                (targets_secure ||
                 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
                vec = &s->sec_vectors[irq];
            } else {
                vec = &s->vectors[irq];
            }
            if (running <= vec->prio) {
                /* We want to escalate to HardFault but we can't take the
                 * synchronous HardFault at this point either. This is a
                 * Lockup condition due to a guest bug. We don't model
                 * Lockup, so report via cpu_abort() instead.
                 */
                cpu_abort(CPU(s->cpu),
                          "Lockup: can't escalate %d to HardFault "
                          "(current priority %d)\n", irq, running);
            }

            /* HF may be banked but there is only one shared HFSR */
            s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
        }
    }

    if (!vec->pending) {
        vec->pending = 1;
        nvic_irq_update(s);
    }
}
/*
 * Pend exception @irq in the "normal" (non-derived) way.
 * For banked exceptions, @secure selects which bank; see
 * do_armv7m_nvic_set_pending() for the escalation logic.
 */
void armv7m_nvic_set_pending(NVICState *s, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(s, irq, secure, false);
}
/*
 * Pend exception @irq with the 'derived' flag set.
 * NOTE(review): "derived" presumably means an exception raised in the
 * course of taking another exception -- confirm against the
 * do_armv7m_nvic_set_pending() implementation, which is where the flag
 * is consumed.
 */
void armv7m_nvic_set_pending_derived(NVICState *s, int irq, bool secure)
{
    do_armv7m_nvic_set_pending(s, irq, secure, true);
}
void armv7m_nvic_set_pending_lazyfp(NVICState *s, int irq, bool secure)
{
    /*
     * Pend an exception during lazy FP stacking. This differs
     * from the usual exception pending because the logic for
     * whether we should escalate depends on the saved context
     * in the FPCCR register, not on the current state of the CPU/NVIC.
     */
    bool banked = exc_is_banked(irq);
    VecInfo *vec;
    bool targets_secure;
    bool escalate = false;
    /*
     * We will only look at bits in fpccr if this is a banked exception
     * (in which case 'secure' tells us whether it is the S or NS version).
     * All the bits for the non-banked exceptions are in fpccr_s.
     */
    uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S];
    uint32_t fpccr = s->cpu->env.v7m.fpccr[secure];

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    /* 'secure' may only be set for banked exceptions */
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    targets_secure = banked ? secure : exc_targets_secure(s, irq);

    /*
     * Decide whether to escalate based on the relevant *RDY bit of the
     * saved FPCCR: if the context's handler was not "ready" we must
     * escalate rather than take the original exception.
     */
    switch (irq) {
    case ARMV7M_EXCP_DEBUG:
        if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) {
            /* Ignore DebugMonitor exception */
            return;
        }
        break;
    case ARMV7M_EXCP_MEM:
        escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK);
        break;
    case ARMV7M_EXCP_USAGE:
        escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK);
        break;
    case ARMV7M_EXCP_BUS:
        escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK);
        break;
    case ARMV7M_EXCP_SECURE:
        escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK);
        break;
    default:
        /* No other exception numbers are pended via lazy FP stacking */
        g_assert_not_reached();
    }

    if (escalate) {
        /*
         * Escalate to HardFault: faults that initially targeted Secure
         * continue to do so, even if HF normally targets NonSecure.
         */
        irq = ARMV7M_EXCP_HARD;
        if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
            (targets_secure ||
             !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
            vec = &s->sec_vectors[irq];
        } else {
            vec = &s->vectors[irq];
        }
    }

    /*
     * If the chosen exception (possibly now HardFault) can't preempt,
     * check HFRDY from the saved context; if HardFault wasn't ready
     * either, this is a lockup condition.
     */
    if (!vec->enabled ||
        nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) {
        if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) {
            /*
             * We want to escalate to HardFault but the context the
             * FP state belongs to prevents the exception pre-empting.
             */
            cpu_abort(CPU(s->cpu),
                      "Lockup: can't escalate to HardFault during "
                      "lazy FP register stacking\n");
        }
    }

    if (escalate) {
        /* HFSR.FORCED records that HardFault was reached by escalation */
        s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
    }
    if (!vec->pending) {
        vec->pending = 1;
        /*
         * We do not call nvic_irq_update(), because we know our caller
         * is going to handle causing us to take the exception by
         * raising EXCP_LAZYFP, so raising the IRQ line would be
         * pointless extra work. We just need to recompute the
         * priorities so that armv7m_nvic_can_take_pending_exception()
         * returns the right answer.
         */
        nvic_recompute_state(s);
    }
}
  684. /* Make pending IRQ active. */
  685. void armv7m_nvic_acknowledge_irq(NVICState *s)
  686. {
  687. CPUARMState *env = &s->cpu->env;
  688. const int pending = s->vectpending;
  689. const int running = nvic_exec_prio(s);
  690. VecInfo *vec;
  691. assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
  692. if (s->vectpending_is_s_banked) {
  693. vec = &s->sec_vectors[pending];
  694. } else {
  695. vec = &s->vectors[pending];
  696. }
  697. assert(vec->enabled);
  698. assert(vec->pending);
  699. assert(s->vectpending_prio < running);
  700. trace_nvic_acknowledge_irq(pending, s->vectpending_prio);
  701. vec->active = 1;
  702. vec->pending = 0;
  703. write_v7m_exception(env, s->vectpending);
  704. nvic_irq_update(s);
  705. }
  706. static bool vectpending_targets_secure(NVICState *s)
  707. {
  708. /* Return true if s->vectpending targets Secure state */
  709. if (s->vectpending_is_s_banked) {
  710. return true;
  711. }
  712. return !exc_is_banked(s->vectpending) &&
  713. exc_targets_secure(s, s->vectpending);
  714. }
  715. void armv7m_nvic_get_pending_irq_info(NVICState *s,
  716. int *pirq, bool *ptargets_secure)
  717. {
  718. const int pending = s->vectpending;
  719. bool targets_secure;
  720. assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
  721. targets_secure = vectpending_targets_secure(s);
  722. trace_nvic_get_pending_irq_info(pending, targets_secure);
  723. *ptargets_secure = targets_secure;
  724. *pirq = pending;
  725. }
/*
 * Complete (deactivate) exception @irq; for banked exceptions @secure
 * selects which bank. Returns -1 for an illegal exception return,
 * otherwise the RETTOBASE bit value to report to the caller.
 */
int armv7m_nvic_complete_irq(NVICState *s, int irq, bool secure)
{
    VecInfo *vec = NULL;
    int ret = 0;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    trace_nvic_complete_irq(irq, secure);

    if (secure && exc_is_banked(irq)) {
        vec = &s->sec_vectors[irq];
    } else {
        vec = &s->vectors[irq];
    }

    /*
     * Identify illegal exception return cases. We can't immediately
     * return at this point because we still need to deactivate
     * (either this exception or NMI/HardFault) first.
     */
    if (!exc_is_banked(irq) && exc_targets_secure(s, irq) != secure) {
        /*
         * Return from a configurable exception targeting the opposite
         * security state from the one we're trying to complete it for.
         * Clear vec because it's not really the VecInfo for this
         * (irq, secstate) so we mustn't deactivate it.
         */
        ret = -1;
        vec = NULL;
    } else if (!vec->active) {
        /* Return from an inactive interrupt */
        ret = -1;
    } else {
        /* Legal return, we will return the RETTOBASE bit value to the caller */
        ret = nvic_rettobase(s);
    }

    /*
     * For negative priorities, v8M will forcibly deactivate the appropriate
     * NMI or HardFault regardless of what interrupt we're being asked to
     * deactivate (compare the DeActivate() pseudocode). This is a guard
     * against software returning from NMI or HardFault with a corrupted
     * IPSR and leaving the CPU in a negative-priority state.
     * v7M does not do this, but simply deactivates the requested interrupt.
     */
    if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
        switch (armv7m_nvic_raw_execution_priority(s)) {
        case -1:
            /* Priority -1 is HardFault; which bank depends on BFHFNMINS */
            if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
                vec = &s->vectors[ARMV7M_EXCP_HARD];
            } else {
                vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
            }
            break;
        case -2:
            /* Priority -2 is NMI */
            vec = &s->vectors[ARMV7M_EXCP_NMI];
            break;
        case -3:
            /* Priority -3 is Secure HardFault */
            vec = &s->sec_vectors[ARMV7M_EXCP_HARD];
            break;
        default:
            break;
        }
    }

    if (!vec) {
        /* Illegal return with nothing to deactivate */
        return ret;
    }

    vec->active = 0;
    if (vec->level) {
        /* Re-pend the exception if it's still held high; only
         * happens for external IRQs
         */
        assert(irq >= NVIC_FIRST_IRQ);
        vec->pending = 1;
    }

    nvic_irq_update(s);

    return ret;
}
  799. bool armv7m_nvic_get_ready_status(NVICState *s, int irq, bool secure)
  800. {
  801. /*
  802. * Return whether an exception is "ready", i.e. it is enabled and is
  803. * configured at a priority which would allow it to interrupt the
  804. * current execution priority.
  805. *
  806. * irq and secure have the same semantics as for armv7m_nvic_set_pending():
  807. * for non-banked exceptions secure is always false; for banked exceptions
  808. * it indicates which of the exceptions is required.
  809. */
  810. bool banked = exc_is_banked(irq);
  811. VecInfo *vec;
  812. int running = nvic_exec_prio(s);
  813. assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
  814. assert(!secure || banked);
  815. /*
  816. * HardFault is an odd special case: we always check against -1,
  817. * even if we're secure and HardFault has priority -3; we never
  818. * need to check for enabled state.
  819. */
  820. if (irq == ARMV7M_EXCP_HARD) {
  821. return running > -1;
  822. }
  823. vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
  824. return vec->enabled &&
  825. exc_group_prio(s, vec->prio, secure) < running;
  826. }
  827. /* callback when external interrupt line is changed */
  828. static void set_irq_level(void *opaque, int n, int level)
  829. {
  830. NVICState *s = opaque;
  831. VecInfo *vec;
  832. n += NVIC_FIRST_IRQ;
  833. assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);
  834. trace_nvic_set_irq_level(n, level);
  835. /* The pending status of an external interrupt is
  836. * latched on rising edge and exception handler return.
  837. *
  838. * Pulsing the IRQ will always run the handler
  839. * once, and the handler will re-run until the
  840. * level is low when the handler completes.
  841. */
  842. vec = &s->vectors[n];
  843. if (level != vec->level) {
  844. vec->level = level;
  845. if (level) {
  846. armv7m_nvic_set_pending(s, n, false);
  847. }
  848. }
  849. }
  850. /* callback when external NMI line is changed */
  851. static void nvic_nmi_trigger(void *opaque, int n, int level)
  852. {
  853. NVICState *s = opaque;
  854. trace_nvic_set_nmi_level(level);
  855. /*
  856. * The architecture doesn't specify whether NMI should share
  857. * the normal-interrupt behaviour of being resampled on
  858. * exception handler return. We choose not to, so just
  859. * set NMI pending here and don't track the current level.
  860. */
  861. if (level) {
  862. armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
  863. }
  864. }
/*
 * Read a 32-bit System Control Space / NVIC register at @offset.
 * @attrs carries the transaction's security attribute, which selects
 * the banked copy of banked registers and gates Secure-only registers
 * (which are RAZ from NonSecure). Offsets that don't exist for this
 * CPU's feature set log a GUEST_ERROR and read as zero (bad_offset).
 */
static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
{
    ARMCPU *cpu = s->cpu;
    uint32_t val;

    switch (offset) {
    case 4: /* Interrupt Control Type. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
    case 0xc: /* CPPWR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        /* We make the IMPDEF choice that nothing can ever go into a
         * non-retentive power state, which allows us to RAZ/WI this.
         */
        return 0;
    case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
    {
        int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
        int i;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            /* ITNS is Secure-only; RAZ from NonSecure */
            return 0;
        }
        val = 0;
        for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
            if (s->itns[startvec + i]) {
                val |= (1 << i);
            }
        }
        return val;
    }
    case 0xcfc: /* REVIDR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8_1M)) {
            goto bad_offset;
        }
        return cpu->revidr;
    case 0xd00: /* CPUID Base. */
        return cpu->midr;
    case 0xd04: /* Interrupt Control State (ICSR) */
        /* VECTACTIVE */
        val = cpu->env.v7m.exception;
        /* VECTPENDING */
        if (s->vectpending) {
            /*
             * From v8.1M VECTPENDING must read as 1 if accessed as
             * NonSecure and the highest priority pending and enabled
             * exception targets Secure.
             */
            int vp = s->vectpending;
            if (!attrs.secure && arm_feature(&cpu->env, ARM_FEATURE_V8_1M) &&
                vectpending_targets_secure(s)) {
                vp = 1;
            }
            val |= (vp & 0x1ff) << 12;
        }
        /* ISRPENDING - set if any external IRQ is pending */
        if (nvic_isrpending(s)) {
            val |= (1 << 22);
        }
        /* RETTOBASE - set if only one handler is active */
        if (nvic_rettobase(s)) {
            val |= (1 << 11);
        }
        if (attrs.secure) {
            /* PENDSTSET */
            if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].pending) {
                val |= (1 << 26);
            }
            /* PENDSVSET */
            if (s->sec_vectors[ARMV7M_EXCP_PENDSV].pending) {
                val |= (1 << 28);
            }
        } else {
            /* PENDSTSET */
            if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
                val |= (1 << 26);
            }
            /* PENDSVSET */
            if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
                val |= (1 << 28);
            }
        }
        /* NMIPENDSET: visible to NS only if BFHFNMINS is set */
        if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))
            && s->vectors[ARMV7M_EXCP_NMI].pending) {
            val |= (1 << 31);
        }
        /* ISRPREEMPT: RES0 when halting debug not implemented */
        /* STTNS: RES0 for the Main Extension */
        return val;
    case 0xd08: /* Vector Table Offset. */
        return cpu->env.v7m.vecbase[attrs.secure];
    case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
        /* Reads return VECTKEYSTAT (0xfa05) in the top half */
        val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
        if (attrs.secure) {
            /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */
            val |= cpu->env.v7m.aircr;
        } else {
            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If
                 * security isn't supported then BFHFNMINS is RAO (and
                 * the bit in env.v7m.aircr is always set).
                 */
                val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
            }
        }
        return val;
    case 0xd10: /* System Control. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        return cpu->env.v7m.scr[attrs.secure];
    case 0xd14: /* Configuration Control. */
        /*
         * Non-banked bits: BFHFNMIGN (stored in the NS copy of the register)
         * and TRD (stored in the S copy of the register)
         */
        val = cpu->env.v7m.ccr[attrs.secure];
        val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
        /* BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0 */
        if (!attrs.secure) {
            if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
                val &= ~R_V7M_CCR_BFHFNMIGN_MASK;
            }
        }
        return val;
    case 0xd24: /* System Handler Control and State (SHCSR) */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            goto bad_offset;
        }
        val = 0;
        if (attrs.secure) {
            /* Secure view: banked bits come from sec_vectors[] */
            if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
                val |= (1 << 0);
            }
            if (s->sec_vectors[ARMV7M_EXCP_HARD].active) {
                val |= (1 << 2);
            }
            if (s->sec_vectors[ARMV7M_EXCP_USAGE].active) {
                val |= (1 << 3);
            }
            if (s->sec_vectors[ARMV7M_EXCP_SVC].active) {
                val |= (1 << 7);
            }
            if (s->sec_vectors[ARMV7M_EXCP_PENDSV].active) {
                val |= (1 << 10);
            }
            if (s->sec_vectors[ARMV7M_EXCP_SYSTICK].active) {
                val |= (1 << 11);
            }
            if (s->sec_vectors[ARMV7M_EXCP_USAGE].pending) {
                val |= (1 << 12);
            }
            if (s->sec_vectors[ARMV7M_EXCP_MEM].pending) {
                val |= (1 << 13);
            }
            if (s->sec_vectors[ARMV7M_EXCP_SVC].pending) {
                val |= (1 << 15);
            }
            if (s->sec_vectors[ARMV7M_EXCP_MEM].enabled) {
                val |= (1 << 16);
            }
            if (s->sec_vectors[ARMV7M_EXCP_USAGE].enabled) {
                val |= (1 << 18);
            }
            if (s->sec_vectors[ARMV7M_EXCP_HARD].pending) {
                val |= (1 << 21);
            }
            /* SecureFault is not banked but is always RAZ/WI to NS */
            if (s->vectors[ARMV7M_EXCP_SECURE].active) {
                val |= (1 << 4);
            }
            if (s->vectors[ARMV7M_EXCP_SECURE].enabled) {
                val |= (1 << 19);
            }
            if (s->vectors[ARMV7M_EXCP_SECURE].pending) {
                val |= (1 << 20);
            }
        } else {
            if (s->vectors[ARMV7M_EXCP_MEM].active) {
                val |= (1 << 0);
            }
            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /* HARDFAULTACT, HARDFAULTPENDED not present in v7M */
                if (s->vectors[ARMV7M_EXCP_HARD].active) {
                    val |= (1 << 2);
                }
                if (s->vectors[ARMV7M_EXCP_HARD].pending) {
                    val |= (1 << 21);
                }
            }
            if (s->vectors[ARMV7M_EXCP_USAGE].active) {
                val |= (1 << 3);
            }
            if (s->vectors[ARMV7M_EXCP_SVC].active) {
                val |= (1 << 7);
            }
            if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
                val |= (1 << 10);
            }
            if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
                val |= (1 << 11);
            }
            if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
                val |= (1 << 12);
            }
            if (s->vectors[ARMV7M_EXCP_MEM].pending) {
                val |= (1 << 13);
            }
            if (s->vectors[ARMV7M_EXCP_SVC].pending) {
                val |= (1 << 15);
            }
            if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
                val |= (1 << 16);
            }
            if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
                val |= (1 << 18);
            }
        }
        /* BusFault/NMI bits: visible to NS only if BFHFNMINS is set */
        if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            if (s->vectors[ARMV7M_EXCP_BUS].active) {
                val |= (1 << 1);
            }
            if (s->vectors[ARMV7M_EXCP_BUS].pending) {
                val |= (1 << 14);
            }
            if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
                val |= (1 << 17);
            }
            if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
                s->vectors[ARMV7M_EXCP_NMI].active) {
                /* NMIACT is not present in v7M */
                val |= (1 << 5);
            }
        }
        /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
        if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
            val |= (1 << 8);
        }
        return val;
    case 0xd2c: /* Hard Fault Status. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->env.v7m.hfsr;
    case 0xd30: /* Debug Fault Status. */
        return cpu->env.v7m.dfsr;
    case 0xd34: /* MMFAR MemManage Fault Address */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->env.v7m.mmfar[attrs.secure];
    case 0xd38: /* Bus Fault Address. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        /* BFAR is RAZ from NS if AIRCR.BFHFNMINS is 0 */
        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            return 0;
        }
        return cpu->env.v7m.bfar;
    case 0xd3c: /* Aux Fault Status. */
        /* TODO: Implement fault status registers. */
        qemu_log_mask(LOG_UNIMP,
                      "Aux Fault status registers unimplemented\n");
        return 0;
    case 0xd40: /* PFR0. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_pfr0;
    case 0xd44: /* PFR1. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_pfr1;
    case 0xd48: /* DFR0. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_dfr0;
    case 0xd4c: /* AFR0. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->id_afr0;
    case 0xd50: /* MMFR0. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_mmfr0;
    case 0xd54: /* MMFR1. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_mmfr1;
    case 0xd58: /* MMFR2. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_mmfr2;
    case 0xd5c: /* MMFR3. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_mmfr3;
    case 0xd60: /* ISAR0. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar0;
    case 0xd64: /* ISAR1. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar1;
    case 0xd68: /* ISAR2. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar2;
    case 0xd6c: /* ISAR3. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar3;
    case 0xd70: /* ISAR4. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar4;
    case 0xd74: /* ISAR5. */
        if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
            goto bad_offset;
        }
        return cpu->isar.id_isar5;
    case 0xd78: /* CLIDR */
        return cpu->clidr;
    case 0xd7c: /* CTR */
        return cpu->ctr;
    case 0xd80: /* CCSIDR */
    {
        /* Cache size ID for the cache currently selected by CSSELR */
        int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK;
        return cpu->ccsidr[idx];
    }
    case 0xd84: /* CSSELR */
        return cpu->env.v7m.csselr[attrs.secure];
    case 0xd88: /* CPACR */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        return cpu->env.v7m.cpacr[attrs.secure];
    case 0xd8c: /* NSACR */
        if (!attrs.secure || !cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        return cpu->env.v7m.nsacr;
    /* TODO: Implement debug registers. */
    case 0xd90: /* MPU_TYPE */
        /* Unified MPU; if the MPU is not present this value is zero */
        return cpu->pmsav7_dregion << 8;
    case 0xd94: /* MPU_CTRL */
        return cpu->env.v7m.mpu_ctrl[attrs.secure];
    case 0xd98: /* MPU_RNR */
        return cpu->env.pmsav7.rnr[attrs.secure];
    case 0xd9c: /* MPU_RBAR */
    case 0xda4: /* MPU_RBAR_A1 */
    case 0xdac: /* MPU_RBAR_A2 */
    case 0xdb4: /* MPU_RBAR_A3 */
    {
        int region = cpu->env.pmsav7.rnr[attrs.secure];

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR, and there is no 'region' field in the
             * RBAR register.
             */
            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return 0;
            }
            return cpu->env.pmsav8.rbar[attrs.secure][region];
        }

        if (region >= cpu->pmsav7_dregion) {
            return 0;
        }
        return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
    }
    case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
    case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
    case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
    case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
    {
        int region = cpu->env.pmsav7.rnr[attrs.secure];

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR.
             */
            int aliasno = (offset - 0xda0) / 8; /* 0..3 */
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return 0;
            }
            return cpu->env.pmsav8.rlar[attrs.secure][region];
        }

        if (region >= cpu->pmsav7_dregion) {
            return 0;
        }
        /* v7M RASR packs DRACR in the top half, DRSR in the bottom */
        return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
            (cpu->env.pmsav7.drsr[region] & 0xffff);
    }
    case 0xdc0: /* MPU_MAIR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        return cpu->env.pmsav8.mair0[attrs.secure];
    case 0xdc4: /* MPU_MAIR1 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        return cpu->env.pmsav8.mair1[attrs.secure];
    case 0xdd0: /* SAU_CTRL */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            /* SAU registers are Secure-only; RAZ from NonSecure */
            return 0;
        }
        return cpu->env.sau.ctrl;
    case 0xdd4: /* SAU_TYPE */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->sau_sregion;
    case 0xdd8: /* SAU_RNR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->env.sau.rnr;
    case 0xddc: /* SAU_RBAR */
    {
        int region = cpu->env.sau.rnr;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        if (region >= cpu->sau_sregion) {
            return 0;
        }
        return cpu->env.sau.rbar[region];
    }
    case 0xde0: /* SAU_RLAR */
    {
        int region = cpu->env.sau.rnr;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        if (region >= cpu->sau_sregion) {
            return 0;
        }
        return cpu->env.sau.rlar[region];
    }
    case 0xde4: /* SFSR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->env.v7m.sfsr;
    case 0xde8: /* SFAR */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (!attrs.secure) {
            return 0;
        }
        return cpu->env.v7m.sfar;
    case 0xf04: /* RFSR */
        if (!cpu_isar_feature(aa32_ras, cpu)) {
            goto bad_offset;
        }
        /* We provide minimal-RAS only: RFSR is RAZ/WI */
        return 0;
    case 0xf34: /* FPCCR */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        if (attrs.secure) {
            return cpu->env.v7m.fpccr[M_REG_S];
        } else {
            /*
             * NS can read LSPEN, CLRONRET and MONRDY. It can read
             * BFRDY and HFRDY if AIRCR.BFHFNMINS != 0;
             * other non-banked bits RAZ.
             * TODO: MONRDY should RAZ/WI if DEMCR.SDME is set.
             */
            uint32_t value = cpu->env.v7m.fpccr[M_REG_S];
            uint32_t mask = R_V7M_FPCCR_LSPEN_MASK |
                R_V7M_FPCCR_CLRONRET_MASK |
                R_V7M_FPCCR_MONRDY_MASK;

            if (s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
                mask |= R_V7M_FPCCR_BFRDY_MASK | R_V7M_FPCCR_HFRDY_MASK;
            }

            value &= mask;

            value |= cpu->env.v7m.fpccr[M_REG_NS];
            return value;
        }
    case 0xf38: /* FPCAR */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        return cpu->env.v7m.fpcar[attrs.secure];
    case 0xf3c: /* FPDSCR */
        if (!cpu_isar_feature(aa32_vfp_simd, cpu)) {
            return 0;
        }
        return cpu->env.v7m.fpdscr[attrs.secure];
    case 0xf40: /* MVFR0 */
        return cpu->isar.mvfr0;
    case 0xf44: /* MVFR1 */
        return cpu->isar.mvfr1;
    case 0xf48: /* MVFR2 */
        return cpu->isar.mvfr2;
    default:
    bad_offset:
        qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
        return 0;
    }
}
  1417. static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
  1418. MemTxAttrs attrs)
  1419. {
  1420. ARMCPU *cpu = s->cpu;
  1421. switch (offset) {
  1422. case 0xc: /* CPPWR */
  1423. if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1424. goto bad_offset;
  1425. }
  1426. /* Make the IMPDEF choice to RAZ/WI this. */
  1427. break;
  1428. case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
  1429. {
  1430. int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
  1431. int i;
  1432. if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1433. goto bad_offset;
  1434. }
  1435. if (!attrs.secure) {
  1436. break;
  1437. }
  1438. for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
  1439. s->itns[startvec + i] = (value >> i) & 1;
  1440. }
  1441. nvic_irq_update(s);
  1442. break;
  1443. }
  1444. case 0xd04: /* Interrupt Control State (ICSR) */
  1445. if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
  1446. if (value & (1 << 31)) {
  1447. armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
  1448. } else if (value & (1 << 30) &&
  1449. arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1450. /* PENDNMICLR didn't exist in v7M */
  1451. armv7m_nvic_clear_pending(s, ARMV7M_EXCP_NMI, false);
  1452. }
  1453. }
  1454. if (value & (1 << 28)) {
  1455. armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
  1456. } else if (value & (1 << 27)) {
  1457. armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
  1458. }
  1459. if (value & (1 << 26)) {
  1460. armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
  1461. } else if (value & (1 << 25)) {
  1462. armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
  1463. }
  1464. break;
  1465. case 0xd08: /* Vector Table Offset. */
  1466. cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80;
  1467. break;
  1468. case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
  1469. if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) {
  1470. if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) {
  1471. if (attrs.secure ||
  1472. !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) {
  1473. signal_sysresetreq(s);
  1474. }
  1475. }
  1476. if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) {
  1477. qemu_log_mask(LOG_GUEST_ERROR,
  1478. "Setting VECTCLRACTIVE when not in DEBUG mode "
  1479. "is UNPREDICTABLE\n");
  1480. }
  1481. if (value & R_V7M_AIRCR_VECTRESET_MASK) {
  1482. /* NB: this bit is RES0 in v8M */
  1483. qemu_log_mask(LOG_GUEST_ERROR,
  1484. "Setting VECTRESET when not in DEBUG mode "
  1485. "is UNPREDICTABLE\n");
  1486. }
  1487. if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
  1488. s->prigroup[attrs.secure] =
  1489. extract32(value,
  1490. R_V7M_AIRCR_PRIGROUP_SHIFT,
  1491. R_V7M_AIRCR_PRIGROUP_LENGTH);
  1492. }
  1493. /* AIRCR.IESB is RAZ/WI because we implement only minimal RAS */
  1494. if (attrs.secure) {
  1495. /* These bits are only writable by secure */
  1496. cpu->env.v7m.aircr = value &
  1497. (R_V7M_AIRCR_SYSRESETREQS_MASK |
  1498. R_V7M_AIRCR_BFHFNMINS_MASK |
  1499. R_V7M_AIRCR_PRIS_MASK);
  1500. /* BFHFNMINS changes the priority of Secure HardFault, and
  1501. * allows a pending Non-secure HardFault to preempt (which
  1502. * we implement by marking it enabled).
  1503. */
  1504. if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
  1505. s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
  1506. s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
  1507. } else {
  1508. s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
  1509. s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
  1510. }
  1511. }
  1512. nvic_irq_update(s);
  1513. }
  1514. break;
  1515. case 0xd10: /* System Control. */
  1516. if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
  1517. goto bad_offset;
  1518. }
  1519. /* We don't implement deep-sleep so these bits are RAZ/WI.
  1520. * The other bits in the register are banked.
  1521. * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which
  1522. * is architecturally permitted.
  1523. */
  1524. value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK);
  1525. cpu->env.v7m.scr[attrs.secure] = value;
  1526. break;
  1527. case 0xd14: /* Configuration Control. */
  1528. {
  1529. uint32_t mask;
  1530. if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
  1531. goto bad_offset;
  1532. }
  1533. /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
  1534. mask = R_V7M_CCR_STKALIGN_MASK |
  1535. R_V7M_CCR_BFHFNMIGN_MASK |
  1536. R_V7M_CCR_DIV_0_TRP_MASK |
  1537. R_V7M_CCR_UNALIGN_TRP_MASK |
  1538. R_V7M_CCR_USERSETMPEND_MASK |
  1539. R_V7M_CCR_NONBASETHRDENA_MASK;
  1540. if (arm_feature(&cpu->env, ARM_FEATURE_V8_1M) && attrs.secure) {
  1541. /* TRD is always RAZ/WI from NS */
  1542. mask |= R_V7M_CCR_TRD_MASK;
  1543. }
  1544. value &= mask;
  1545. if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1546. /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */
  1547. value |= R_V7M_CCR_NONBASETHRDENA_MASK
  1548. | R_V7M_CCR_STKALIGN_MASK;
  1549. }
  1550. if (attrs.secure) {
  1551. /* the BFHFNMIGN bit is not banked; keep that in the NS copy */
  1552. cpu->env.v7m.ccr[M_REG_NS] =
  1553. (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK)
  1554. | (value & R_V7M_CCR_BFHFNMIGN_MASK);
  1555. value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
  1556. } else {
  1557. /*
  1558. * BFHFNMIGN is RAZ/WI from NS if AIRCR.BFHFNMINS is 0, so
  1559. * preserve the state currently in the NS element of the array
  1560. */
  1561. if (!(cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
  1562. value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
  1563. value |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
  1564. }
  1565. }
  1566. cpu->env.v7m.ccr[attrs.secure] = value;
  1567. break;
  1568. }
  1569. case 0xd24: /* System Handler Control and State (SHCSR) */
  1570. if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
  1571. goto bad_offset;
  1572. }
  1573. if (attrs.secure) {
  1574. s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
  1575. /* Secure HardFault active bit cannot be written */
  1576. s->sec_vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
  1577. s->sec_vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
  1578. s->sec_vectors[ARMV7M_EXCP_PENDSV].active =
  1579. (value & (1 << 10)) != 0;
  1580. s->sec_vectors[ARMV7M_EXCP_SYSTICK].active =
  1581. (value & (1 << 11)) != 0;
  1582. s->sec_vectors[ARMV7M_EXCP_USAGE].pending =
  1583. (value & (1 << 12)) != 0;
  1584. s->sec_vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
  1585. s->sec_vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
  1586. s->sec_vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
  1587. s->sec_vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
  1588. s->sec_vectors[ARMV7M_EXCP_USAGE].enabled =
  1589. (value & (1 << 18)) != 0;
  1590. s->sec_vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
  1591. /* SecureFault not banked, but RAZ/WI to NS */
  1592. s->vectors[ARMV7M_EXCP_SECURE].active = (value & (1 << 4)) != 0;
  1593. s->vectors[ARMV7M_EXCP_SECURE].enabled = (value & (1 << 19)) != 0;
  1594. s->vectors[ARMV7M_EXCP_SECURE].pending = (value & (1 << 20)) != 0;
  1595. } else {
  1596. s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
  1597. if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1598. /* HARDFAULTPENDED is not present in v7M */
  1599. s->vectors[ARMV7M_EXCP_HARD].pending = (value & (1 << 21)) != 0;
  1600. }
  1601. s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
  1602. s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
  1603. s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
  1604. s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
  1605. s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
  1606. s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
  1607. s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
  1608. s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
  1609. s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
  1610. }
  1611. if (attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
  1612. s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
  1613. s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
  1614. s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
  1615. }
  1616. /* NMIACT can only be written if the write is of a zero, with
  1617. * BFHFNMINS 1, and by the CPU in secure state via the NS alias.
  1618. */
  1619. if (!attrs.secure && cpu->env.v7m.secure &&
  1620. (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
  1621. (value & (1 << 5)) == 0) {
  1622. s->vectors[ARMV7M_EXCP_NMI].active = 0;
  1623. }
  1624. /* HARDFAULTACT can only be written if the write is of a zero
  1625. * to the non-secure HardFault state by the CPU in secure state.
  1626. * The only case where we can be targeting the non-secure HF state
  1627. * when in secure state is if this is a write via the NS alias
  1628. * and BFHFNMINS is 1.
  1629. */
  1630. if (!attrs.secure && cpu->env.v7m.secure &&
  1631. (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
  1632. (value & (1 << 2)) == 0) {
  1633. s->vectors[ARMV7M_EXCP_HARD].active = 0;
  1634. }
  1635. /* TODO: this is RAZ/WI from NS if DEMCR.SDME is set */
  1636. s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
  1637. nvic_irq_update(s);
  1638. break;
  1639. case 0xd2c: /* Hard Fault Status. */
  1640. if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
  1641. goto bad_offset;
  1642. }
  1643. cpu->env.v7m.hfsr &= ~value; /* W1C */
  1644. break;
  1645. case 0xd30: /* Debug Fault Status. */
  1646. cpu->env.v7m.dfsr &= ~value; /* W1C */
  1647. break;
  1648. case 0xd34: /* Mem Manage Address. */
  1649. if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
  1650. goto bad_offset;
  1651. }
  1652. cpu->env.v7m.mmfar[attrs.secure] = value;
  1653. return;
  1654. case 0xd38: /* Bus Fault Address. */
  1655. if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
  1656. goto bad_offset;
  1657. }
  1658. if (!attrs.secure &&
  1659. !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
  1660. return;
  1661. }
  1662. cpu->env.v7m.bfar = value;
  1663. return;
  1664. case 0xd3c: /* Aux Fault Status. */
  1665. qemu_log_mask(LOG_UNIMP,
  1666. "NVIC: Aux fault status registers unimplemented\n");
  1667. break;
  1668. case 0xd84: /* CSSELR */
  1669. if (!arm_v7m_csselr_razwi(cpu)) {
  1670. cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK;
  1671. }
  1672. break;
  1673. case 0xd88: /* CPACR */
  1674. if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
  1675. /* We implement only the Floating Point extension's CP10/CP11 */
  1676. cpu->env.v7m.cpacr[attrs.secure] = value & (0xf << 20);
  1677. }
  1678. break;
  1679. case 0xd8c: /* NSACR */
  1680. if (attrs.secure && cpu_isar_feature(aa32_vfp_simd, cpu)) {
  1681. /* We implement only the Floating Point extension's CP10/CP11 */
  1682. cpu->env.v7m.nsacr = value & (3 << 10);
  1683. }
  1684. break;
  1685. case 0xd90: /* MPU_TYPE */
  1686. return; /* RO */
  1687. case 0xd94: /* MPU_CTRL */
  1688. if ((value &
  1689. (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
  1690. == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
  1691. qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
  1692. "UNPREDICTABLE\n");
  1693. }
  1694. cpu->env.v7m.mpu_ctrl[attrs.secure]
  1695. = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
  1696. R_V7M_MPU_CTRL_HFNMIENA_MASK |
  1697. R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
  1698. tlb_flush(CPU(cpu));
  1699. break;
  1700. case 0xd98: /* MPU_RNR */
  1701. if (value >= cpu->pmsav7_dregion) {
  1702. qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
  1703. PRIu32 "/%" PRIu32 "\n",
  1704. value, cpu->pmsav7_dregion);
  1705. } else {
  1706. cpu->env.pmsav7.rnr[attrs.secure] = value;
  1707. }
  1708. break;
  1709. case 0xd9c: /* MPU_RBAR */
  1710. case 0xda4: /* MPU_RBAR_A1 */
  1711. case 0xdac: /* MPU_RBAR_A2 */
  1712. case 0xdb4: /* MPU_RBAR_A3 */
  1713. {
  1714. int region;
  1715. if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1716. /* PMSAv8M handling of the aliases is different from v7M:
  1717. * aliases A1, A2, A3 override the low two bits of the region
  1718. * number in MPU_RNR, and there is no 'region' field in the
  1719. * RBAR register.
  1720. */
  1721. int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
  1722. region = cpu->env.pmsav7.rnr[attrs.secure];
  1723. if (aliasno) {
  1724. region = deposit32(region, 0, 2, aliasno);
  1725. }
  1726. if (region >= cpu->pmsav7_dregion) {
  1727. return;
  1728. }
  1729. cpu->env.pmsav8.rbar[attrs.secure][region] = value;
  1730. tlb_flush(CPU(cpu));
  1731. return;
  1732. }
  1733. if (value & (1 << 4)) {
  1734. /* VALID bit means use the region number specified in this
  1735. * value and also update MPU_RNR.REGION with that value.
  1736. */
  1737. region = extract32(value, 0, 4);
  1738. if (region >= cpu->pmsav7_dregion) {
  1739. qemu_log_mask(LOG_GUEST_ERROR,
  1740. "MPU region out of range %u/%" PRIu32 "\n",
  1741. region, cpu->pmsav7_dregion);
  1742. return;
  1743. }
  1744. cpu->env.pmsav7.rnr[attrs.secure] = region;
  1745. } else {
  1746. region = cpu->env.pmsav7.rnr[attrs.secure];
  1747. }
  1748. if (region >= cpu->pmsav7_dregion) {
  1749. return;
  1750. }
  1751. cpu->env.pmsav7.drbar[region] = value & ~0x1f;
  1752. tlb_flush(CPU(cpu));
  1753. break;
  1754. }
  1755. case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
  1756. case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
  1757. case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
  1758. case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
  1759. {
  1760. int region = cpu->env.pmsav7.rnr[attrs.secure];
  1761. if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1762. /* PMSAv8M handling of the aliases is different from v7M:
  1763. * aliases A1, A2, A3 override the low two bits of the region
  1764. * number in MPU_RNR.
  1765. */
  1766. int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
  1767. region = cpu->env.pmsav7.rnr[attrs.secure];
  1768. if (aliasno) {
  1769. region = deposit32(region, 0, 2, aliasno);
  1770. }
  1771. if (region >= cpu->pmsav7_dregion) {
  1772. return;
  1773. }
  1774. cpu->env.pmsav8.rlar[attrs.secure][region] = value;
  1775. tlb_flush(CPU(cpu));
  1776. return;
  1777. }
  1778. if (region >= cpu->pmsav7_dregion) {
  1779. return;
  1780. }
  1781. cpu->env.pmsav7.drsr[region] = value & 0xff3f;
  1782. cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
  1783. tlb_flush(CPU(cpu));
  1784. break;
  1785. }
  1786. case 0xdc0: /* MPU_MAIR0 */
  1787. if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1788. goto bad_offset;
  1789. }
  1790. if (cpu->pmsav7_dregion) {
  1791. /* Register is RES0 if no MPU regions are implemented */
  1792. cpu->env.pmsav8.mair0[attrs.secure] = value;
  1793. }
  1794. /* We don't need to do anything else because memory attributes
  1795. * only affect cacheability, and we don't implement caching.
  1796. */
  1797. break;
  1798. case 0xdc4: /* MPU_MAIR1 */
  1799. if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1800. goto bad_offset;
  1801. }
  1802. if (cpu->pmsav7_dregion) {
  1803. /* Register is RES0 if no MPU regions are implemented */
  1804. cpu->env.pmsav8.mair1[attrs.secure] = value;
  1805. }
  1806. /* We don't need to do anything else because memory attributes
  1807. * only affect cacheability, and we don't implement caching.
  1808. */
  1809. break;
  1810. case 0xdd0: /* SAU_CTRL */
  1811. if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1812. goto bad_offset;
  1813. }
  1814. if (!attrs.secure) {
  1815. return;
  1816. }
  1817. cpu->env.sau.ctrl = value & 3;
  1818. break;
  1819. case 0xdd4: /* SAU_TYPE */
  1820. if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1821. goto bad_offset;
  1822. }
  1823. break;
  1824. case 0xdd8: /* SAU_RNR */
  1825. if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1826. goto bad_offset;
  1827. }
  1828. if (!attrs.secure) {
  1829. return;
  1830. }
  1831. if (value >= cpu->sau_sregion) {
  1832. qemu_log_mask(LOG_GUEST_ERROR, "SAU region out of range %"
  1833. PRIu32 "/%" PRIu32 "\n",
  1834. value, cpu->sau_sregion);
  1835. } else {
  1836. cpu->env.sau.rnr = value;
  1837. }
  1838. break;
  1839. case 0xddc: /* SAU_RBAR */
  1840. {
  1841. int region = cpu->env.sau.rnr;
  1842. if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1843. goto bad_offset;
  1844. }
  1845. if (!attrs.secure) {
  1846. return;
  1847. }
  1848. if (region >= cpu->sau_sregion) {
  1849. return;
  1850. }
  1851. cpu->env.sau.rbar[region] = value & ~0x1f;
  1852. tlb_flush(CPU(cpu));
  1853. break;
  1854. }
  1855. case 0xde0: /* SAU_RLAR */
  1856. {
  1857. int region = cpu->env.sau.rnr;
  1858. if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1859. goto bad_offset;
  1860. }
  1861. if (!attrs.secure) {
  1862. return;
  1863. }
  1864. if (region >= cpu->sau_sregion) {
  1865. return;
  1866. }
  1867. cpu->env.sau.rlar[region] = value & ~0x1c;
  1868. tlb_flush(CPU(cpu));
  1869. break;
  1870. }
  1871. case 0xde4: /* SFSR */
  1872. if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1873. goto bad_offset;
  1874. }
  1875. if (!attrs.secure) {
  1876. return;
  1877. }
  1878. cpu->env.v7m.sfsr &= ~value; /* W1C */
  1879. break;
  1880. case 0xde8: /* SFAR */
  1881. if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1882. goto bad_offset;
  1883. }
  1884. if (!attrs.secure) {
  1885. return;
  1886. }
  1887. cpu->env.v7m.sfsr = value;
  1888. break;
  1889. case 0xf00: /* Software Triggered Interrupt Register */
  1890. {
  1891. int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
  1892. if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
  1893. goto bad_offset;
  1894. }
  1895. if (excnum < s->num_irq) {
  1896. armv7m_nvic_set_pending(s, excnum, false);
  1897. }
  1898. break;
  1899. }
  1900. case 0xf04: /* RFSR */
  1901. if (!cpu_isar_feature(aa32_ras, cpu)) {
  1902. goto bad_offset;
  1903. }
  1904. /* We provide minimal-RAS only: RFSR is RAZ/WI */
  1905. break;
  1906. case 0xf34: /* FPCCR */
  1907. if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
  1908. /* Not all bits here are banked. */
  1909. uint32_t fpccr_s;
  1910. if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
  1911. /* Don't allow setting of bits not present in v7M */
  1912. value &= (R_V7M_FPCCR_LSPACT_MASK |
  1913. R_V7M_FPCCR_USER_MASK |
  1914. R_V7M_FPCCR_THREAD_MASK |
  1915. R_V7M_FPCCR_HFRDY_MASK |
  1916. R_V7M_FPCCR_MMRDY_MASK |
  1917. R_V7M_FPCCR_BFRDY_MASK |
  1918. R_V7M_FPCCR_MONRDY_MASK |
  1919. R_V7M_FPCCR_LSPEN_MASK |
  1920. R_V7M_FPCCR_ASPEN_MASK);
  1921. }
  1922. value &= ~R_V7M_FPCCR_RES0_MASK;
  1923. if (!attrs.secure) {
  1924. /* Some non-banked bits are configurably writable by NS */
  1925. fpccr_s = cpu->env.v7m.fpccr[M_REG_S];
  1926. if (!(fpccr_s & R_V7M_FPCCR_LSPENS_MASK)) {
  1927. uint32_t lspen = FIELD_EX32(value, V7M_FPCCR, LSPEN);
  1928. fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, LSPEN, lspen);
  1929. }
  1930. if (!(fpccr_s & R_V7M_FPCCR_CLRONRETS_MASK)) {
  1931. uint32_t cor = FIELD_EX32(value, V7M_FPCCR, CLRONRET);
  1932. fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, CLRONRET, cor);
  1933. }
  1934. if ((s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
  1935. uint32_t hfrdy = FIELD_EX32(value, V7M_FPCCR, HFRDY);
  1936. uint32_t bfrdy = FIELD_EX32(value, V7M_FPCCR, BFRDY);
  1937. fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, HFRDY, hfrdy);
  1938. fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, BFRDY, bfrdy);
  1939. }
  1940. /* TODO MONRDY should RAZ/WI if DEMCR.SDME is set */
  1941. {
  1942. uint32_t monrdy = FIELD_EX32(value, V7M_FPCCR, MONRDY);
  1943. fpccr_s = FIELD_DP32(fpccr_s, V7M_FPCCR, MONRDY, monrdy);
  1944. }
  1945. /*
  1946. * All other non-banked bits are RAZ/WI from NS; write
  1947. * just the banked bits to fpccr[M_REG_NS].
  1948. */
  1949. value &= R_V7M_FPCCR_BANKED_MASK;
  1950. cpu->env.v7m.fpccr[M_REG_NS] = value;
  1951. } else {
  1952. fpccr_s = value;
  1953. }
  1954. cpu->env.v7m.fpccr[M_REG_S] = fpccr_s;
  1955. }
  1956. break;
  1957. case 0xf38: /* FPCAR */
  1958. if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
  1959. value &= ~7;
  1960. cpu->env.v7m.fpcar[attrs.secure] = value;
  1961. }
  1962. break;
  1963. case 0xf3c: /* FPDSCR */
  1964. if (cpu_isar_feature(aa32_vfp_simd, cpu)) {
  1965. uint32_t mask = FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE_MASK;
  1966. if (cpu_isar_feature(any_fp16, cpu)) {
  1967. mask |= FPCR_FZ16;
  1968. }
  1969. value &= mask;
  1970. if (cpu_isar_feature(aa32_lob, cpu)) {
  1971. value |= 4 << FPCR_LTPSIZE_SHIFT;
  1972. }
  1973. cpu->env.v7m.fpdscr[attrs.secure] = value;
  1974. }
  1975. break;
  1976. case 0xf50: /* ICIALLU */
  1977. case 0xf58: /* ICIMVAU */
  1978. case 0xf5c: /* DCIMVAC */
  1979. case 0xf60: /* DCISW */
  1980. case 0xf64: /* DCCMVAU */
  1981. case 0xf68: /* DCCMVAC */
  1982. case 0xf6c: /* DCCSW */
  1983. case 0xf70: /* DCCIMVAC */
  1984. case 0xf74: /* DCCISW */
  1985. case 0xf78: /* BPIALL */
  1986. /* Cache and branch predictor maintenance: for QEMU these always NOP */
  1987. break;
  1988. default:
  1989. bad_offset:
  1990. qemu_log_mask(LOG_GUEST_ERROR,
  1991. "NVIC: Bad write offset 0x%x\n", offset);
  1992. }
  1993. }
  1994. static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs)
  1995. {
  1996. /* Return true if unprivileged access to this register is permitted. */
  1997. switch (offset) {
  1998. case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
  1999. /* For access via STIR_NS it is the NS CCR.USERSETMPEND that
  2000. * controls access even though the CPU is in Secure state (I_QDKX).
  2001. */
  2002. return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK;
  2003. default:
  2004. /* All other user accesses cause a BusFault unconditionally */
  2005. return false;
  2006. }
  2007. }
  2008. static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
  2009. {
  2010. /* Behaviour for the SHPR register field for this exception:
  2011. * return M_REG_NS to use the nonsecure vector (including for
  2012. * non-banked exceptions), M_REG_S for the secure version of
  2013. * a banked exception, and -1 if this field should RAZ/WI.
  2014. */
  2015. switch (exc) {
  2016. case ARMV7M_EXCP_MEM:
  2017. case ARMV7M_EXCP_USAGE:
  2018. case ARMV7M_EXCP_SVC:
  2019. case ARMV7M_EXCP_PENDSV:
  2020. case ARMV7M_EXCP_SYSTICK:
  2021. /* Banked exceptions */
  2022. return attrs.secure;
  2023. case ARMV7M_EXCP_BUS:
  2024. /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */
  2025. if (!attrs.secure &&
  2026. !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
  2027. return -1;
  2028. }
  2029. return M_REG_NS;
  2030. case ARMV7M_EXCP_SECURE:
  2031. /* Not banked, RAZ/WI from nonsecure */
  2032. if (!attrs.secure) {
  2033. return -1;
  2034. }
  2035. return M_REG_NS;
  2036. case ARMV7M_EXCP_DEBUG:
  2037. /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */
  2038. return M_REG_NS;
  2039. case 8 ... 10:
  2040. case 13:
  2041. /* RES0 */
  2042. return -1;
  2043. default:
  2044. /* Not reachable due to decode of SHPR register addresses */
  2045. g_assert_not_reached();
  2046. }
  2047. }
  2048. static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
  2049. uint64_t *data, unsigned size,
  2050. MemTxAttrs attrs)
  2051. {
  2052. NVICState *s = (NVICState *)opaque;
  2053. uint32_t offset = addr;
  2054. unsigned i, startvec, end;
  2055. uint32_t val;
  2056. if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
  2057. /* Generate BusFault for unprivileged accesses */
  2058. return MEMTX_ERROR;
  2059. }
  2060. switch (offset) {
  2061. /* reads of set and clear both return the status */
  2062. case 0x100 ... 0x13f: /* NVIC Set enable */
  2063. offset += 0x80;
  2064. /* fall through */
  2065. case 0x180 ... 0x1bf: /* NVIC Clear enable */
  2066. val = 0;
  2067. startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */
  2068. for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
  2069. if (s->vectors[startvec + i].enabled &&
  2070. (attrs.secure || s->itns[startvec + i])) {
  2071. val |= (1 << i);
  2072. }
  2073. }
  2074. break;
  2075. case 0x200 ... 0x23f: /* NVIC Set pend */
  2076. offset += 0x80;
  2077. /* fall through */
  2078. case 0x280 ... 0x2bf: /* NVIC Clear pend */
  2079. val = 0;
  2080. startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
  2081. for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
  2082. if (s->vectors[startvec + i].pending &&
  2083. (attrs.secure || s->itns[startvec + i])) {
  2084. val |= (1 << i);
  2085. }
  2086. }
  2087. break;
  2088. case 0x300 ... 0x33f: /* NVIC Active */
  2089. val = 0;
  2090. if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) {
  2091. break;
  2092. }
  2093. startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */
  2094. for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
  2095. if (s->vectors[startvec + i].active &&
  2096. (attrs.secure || s->itns[startvec + i])) {
  2097. val |= (1 << i);
  2098. }
  2099. }
  2100. break;
  2101. case 0x400 ... 0x5ef: /* NVIC Priority */
  2102. val = 0;
  2103. startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */
  2104. for (i = 0; i < size && startvec + i < s->num_irq; i++) {
  2105. if (attrs.secure || s->itns[startvec + i]) {
  2106. val |= s->vectors[startvec + i].prio << (8 * i);
  2107. }
  2108. }
  2109. break;
  2110. case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
  2111. if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
  2112. val = 0;
  2113. break;
  2114. }
  2115. /* fall through */
  2116. case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
  2117. val = 0;
  2118. for (i = 0; i < size; i++) {
  2119. unsigned hdlidx = (offset - 0xd14) + i;
  2120. int sbank = shpr_bank(s, hdlidx, attrs);
  2121. if (sbank < 0) {
  2122. continue;
  2123. }
  2124. val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
  2125. }
  2126. break;
  2127. case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
  2128. if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
  2129. val = 0;
  2130. break;
  2131. };
  2132. /*
  2133. * The BFSR bits [15:8] are shared between security states
  2134. * and we store them in the NS copy. They are RAZ/WI for
  2135. * NS code if AIRCR.BFHFNMINS is 0.
  2136. */
  2137. val = s->cpu->env.v7m.cfsr[attrs.secure];
  2138. if (!attrs.secure &&
  2139. !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
  2140. val &= ~R_V7M_CFSR_BFSR_MASK;
  2141. } else {
  2142. val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
  2143. }
  2144. val = extract32(val, (offset - 0xd28) * 8, size * 8);
  2145. break;
  2146. case 0xfe0 ... 0xfff: /* ID. */
  2147. if (offset & 3) {
  2148. val = 0;
  2149. } else {
  2150. val = nvic_id[(offset - 0xfe0) >> 2];
  2151. }
  2152. break;
  2153. default:
  2154. if (size == 4) {
  2155. val = nvic_readl(s, offset, attrs);
  2156. } else {
  2157. qemu_log_mask(LOG_GUEST_ERROR,
  2158. "NVIC: Bad read of size %d at offset 0x%x\n",
  2159. size, offset);
  2160. val = 0;
  2161. }
  2162. }
  2163. trace_nvic_sysreg_read(addr, val, size);
  2164. *data = val;
  2165. return MEMTX_OK;
  2166. }
/*
 * MMIO write handler for the NVIC system register region.
 *
 * Handles the registers that permit byte/halfword accesses directly;
 * all other registers require word accesses and are routed to
 * nvic_writel(). Returns MEMTX_ERROR (BusFault) for unprivileged
 * accesses that are not permitted. All paths that complete the write
 * funnel through exit_ok so the cached hflags are rebuilt once.
 */
static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    unsigned setval = 0;

    trace_nvic_sysreg_write(addr, value, size);

    if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    case 0x100 ... 0x13f: /* NVIC Set enable */
        /* SETENA and CLRENA share one implementation: remap the offset
         * into the CLRENA range and remember that written 1s mean "set".
         */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            /* NS accesses may only affect interrupts targeted at NS */
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i])) {
                s->vectors[startvec + i].enabled = setval;
            }
        }
        nvic_irq_update(s);
        goto exit_ok;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        /* the special logic in armv7m_nvic_set_pending()
         * is not needed since IRQs are never escalated
         */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            /*
             * Note that if the input line is still held high and the interrupt
             * is not active then rule R_CVJS requires that the Pending state
             * remains set; in that case we mustn't let it be cleared.
             */
            if (value & (1 << i) &&
                (attrs.secure || s->itns[startvec + i]) &&
                !(setval == 0 && s->vectors[startvec + i].level &&
                  !s->vectors[startvec + i].active)) {
                s->vectors[startvec + i].pending = setval;
            }
        }
        nvic_irq_update(s);
        goto exit_ok;
    case 0x300 ... 0x33f: /* NVIC Active */
        goto exit_ok; /* R/O */
    case 0x400 ... 0x5ef: /* NVIC Priority */
        startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */

        /* One priority byte per vector; 'size' bytes are written */
        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            if (attrs.secure || s->itns[startvec + i]) {
                set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
            }
        }
        nvic_irq_update(s);
        goto exit_ok;
    case 0xd18 ... 0xd1b: /* System Handler Priority (SHPR1) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            /* SHPR1 is not present without the Main Extension: WI */
            goto exit_ok;
        }
        /* fall through */
    case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
        for (i = 0; i < size; i++) {
            unsigned hdlidx = (offset - 0xd14) + i;
            int newprio = extract32(value, i * 8, 8);
            int sbank = shpr_bank(s, hdlidx, attrs);

            if (sbank < 0) {
                /* Field is RAZ/WI for this access */
                continue;
            }
            set_prio(s, hdlidx, sbank, newprio);
        }
        nvic_irq_update(s);
        goto exit_ok;
    case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
        if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
            goto exit_ok;
        }
        /* All bits are W1C, so construct 32 bit value with 0s in
         * the parts not written by the access size
         */
        value <<= ((offset - 0xd28) * 8);

        if (!attrs.secure &&
            !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
            /* BFSR bits are RAZ/WI for NS if BFHFNMINS is set */
            value &= ~R_V7M_CFSR_BFSR_MASK;
        }

        s->cpu->env.v7m.cfsr[attrs.secure] &= ~value;
        if (attrs.secure) {
            /* The BFSR bits [15:8] are shared between security states
             * and we store them in the NS copy.
             */
            s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
        }
        goto exit_ok;
    }
    if (size == 4) {
        nvic_writel(s, offset, value, attrs);
        goto exit_ok;
    }
    qemu_log_mask(LOG_GUEST_ERROR,
                  "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
    /* This is UNPREDICTABLE; treat as RAZ/WI */

 exit_ok:
    if (tcg_enabled()) {
        /* Ensure any changes made are reflected in the cached hflags. */
        arm_rebuild_hflags(&s->cpu->env);
    }
    return MEMTX_OK;
}
/*
 * Memory region ops for the NVIC system register region: reads and
 * writes go through the *_with_attrs handlers above so the handlers
 * can see the access's security and privilege attributes.
 */
static const MemoryRegionOps nvic_sysreg_ops = {
    .read_with_attrs = nvic_sysreg_read,
    .write_with_attrs = nvic_sysreg_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
  2288. static int nvic_post_load(void *opaque, int version_id)
  2289. {
  2290. NVICState *s = opaque;
  2291. unsigned i;
  2292. int resetprio;
  2293. /* Check for out of range priority settings */
  2294. resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
  2295. if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
  2296. s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
  2297. s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
  2298. return 1;
  2299. }
  2300. for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
  2301. if (s->vectors[i].prio & ~0xff) {
  2302. return 1;
  2303. }
  2304. }
  2305. nvic_recompute_state(s);
  2306. return 0;
  2307. }
/*
 * Migration state for one exception/interrupt vector (VecInfo).
 * Field order is part of the wire format; do not reorder.
 */
static const VMStateDescription vmstate_VecInfo = {
    .name = "armv7m_nvic_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT16(prio, VecInfo),
        VMSTATE_UINT8(enabled, VecInfo),
        VMSTATE_UINT8(pending, VecInfo),
        VMSTATE_UINT8(active, VecInfo),
        VMSTATE_UINT8(level, VecInfo),
        VMSTATE_END_OF_LIST()
    }
};
  2321. static bool nvic_security_needed(void *opaque)
  2322. {
  2323. NVICState *s = opaque;
  2324. return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
  2325. }
  2326. static int nvic_security_post_load(void *opaque, int version_id)
  2327. {
  2328. NVICState *s = opaque;
  2329. int i;
  2330. /* Check for out of range priority settings */
  2331. if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
  2332. && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
  2333. /* We can't cross-check against AIRCR.BFHFNMINS as we don't know
  2334. * if the CPU state has been migrated yet; a mismatch won't
  2335. * cause the emulation to blow up, though.
  2336. */
  2337. return 1;
  2338. }
  2339. for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
  2340. if (s->sec_vectors[i].prio & ~0xff) {
  2341. return 1;
  2342. }
  2343. }
  2344. return 0;
  2345. }
/*
 * Migration subsection for the Security Extension state: the secure
 * banked vectors, the secure PRIGROUP value and the per-interrupt
 * target-security (ITNS) flags. Only sent when nvic_security_needed()
 * reports the CPU has ARM_FEATURE_M_SECURITY.
 */
static const VMStateDescription vmstate_nvic_security = {
    .name = "armv7m_nvic/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nvic_security_needed,
    .post_load = &nvic_security_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
        VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Top-level migration state for the NVIC: all (non-secure bank)
 * vectors plus PRIGROUP; post_load re-validates priorities and
 * recomputes the cached exception state. Security Extension state
 * travels in the optional subsection above.
 */
static const VMStateDescription vmstate_nvic = {
    .name = "armv7m_nvic",
    .version_id = 4,
    .minimum_version_id = 4,
    .post_load = &nvic_post_load,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_nvic_security,
        NULL
    }
};
/* QOM properties configurable by the board/SoC that creates the NVIC */
static const Property props_nvic[] = {
    /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
    DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
    /*
     * Number of the maximum priority bits that can be used. 0 means
     * to use a reasonable default.
     */
    DEFINE_PROP_UINT8("num-prio-bits", NVICState, num_prio_bits, 0),
};
/*
 * Device reset: put all vectors, priorities and banked state into their
 * architected reset values. Registered as a legacy reset handler in
 * armv7m_nvic_class_init().
 */
static void armv7m_nvic_reset(DeviceState *dev)
{
    int resetprio;
    NVICState *s = NVIC(dev);

    /* Clear all vector state (enabled/pending/active/prio all zero) */
    memset(s->vectors, 0, sizeof(s->vectors));
    memset(s->sec_vectors, 0, sizeof(s->sec_vectors));
    s->prigroup[M_REG_NS] = 0;
    s->prigroup[M_REG_S] = 0;

    s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
    /* MEM, BUS, and USAGE are enabled through
     * the System Handler Control register
     */
    s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
    s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
    s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

    /* DebugMonitor is enabled via DEMCR.MON_EN */
    s->vectors[ARMV7M_EXCP_DEBUG].enabled = 0;

    /* Fixed negative priorities; Reset is -4 on v8M, -3 otherwise */
    resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
    s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
    s->vectors[ARMV7M_EXCP_NMI].prio = -2;
    s->vectors[ARMV7M_EXCP_HARD].prio = -1;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
        s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

        /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */
        s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;

        /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */
        s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
    } else {
        s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
    }

    /* Strictly speaking the reset handler should be enabled.
     * However, we don't simulate soft resets through the NVIC,
     * and the reset vector should never be pended.
     * So we leave it disabled to catch logic errors.
     */

    s->exception_prio = NVIC_NOEXC_PRIO;
    s->vectpending = 0;
    s->vectpending_is_s_banked = false;
    s->vectpending_prio = NVIC_NOEXC_PRIO;

    if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
        memset(s->itns, 0, sizeof(s->itns));
    } else {
        /* This state is constant and not guest accessible in a non-security
         * NVIC; we set the bits to true to avoid having to do a feature
         * bit check in the NVIC enable/pend/etc register accessors.
         */
        int i;

        for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
            s->itns[i] = true;
        }
    }

    if (tcg_enabled()) {
        /*
         * We updated state that affects the CPU's MMUidx and thus its
         * hflags; and we can't guarantee that we run before the CPU
         * reset function.
         */
        arm_rebuild_hflags(&s->cpu->env);
    }
}
  2448. static void nvic_systick_trigger(void *opaque, int n, int level)
  2449. {
  2450. NVICState *s = opaque;
  2451. if (level) {
  2452. /* SysTick just asked us to pend its exception.
  2453. * (This is different from an external interrupt line's
  2454. * behaviour.)
  2455. * n == 0 : NonSecure systick
  2456. * n == 1 : Secure systick
  2457. */
  2458. armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, n);
  2459. }
  2460. }
  2461. static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
  2462. {
  2463. NVICState *s = NVIC(dev);
  2464. /* The armv7m container object will have set our CPU pointer */
  2465. if (!s->cpu || !arm_feature(&s->cpu->env, ARM_FEATURE_M)) {
  2466. error_setg(errp, "The NVIC can only be used with a Cortex-M CPU");
  2467. return;
  2468. }
  2469. if (s->num_irq > NVIC_MAX_IRQ) {
  2470. error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
  2471. return;
  2472. }
  2473. qdev_init_gpio_in(dev, set_irq_level, s->num_irq);
  2474. /* include space for internal exception vectors */
  2475. s->num_irq += NVIC_FIRST_IRQ;
  2476. if (s->num_prio_bits == 0) {
  2477. /*
  2478. * If left unspecified, use 2 bits by default on Cortex-M0/M0+/M1
  2479. * and 8 bits otherwise.
  2480. */
  2481. s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2;
  2482. } else {
  2483. uint8_t min_prio_bits =
  2484. arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 3 : 2;
  2485. if (s->num_prio_bits < min_prio_bits || s->num_prio_bits > 8) {
  2486. error_setg(errp,
  2487. "num-prio-bits %d is outside "
  2488. "NVIC acceptable range [%d-8]",
  2489. s->num_prio_bits, min_prio_bits);
  2490. return;
  2491. }
  2492. }
  2493. /*
  2494. * This device provides a single memory region which covers the
  2495. * sysreg/NVIC registers from 0xE000E000 .. 0xE000EFFF, with the
  2496. * exception of the systick timer registers 0xE000E010 .. 0xE000E0FF.
  2497. */
  2498. memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
  2499. "nvic_sysregs", 0x1000);
  2500. sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->sysregmem);
  2501. }
  2502. static void armv7m_nvic_instance_init(Object *obj)
  2503. {
  2504. DeviceState *dev = DEVICE(obj);
  2505. NVICState *nvic = NVIC(obj);
  2506. SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
  2507. sysbus_init_irq(sbd, &nvic->excpout);
  2508. qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
  2509. qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger",
  2510. M_REG_NUM_BANKS);
  2511. qdev_init_gpio_in_named(dev, nvic_nmi_trigger, "NMI", 1);
  2512. }
  2513. static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
  2514. {
  2515. DeviceClass *dc = DEVICE_CLASS(klass);
  2516. dc->vmsd = &vmstate_nvic;
  2517. device_class_set_props(dc, props_nvic);
  2518. device_class_set_legacy_reset(dc, armv7m_nvic_reset);
  2519. dc->realize = armv7m_nvic_realize;
  2520. }
/* QOM type registration info: the NVIC is a sysbus device */
static const TypeInfo armv7m_nvic_info = {
    .name = TYPE_NVIC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = armv7m_nvic_instance_init,
    .instance_size = sizeof(NVICState),
    .class_init = armv7m_nvic_class_init,
    .class_size = sizeof(SysBusDeviceClass),
};
/* Register the NVIC type with QOM at module init time */
static void armv7m_nvic_register_types(void)
{
    type_register_static(&armv7m_nvic_info);
}

type_init(armv7m_nvic_register_types)