arm_gicv3_cpuif.c

/*
 * ARM Generic Interrupt Controller v3 (emulation)
 *
 * Copyright (c) 2016 Linaro Limited
 * Written by Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

/* This file contains the code for the system register interface
 * portions of the GICv3.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "gicv3_internal.h"
#include "hw/irq.h"
#include "cpu.h"
#include "target/arm/cpregs.h"
#include "sysemu/tcg.h"
#include "sysemu/qtest.h"

/*
 * Special case return value from hppvi_index(); must be larger than
 * the architecturally maximum possible list register index (which is 15)
 */
#define HPPVI_INDEX_VLPI 16

static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
{
    return env->gicv3state;
}

static bool gicv3_use_ns_bank(CPUARMState *env)
{
    /* Return true if we should use the NonSecure bank for a banked GIC
     * CPU interface register. Note that this differs from the
     * access_secure_reg() function because GICv3 banked registers are
     * banked even for AArch64, unlike the other CPU system registers.
     */
    return !arm_is_secure_below_el3(env);
}

/* The minimum BPR for the virtual interface is a configurable property */
static inline int icv_min_vbpr(GICv3CPUState *cs)
{
    return 7 - cs->vprebits;
}

static inline int ich_num_aprs(GICv3CPUState *cs)
{
    /* Return the number of virtual APR registers (1, 2, or 4) */
    int aprmax = 1 << (cs->vprebits - 5);
    assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
    return aprmax;
}
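
/*
 * Worked example, assuming vprebits == 5: aprmax == 1 << 0 == 1, so a
 * single APR per group suffices; vprebits == 6 gives 2 and vprebits == 7
 * gives 4, matching the "1, 2, or 4" comment above.
 */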

/* Simple accessor functions for LR fields */
static uint32_t ich_lr_vintid(uint64_t lr)
{
    return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
}

static uint32_t ich_lr_pintid(uint64_t lr)
{
    return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
}

static uint32_t ich_lr_prio(uint64_t lr)
{
    return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
}

static int ich_lr_state(uint64_t lr)
{
    return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
}

static bool icv_access(CPUARMState *env, int hcr_flags)
{
    /* Return true if this ICC_ register access should really be
     * directed to an ICV_ access. hcr_flags is a mask of
     * HCR_EL2 bits to check: we treat this as an ICV_ access
     * if we are in NS EL1 and at least one of the specified
     * HCR_EL2 bits is set.
     *
     * ICV registers fall into three categories:
     * * access if NS EL1 and HCR_EL2.FMO == 1:
     *   all ICV regs with '0' in their name
     * * access if NS EL1 and HCR_EL2.IMO == 1:
     *   all ICV regs with '1' in their name
     * * access if NS EL1 and either IMO or FMO == 1:
     *   CTLR, DIR, PMR, RPR
     */
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO);

    return flagmatch && arm_current_el(env) == 1
        && !arm_is_secure_below_el3(env);
}

static int read_vbpr(GICv3CPUState *cs, int grp)
{
    /* Read VBPR value out of the VMCR field (caller must handle
     * VCBPR effects if required)
     */
    if (grp == GICV3_G0) {
        return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
                         ICH_VMCR_EL2_VBPR0_LENGTH);
    } else {
        return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
                         ICH_VMCR_EL2_VBPR1_LENGTH);
    }
}

static void write_vbpr(GICv3CPUState *cs, int grp, int value)
{
    /* Write new VBPR value, handling the "writing a value less than
     * the minimum sets it to the minimum" semantics.
     */
    int min = icv_min_vbpr(cs);

    if (grp != GICV3_G0) {
        min++;
    }

    value = MAX(value, min);

    if (grp == GICV3_G0) {
        cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
                                     ICH_VMCR_EL2_VBPR0_LENGTH, value);
    } else {
        cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
                                     ICH_VMCR_EL2_VBPR1_LENGTH, value);
    }
}

static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
{
    /* Return a mask word which clears the unimplemented priority bits
     * from a priority value for a virtual interrupt. (Not to be confused
     * with the group priority, whose mask depends on the value of VBPR
     * for the interrupt group.)
     */
    return ~0U << (8 - cs->vpribits);
}
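
/*
 * Worked example, assuming vpribits == 5: the mask is ~0U << 3, which
 * clears bits [2:0], so a priority of 0x57 is reduced to 0x50: only
 * priority bits [7:3] are implemented.
 */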

static int ich_highest_active_virt_prio(GICv3CPUState *cs)
{
    /* Calculate the current running priority based on the set bits
     * in the ICH Active Priority Registers.
     */
    int i;
    int aprmax = ich_num_aprs(cs);

    for (i = 0; i < aprmax; i++) {
        uint32_t apr = cs->ich_apr[GICV3_G0][i] |
            cs->ich_apr[GICV3_G1NS][i];

        if (!apr) {
            continue;
        }
        return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
    }
    /* No current active interrupts: return idle priority */
    return 0xff;
}
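
/*
 * Worked example, assuming vprebits == 5 (so icv_min_vbpr() == 2): each
 * APR bit represents a group priority step of 1 << 3, so if the lowest
 * set bit across both groups is bit 2 of the first APR, the running
 * priority is (0 * 32 + 2) << 3 == 0x10.
 */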

static int hppvi_index(GICv3CPUState *cs)
{
    /*
     * Return the list register index of the highest priority pending
     * virtual interrupt, as per the HighestPriorityVirtualInterrupt
     * pseudocode. If no pending virtual interrupts, return -1.
     * If the highest priority pending virtual interrupt is a vLPI,
     * return HPPVI_INDEX_VLPI.
     * (The pseudocode handles checking whether the vLPI is higher
     * priority than the highest priority list register at every
     * callsite of HighestPriorityVirtualInterrupt; we check it here.)
     */
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;
    int idx = -1;
    int i;
    /* Note that a list register entry with a priority of 0xff will
     * never be reported by this function; this is the architecturally
     * correct behaviour.
     */
    int prio = 0xff;

    if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
        /* Both groups disabled, definitely nothing to do */
        return idx;
    }

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];
        int thisprio;

        if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
            /* Not Pending */
            continue;
        }

        /* Ignore interrupts if relevant group enable not set */
        if (lr & ICH_LR_EL2_GROUP) {
            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
                continue;
            }
        } else {
            if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
                continue;
            }
        }

        thisprio = ich_lr_prio(lr);

        if (thisprio < prio) {
            prio = thisprio;
            idx = i;
        }
    }

    /*
     * "no pending vLPI" is indicated with prio = 0xff, which always
     * fails the priority check here. vLPIs are only considered
     * when we are in Non-Secure state.
     */
    if (cs->hppvlpi.prio < prio && !arm_is_secure(env)) {
        if (cs->hppvlpi.grp == GICV3_G0) {
            if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0) {
                return HPPVI_INDEX_VLPI;
            }
        } else {
            if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1) {
                return HPPVI_INDEX_VLPI;
            }
        }
    }

    return idx;
}

static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
{
    /* Return a mask word which clears the subpriority bits from
     * a priority value for a virtual interrupt in the specified group.
     * This depends on the VBPR value.
     * If using VBPR0 then:
     *  a BPR of 0 means the group priority bits are [7:1];
     *  a BPR of 1 means they are [7:2], and so on down to
     *  a BPR of 7 meaning no group priority bits at all.
     * If using VBPR1 then:
     *  a BPR of 0 is impossible (the minimum value is 1)
     *  a BPR of 1 means the group priority bits are [7:1];
     *  a BPR of 2 means they are [7:2], and so on down to
     *  a BPR of 7 meaning the group priority is [7].
     *
     * Which BPR to use depends on the group of the interrupt and
     * the current ICH_VMCR_EL2.VCBPR settings.
     *
     * This corresponds to the VGroupBits() pseudocode.
     */
    int bpr;

    if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
        group = GICV3_G0;
    }

    bpr = read_vbpr(cs, group);
    if (group == GICV3_G1NS) {
        assert(bpr > 0);
        bpr--;
    }

    return ~0U << (bpr + 1);
}
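
/*
 * Worked example, assuming a group 0 interrupt with VBPR0 == 2: the mask
 * is ~0U << 3 (0xf8 in the low byte), so an LR priority of 0x53 has group
 * priority 0x50 and subpriority 0x03; only the group priority takes part
 * in preemption decisions.
 */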

static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
{
    /* Return true if we can signal this virtual interrupt defined by
     * the given list register value; see the pseudocode functions
     * CanSignalVirtualInterrupt and CanSignalVirtualInt.
     * Compare also icc_hppi_can_preempt() which is the non-virtual
     * equivalent of these checks.
     */
    int grp;
    uint32_t mask, prio, rprio, vpmr;

    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
        /* Virtual interface disabled */
        return false;
    }

    /* We don't need to check that this LR is in Pending state because
     * that has already been done in hppvi_index().
     */
    prio = ich_lr_prio(lr);
    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                     ICH_VMCR_EL2_VPMR_LENGTH);

    if (prio >= vpmr) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = ich_highest_active_virt_prio(cs);
    if (rprio == 0xff) {
        /* No running interrupt so we can preempt */
        return true;
    }

    grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

    mask = icv_gprio_mask(cs, grp);

    /* We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((prio & mask) < (rprio & mask)) {
        return true;
    }

    return false;
}
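
/*
 * Worked example, assuming VPMR == 0x80, VBPR0 == 2 (mask 0xf8) and a
 * running group priority of 0x50: a pending group 0 LR of priority 0x38
 * preempts (0x38 < 0x80 and 0x38 < 0x50), while one of priority 0x58
 * does not (0x58 & 0xf8 == 0x58, which is not below 0x50).
 */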

static bool icv_hppvlpi_can_preempt(GICv3CPUState *cs)
{
    /*
     * Return true if we can signal the highest priority pending vLPI.
     * We can assume we're Non-secure because hppvi_index() already
     * tested for that.
     */
    uint32_t mask, rprio, vpmr;

    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
        /* Virtual interface disabled */
        return false;
    }

    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                     ICH_VMCR_EL2_VPMR_LENGTH);

    if (cs->hppvlpi.prio >= vpmr) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = ich_highest_active_virt_prio(cs);
    if (rprio == 0xff) {
        /* No running interrupt so we can preempt */
        return true;
    }

    mask = icv_gprio_mask(cs, cs->hppvlpi.grp);

    /*
     * We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((cs->hppvlpi.prio & mask) < (rprio & mask)) {
        return true;
    }

    return false;
}

static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
                                                uint32_t *misr)
{
    /* Return a set of bits indicating the EOI maintenance interrupt status
     * for each list register. The EOI maintenance interrupt status is
     * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
     * (see the GICv3 spec for the ICH_EISR_EL2 register).
     * If misr is not NULL then we should also collect the information
     * about the MISR.EOI, MISR.NP and MISR.U bits.
     */
    uint32_t value = 0;
    int validcount = 0;
    bool seenpending = false;
    int i;

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];

        if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
            == ICH_LR_EL2_EOI) {
            value |= (1 << i);
        }
        if ((lr & ICH_LR_EL2_STATE_MASK)) {
            validcount++;
        }
        if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
            seenpending = true;
        }
    }

    if (misr) {
        if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
            *misr |= ICH_MISR_EL2_U;
        }
        if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
            *misr |= ICH_MISR_EL2_NP;
        }
        if (value) {
            *misr |= ICH_MISR_EL2_EOI;
        }
    }
    return value;
}

static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
{
    /* Return a set of bits indicating the maintenance interrupt status
     * (as seen in the ICH_MISR_EL2 register).
     */
    uint32_t value = 0;

    /* Scan list registers and fill in the U, NP and EOI bits */
    eoi_maintenance_interrupt_state(cs, &value);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_LRENPIE) &&
        (cs->ich_hcr_el2 & ICH_HCR_EL2_EOICOUNT_MASK)) {
        value |= ICH_MISR_EL2_LRENP;
    }

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
        (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
        value |= ICH_MISR_EL2_VGRP0E;
    }

    /* VGRP0D is raised when group 0 is disabled, so this must test VENG0 */
    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
        !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
        value |= ICH_MISR_EL2_VGRP0D;
    }
    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
        (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
        value |= ICH_MISR_EL2_VGRP1E;
    }

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
        !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
        value |= ICH_MISR_EL2_VGRP1D;
    }

    return value;
}

void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs)
{
    /*
     * Tell the CPU about any pending virtual interrupts.
     * This should only be called for changes that affect the
     * vIRQ and vFIQ status and do not change the maintenance
     * interrupt status. This means that unlike gicv3_cpuif_virt_update()
     * this function won't recursively call back into the GIC code.
     * The main use of this is when the redistributor has changed the
     * highest priority pending virtual LPI.
     */
    int idx;
    int irqlevel = 0;
    int fiqlevel = 0;

    idx = hppvi_index(cs);
    trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx,
                                  cs->hppvlpi.irq, cs->hppvlpi.grp,
                                  cs->hppvlpi.prio);
    if (idx == HPPVI_INDEX_VLPI) {
        if (icv_hppvlpi_can_preempt(cs)) {
            if (cs->hppvlpi.grp == GICV3_G0) {
                fiqlevel = 1;
            } else {
                irqlevel = 1;
            }
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];

        if (icv_hppi_can_preempt(cs, lr)) {
            /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
            if (lr & ICH_LR_EL2_GROUP) {
                irqlevel = 1;
            } else {
                fiqlevel = 1;
            }
        }
    }

    trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
    qemu_set_irq(cs->parent_vfiq, fiqlevel);
    qemu_set_irq(cs->parent_virq, irqlevel);
}

static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
{
    /*
     * Tell the CPU about any pending virtual interrupts or
     * maintenance interrupts, following a change to the state
     * of the CPU interface relevant to virtual interrupts.
     *
     * CAUTION: this function will call qemu_set_irq() on the
     * CPU maintenance IRQ line, which is typically wired up
     * to the GIC as a per-CPU interrupt. This means that it
     * will recursively call back into the GIC code via
     * gicv3_redist_set_irq() and thus into the CPU interface code's
     * gicv3_cpuif_update(). It is therefore important that this
     * function is only called as the final action of a CPU interface
     * register write implementation, after all the GIC state
     * fields have been updated. gicv3_cpuif_update() also must
     * not cause this function to be called, but that happens
     * naturally as a result of there being no architectural
     * linkage between the physical and virtual GIC logic.
     */
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    int maintlevel = 0;

    gicv3_cpuif_virt_irq_fiq_update(cs);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) &&
        maintenance_interrupt_state(cs) != 0) {
        maintlevel = 1;
    }

    trace_gicv3_cpuif_virt_set_maint_irq(gicv3_redist_affid(cs), maintlevel);
    qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel);
}

static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
    uint64_t value = cs->ich_apr[grp][regno];

    trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
    return value;
}

static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;

    trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;

    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
    uint64_t bpr;
    bool satinc = false;

    if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
        /* reads return bpr0 + 1 saturated to 7, writes ignored */
        grp = GICV3_G0;
        satinc = true;
    }

    bpr = read_vbpr(cs, grp);

    if (satinc) {
        bpr++;
        bpr = MIN(bpr, 7);
    }

    trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);

    return bpr;
}
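
/*
 * Illustrative case: with ICH_VMCR_EL2.VCBPR set and VBPR0 == 5, a guest
 * read of ICV_BPR1 returns MIN(5 + 1, 7) == 6, and guest writes to
 * ICV_BPR1 are ignored (see icv_bpr_write() below).
 */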

static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;

    trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
                              gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
        /* reads return bpr0 + 1 saturated to 7, writes ignored */
        return;
    }

    write_vbpr(cs, grp, value);

    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                      ICH_VMCR_EL2_VPMR_LENGTH);

    trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
    return value;
}

static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);

    value &= icv_fullprio_mask(cs);

    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                                 ICH_VMCR_EL2_VPMR_LENGTH, value);

    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int enbit;
    uint64_t value;

    enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
    value = extract64(cs->ich_vmcr_el2, enbit, 1);

    trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
                                gicv3_redist_affid(cs), value);
    return value;
}

static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int enbit;

    trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
                                 gicv3_redist_affid(cs), value);

    enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;

    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
    gicv3_cpuif_virt_update(cs);
}

static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
     * should match the ones reported in ich_vtr_read().
     */
    value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        ((cs->vpribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);

    if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
        value |= ICC_CTLR_EL1_EOIMODE;
    }

    if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
        value |= ICC_CTLR_EL1_CBPR;
    }

    trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
    return value;
}

static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);

    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
                                 1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
    cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
                                 1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);

    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int prio = ich_highest_active_virt_prio(cs);

    trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
    return prio;
}

static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx = hppvi_index(cs);
    uint64_t value = INTID_SPURIOUS;

    if (idx == HPPVI_INDEX_VLPI) {
        if (cs->hppvlpi.grp == grp) {
            value = cs->hppvlpi.irq;
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

        if (grp == thisgrp) {
            value = ich_lr_vintid(lr);
        }
    }

    trace_gicv3_icv_hppir_read(ri->crm == 8 ? 0 : 1,
                               gicv3_redist_affid(cs), value);
    return value;
}

static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
{
    /* Activate the interrupt in the specified list register
     * by moving it from Pending to Active state, and update the
     * Active Priority Registers.
     */
    uint32_t mask = icv_gprio_mask(cs, grp);
    int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
    int aprbit = prio >> (8 - cs->vprebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;

    cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
    cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;
    cs->ich_apr[grp][regno] |= (1 << regbit);
}
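
/*
 * Worked example, assuming vprebits == 6 and a masked group priority of
 * 0x48: aprbit == 0x48 >> 2 == 18, so bit 18 of the group's first APR
 * (regno == 0, regbit == 18) records the newly active interrupt.
 */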

static void icv_activate_vlpi(GICv3CPUState *cs)
{
    uint32_t mask = icv_gprio_mask(cs, cs->hppvlpi.grp);
    int prio = cs->hppvlpi.prio & mask;
    int aprbit = prio >> (8 - cs->vprebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;

    cs->ich_apr[cs->hppvlpi.grp][regno] |= (1 << regbit);
    gicv3_redist_vlpi_pending(cs, cs->hppvlpi.irq, 0);
}

static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx = hppvi_index(cs);
    uint64_t intid = INTID_SPURIOUS;

    if (idx == HPPVI_INDEX_VLPI) {
        if (cs->hppvlpi.grp == grp && icv_hppvlpi_can_preempt(cs)) {
            intid = cs->hppvlpi.irq;
            icv_activate_vlpi(cs);
        }
    } else if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

        if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
            intid = ich_lr_vintid(lr);
            if (!gicv3_intid_is_special(intid)) {
                icv_activate_irq(cs, idx, grp);
            } else {
                /* Interrupt goes from Pending to Invalid */
                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
                /* We will now return the (bogus) ID from the list register,
                 * as per the pseudocode.
                 */
            }
        }
    }

    trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
                             gicv3_redist_affid(cs), intid);

    gicv3_cpuif_virt_update(cs);

    return intid;
}

static uint32_t icc_fullprio_mask(GICv3CPUState *cs)
{
    /*
     * Return a mask word which clears the unimplemented priority bits
     * from a priority value for a physical interrupt. (Not to be confused
     * with the group priority, whose mask depends on the value of BPR
     * for the interrupt group.)
     */
    return ~0U << (8 - cs->pribits);
}

static inline int icc_min_bpr(GICv3CPUState *cs)
{
    /* The minimum BPR for the physical interface. */
    return 7 - cs->prebits;
}

static inline int icc_min_bpr_ns(GICv3CPUState *cs)
{
    return icc_min_bpr(cs) + 1;
}

static inline int icc_num_aprs(GICv3CPUState *cs)
{
    /* Return the number of APR registers (1, 2, or 4) */
    int aprmax = 1 << MAX(cs->prebits - 5, 0);
    assert(aprmax <= ARRAY_SIZE(cs->icc_apr[0]));
    return aprmax;
}
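
/*
 * As for ich_num_aprs() above: prebits == 5 gives 1 APR register,
 * prebits == 6 gives 2, and prebits == 7 gives 4; the MAX() guards the
 * shift against a (theoretical) prebits value below 5.
 */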

static int icc_highest_active_prio(GICv3CPUState *cs)
{
    /* Calculate the current running priority based on the set bits
     * in the Active Priority Registers.
     */
    int i;

    for (i = 0; i < icc_num_aprs(cs); i++) {
        uint32_t apr = cs->icc_apr[GICV3_G0][i] |
            cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];

        if (!apr) {
            continue;
        }
        return (i * 32 + ctz32(apr)) << (icc_min_bpr(cs) + 1);
    }
    /* No current active interrupts: return idle priority */
    return 0xff;
}

static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
{
    /* Return a mask word which clears the subpriority bits from
     * a priority value for an interrupt in the specified group.
     * This depends on the BPR value. For CBPR0 (S or NS):
     *  a BPR of 0 means the group priority bits are [7:1];
     *  a BPR of 1 means they are [7:2], and so on down to
     *  a BPR of 7 meaning no group priority bits at all.
     * For CBPR1 NS:
     *  a BPR of 0 is impossible (the minimum value is 1)
     *  a BPR of 1 means the group priority bits are [7:1];
     *  a BPR of 2 means they are [7:2], and so on down to
     *  a BPR of 7 meaning the group priority is [7].
     *
     * Which BPR to use depends on the group of the interrupt and
     * the current ICC_CTLR.CBPR settings.
     *
     * This corresponds to the GroupBits() pseudocode.
     */
    int bpr;

    if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
        (group == GICV3_G1NS &&
         cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        group = GICV3_G0;
    }

    bpr = cs->icc_bpr[group] & 7;

    if (group == GICV3_G1NS) {
        assert(bpr > 0);
        bpr--;
    }

    return ~0U << (bpr + 1);
}
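
/*
 * Illustrative case: when the relevant ICC_CTLR_EL1.CBPR is set, a group
 * 1 interrupt takes its mask from BPR0, so e.g. BPR0 == 1 gives a mask of
 * ~0U << 2 (0xfc) and a priority of 0x55 has group priority 0x54.
 */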

static bool icc_no_enabled_hppi(GICv3CPUState *cs)
{
    /* Return true if there is no pending interrupt, or the
     * highest priority pending interrupt is in a group which has been
     * disabled at the CPU interface by the ICC_IGRPEN* register enable bits.
     */
    return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
}

static bool icc_hppi_can_preempt(GICv3CPUState *cs)
{
    /* Return true if we have a pending interrupt of sufficient
     * priority to preempt.
     */
    int rprio;
    uint32_t mask;

    if (icc_no_enabled_hppi(cs)) {
        return false;
    }

    if (cs->hppi.prio >= cs->icc_pmr_el1) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = icc_highest_active_prio(cs);
    if (rprio == 0xff) {
        /* No currently running interrupt so we can preempt */
        return true;
    }

    mask = icc_gprio_mask(cs, cs->hppi.grp);

    /* We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((cs->hppi.prio & mask) < (rprio & mask)) {
        return true;
    }

    return false;
}
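
/*
 * Worked example: with ICC_PMR_EL1 == 0xf0 and no active interrupts
 * (rprio == 0xff), an enabled pending interrupt of priority 0x80 passes
 * the PMR check (0x80 < 0xf0) and preempts immediately, without the
 * group priority comparison being reached.
 */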

void gicv3_cpuif_update(GICv3CPUState *cs)
{
    /* Tell the CPU about its highest priority pending interrupt */
    int irqlevel = 0;
    int fiqlevel = 0;
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;

    g_assert(qemu_mutex_iothread_locked());

    trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
                             cs->hppi.grp, cs->hppi.prio);

    if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
        /* If a Security-enabled GIC sends a G1S interrupt to a
         * Security-disabled CPU, we must treat it as if it were G0.
         */
        cs->hppi.grp = GICV3_G0;
    }

    if (icc_hppi_can_preempt(cs)) {
        /* We have an interrupt: should we signal it as IRQ or FIQ?
         * This is described in the GICv3 spec section 4.6.2.
         */
        bool isfiq;

        switch (cs->hppi.grp) {
        case GICV3_G0:
            isfiq = true;
            break;
        case GICV3_G1:
            isfiq = (!arm_is_secure(env) ||
                     (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3)));
            break;
        case GICV3_G1NS:
            isfiq = arm_is_secure(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (isfiq) {
            fiqlevel = 1;
        } else {
            irqlevel = 1;
        }
    }

    trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);

    qemu_set_irq(cs->parent_fiq, fiqlevel);
    qemu_set_irq(cs->parent_irq, irqlevel);
}

static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint32_t value = cs->icc_pmr_el1;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        return icv_pmr_read(env, ri);
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
        (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS access and Group 0 is inaccessible to NS: return the
         * NS view of the current priority
         */
        if ((value & 0x80) == 0) {
            /* Secure priorities not visible to NS */
            value = 0;
        } else if (value != 0xff) {
            value = (value << 1) & 0xff;
        }
    }

    trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);

    return value;
}

static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        return icv_pmr_write(env, ri, value);
    }

    trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
        (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS access and Group 0 is inaccessible to NS: return the
         * NS view of the current priority
         */
        if (!(cs->icc_pmr_el1 & 0x80)) {
            /* Current PMR in the secure range, don't allow NS to change it */
            return;
        }
        value = (value >> 1) | 0x80;
    }
    value &= icc_fullprio_mask(cs);
    cs->icc_pmr_el1 = value;
    gicv3_cpuif_update(cs);
}

static void icc_activate_irq(GICv3CPUState *cs, int irq)
{
    /* Move the interrupt from the Pending state to Active, and update
     * the Active Priority Registers
     */
    uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
    int prio = cs->hppi.prio & mask;
    int aprbit = prio >> (8 - cs->prebits);
    int regno = aprbit / 32;
    int regbit = aprbit % 32;

    cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);

    if (irq < GIC_INTERNAL) {
        cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
        cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
        gicv3_redist_update(cs);
    } else if (irq < GICV3_LPI_INTID_START) {
        gicv3_gicd_active_set(cs->gic, irq);
        gicv3_gicd_pending_clear(cs->gic, irq);
        gicv3_update(cs->gic, irq, 1);
    } else {
        gicv3_redist_lpi_pending(cs, irq, 0);
    }
}

static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
{
    /* Return the highest priority pending interrupt register value
     * for group 0.
     */
    bool irq_is_secure;

    if (cs->hppi.prio == 0xff) {
        return INTID_SPURIOUS;
    }

    /* Check whether we can return the interrupt or if we should return
     * a special identifier, as per the CheckGroup0ForSpecialIdentifiers
     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
     * is always zero.)
     */
    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
                     (cs->hppi.grp != GICV3_G1NS));

    if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
        return INTID_SPURIOUS;
    }
    if (irq_is_secure && !arm_is_secure(env)) {
        /* Secure interrupts not visible to Nonsecure */
        return INTID_SPURIOUS;
    }

    if (cs->hppi.grp != GICV3_G0) {
        /* Indicate to EL3 that there's a Group 1 interrupt for the other
         * state pending.
         */
        return irq_is_secure ? INTID_SECURE : INTID_NONSECURE;
    }

    return cs->hppi.irq;
}

static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
{
    /* Return the highest priority pending interrupt register value
     * for group 1.
     */
    bool irq_is_secure;

    if (cs->hppi.prio == 0xff) {
        return INTID_SPURIOUS;
    }

    /* Check whether we can return the interrupt or if we should return
     * a special identifier, as per the CheckGroup1ForSpecialIdentifiers
     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
     * is always zero.)
     */
    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
                     (cs->hppi.grp != GICV3_G1NS));

    if (cs->hppi.grp == GICV3_G0) {
        /* Group 0 interrupts not visible via HPPIR1 */
        return INTID_SPURIOUS;
    }
    if (irq_is_secure) {
        if (!arm_is_secure(env)) {
            /* Secure interrupts not visible in Non-secure */
            return INTID_SPURIOUS;
        }
    } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
        /* Group 1 non-secure interrupts not visible in Secure EL1 */
        return INTID_SPURIOUS;
    }

    return cs->hppi.irq;
}

static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t intid;

    if (icv_access(env, HCR_FMO)) {
        return icv_iar_read(env, ri);
    }

    if (!icc_hppi_can_preempt(cs)) {
        intid = INTID_SPURIOUS;
    } else {
        intid = icc_hppir0_value(cs, env);
    }

    if (!gicv3_intid_is_special(intid)) {
        icc_activate_irq(cs, intid);
    }

    trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
    return intid;
}

static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t intid;

    if (icv_access(env, HCR_IMO)) {
        return icv_iar_read(env, ri);
    }

    if (!icc_hppi_can_preempt(cs)) {
        intid = INTID_SPURIOUS;
    } else {
        intid = icc_hppir1_value(cs, env);
    }

    if (!gicv3_intid_is_special(intid)) {
        icc_activate_irq(cs, intid);
    }

    trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
    return intid;
}

static void icc_drop_prio(GICv3CPUState *cs, int grp)
{
    /* Drop the priority of the currently active interrupt in
     * the specified group.
     *
     * Note that we can guarantee (because of the requirement to nest
     * ICC_IAR reads [which activate an interrupt and raise priority]
     * with ICC_EOIR writes [which drop the priority for the interrupt])
     * that the interrupt we're being called for is the highest priority
     * active interrupt, meaning that it has the lowest set bit in the
     * APR registers.
     *
     * If the guest does not honour the ordering constraints then the
     * behaviour of the GIC is UNPREDICTABLE, which for us means that
     * the values of the APR registers might become incorrect and the
     * running priority will be wrong, so interrupts that should preempt
     * might not do so, and interrupts that should not preempt might do so.
     */
    int i;

    for (i = 0; i < icc_num_aprs(cs); i++) {
        uint64_t *papr = &cs->icc_apr[grp][i];

        if (!*papr) {
            continue;
        }
        /* Clear the lowest set bit */
        *papr &= *papr - 1;
        break;
    }

    /* running priority change means we need an update for this cpu i/f */
    gicv3_cpuif_update(cs);
}
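
/*
 * The "*papr &= *papr - 1" idiom clears only the lowest set bit: e.g.
 * 0x28 becomes 0x20. Since the highest priority active interrupt owns
 * the lowest set APR bit, this drops exactly that priority.
 */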

static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
{
    /* Return true if we should split priority drop and interrupt
     * deactivation, ie whether the relevant EOIMode bit is set.
     */
    if (arm_is_el3_or_mon(env)) {
        return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
    }
    if (arm_is_secure_below_el3(env)) {
        return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
    } else {
        return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
    }
}

static int icc_highest_active_group(GICv3CPUState *cs)
{
    /* Return the group with the highest priority active interrupt.
     * We can do this by just comparing the APRs to see which one
     * has the lowest set bit.
     * (If more than one group is active at the same priority then
     * we're in UNPREDICTABLE territory.)
     */
    int i;

    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
        int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
        int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
        int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);

        if (g1nsctz < g0ctz && g1nsctz < g1ctz) {
            return GICV3_G1NS;
        }
        if (g1ctz < g0ctz) {
            return GICV3_G1;
        }
        if (g0ctz < 32) {
            return GICV3_G0;
        }
    }
    /* No set active bits? UNPREDICTABLE; return -1 so the caller
     * ignores the spurious EOI attempt.
     */
    return -1;
}

static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
{
    if (irq < GIC_INTERNAL) {
        cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
        gicv3_redist_update(cs);
    } else {
        gicv3_gicd_active_clear(cs->gic, irq);
        gicv3_update(cs->gic, irq, 1);
    }
}

static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
{
    /* Return true if we should split priority drop and interrupt
     * deactivation, ie whether the virtual EOIMode bit is set.
     */
    return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
}

static int icv_find_active(GICv3CPUState *cs, int irq)
{
    /* Given an interrupt number for an active interrupt, return the index
     * of the corresponding list register, or -1 if there is no match.
     * Corresponds to FindActiveVirtualInterrupt pseudocode.
     */
    int i;

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];

        if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) {
            return i;
        }
    }

    return -1;
}

static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
{
    /* Deactivate the interrupt in the specified list register index */
    uint64_t lr = cs->ich_lr_el2[idx];

    if (lr & ICH_LR_EL2_HW) {
        /* Deactivate the associated physical interrupt */
        int pirq = ich_lr_pintid(lr);

        if (pirq < INTID_SECURE) {
            icc_deactivate_irq(cs, pirq);
        }
    }

    /* Clear the 'active' part of the state, so ActivePending->Pending
     * and Active->Invalid.
     */
    lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT;
    cs->ich_lr_el2[idx] = lr;
}

static void icv_increment_eoicount(GICv3CPUState *cs)
{
    /* Increment the EOICOUNT field in ICH_HCR_EL2 */
    int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
                             ICH_HCR_EL2_EOICOUNT_LENGTH);

    cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
                                ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
}

static int icv_drop_prio(GICv3CPUState *cs)
{
    /* Drop the priority of the currently active virtual interrupt
     * (favouring group 0 if there is a set active bit at
     * the same priority for both group 0 and group 1).
     * Return the priority value for the bit we just cleared,
     * or 0xff if no bits were set in the AP registers at all.
     * Note that though the ich_apr[] are uint64_t only the low
     * 32 bits are actually relevant.
     */
    int i;
    int aprmax = ich_num_aprs(cs);

    for (i = 0; i < aprmax; i++) {
        uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
        uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
        int apr0count, apr1count;

        if (!*papr0 && !*papr1) {
            continue;
        }

        /* We can't just use the bit-twiddling hack icc_drop_prio() does
         * because we need to return the bit number we cleared so
         * it can be compared against the list register's priority field.
         */
        apr0count = ctz32(*papr0);
        apr1count = ctz32(*papr1);

        if (apr0count <= apr1count) {
            *papr0 &= *papr0 - 1;
            return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
        } else {
            *papr1 &= *papr1 - 1;
            return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
        }
    }

    return 0xff;
}
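
/*
 * Worked example, assuming vprebits == 5 (icv_min_vbpr() == 2): clearing
 * bit 9 of the first group 1 APR returns (9 + 0 * 32) << 3 == 0x48, which
 * icv_eoir_write() below compares against the list register's group
 * priority.
 */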
  1155. static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
  1156. uint64_t value)
  1157. {
  1158. /* Deactivate interrupt */
  1159. GICv3CPUState *cs = icc_cs_from_env(env);
  1160. int idx;
  1161. int irq = value & 0xffffff;
  1162. trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);
  1163. if (irq >= GICV3_MAXIRQ) {
  1164. /* Also catches special interrupt numbers and LPIs */
  1165. return;
  1166. }
  1167. if (!icv_eoi_split(env, cs)) {
  1168. return;
  1169. }
  1170. idx = icv_find_active(cs, irq);
  1171. if (idx < 0) {
  1172. /* No list register matching this, so increment the EOI count
  1173. * (might trigger a maintenance interrupt)
  1174. */
  1175. icv_increment_eoicount(cs);
  1176. } else {
  1177. icv_deactivate_irq(cs, idx);
  1178. }
  1179. gicv3_cpuif_virt_update(cs);
  1180. }
  1181. static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
  1182. uint64_t value)
  1183. {
  1184. /* End of Interrupt */
  1185. GICv3CPUState *cs = icc_cs_from_env(env);
  1186. int irq = value & 0xffffff;
  1187. int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
  1188. int idx, dropprio;
  1189. trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
  1190. gicv3_redist_affid(cs), value);
  1191. if (gicv3_intid_is_special(irq)) {
  1192. return;
  1193. }
  1194. /* We implement the IMPDEF choice of "drop priority before doing
  1195. * error checks" (because that lets us avoid scanning the AP
  1196. * registers twice).
  1197. */
  1198. dropprio = icv_drop_prio(cs);
  1199. if (dropprio == 0xff) {
  1200. /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
  1201. * whether the list registers are checked in this
  1202. * situation; we choose not to.
  1203. */
  1204. return;
  1205. }
  1206. idx = icv_find_active(cs, irq);
  1207. if (idx < 0) {
  1208. /* No valid list register corresponding to EOI ID */
  1209. icv_increment_eoicount(cs);
  1210. } else {
  1211. uint64_t lr = cs->ich_lr_el2[idx];
  1212. int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
  1213. int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
  1214. if (thisgrp == grp && lr_gprio == dropprio) {
  1215. if (!icv_eoi_split(env, cs)) {
  1216. /* Priority drop and deactivate not split: deactivate irq now */
  1217. icv_deactivate_irq(cs, idx);
  1218. }
  1219. }
  1220. }
  1221. gicv3_cpuif_virt_update(cs);
  1222. }

static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* End of Interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    int grp;
    bool is_eoir0 = ri->crm == 8;

    if (icv_access(env, is_eoir0 ? HCR_FMO : HCR_IMO)) {
        icv_eoir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_eoir_write(is_eoir0 ? 0 : 1,
                               gicv3_redist_affid(cs), value);

    if ((irq >= cs->gic->num_irq) &&
        !(cs->gic->lpi_enable && (irq >= GICV3_LPI_INTID_START))) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }

    grp = icc_highest_active_group(cs);
    switch (grp) {
    case GICV3_G0:
        if (!is_eoir0) {
            return;
        }
        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS)
            && arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
            return;
        }
        break;
    case GICV3_G1:
        if (is_eoir0) {
            return;
        }
        if (!arm_is_secure(env)) {
            return;
        }
        break;
    case GICV3_G1NS:
        if (is_eoir0) {
            return;
        }
        if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
            return;
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: IRQ %d isn't active\n", __func__, irq);
        return;
    }

    icc_drop_prio(cs, grp);

    if (!icc_eoi_split(env, cs)) {
        /* Priority drop and deactivate not split: deactivate irq now */
        icc_deactivate_irq(cs, irq);
    }
}
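
/* Example of the group checks above (illustrative): on a system with
 * EL3 and GICD_CTLR.DS == 0, a Non-secure write to ICC_EOIR0 while the
 * highest active interrupt is Group 0 is ignored, since Group 0 is
 * Secure-only in that configuration; the priority is not dropped and
 * the interrupt stays active.
 */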

static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    if (icv_access(env, HCR_FMO)) {
        return icv_hppir_read(env, ri);
    }

    value = icc_hppir0_value(cs, env);
    trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
    return value;
}

static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    if (icv_access(env, HCR_IMO)) {
        return icv_hppir_read(env, ri);
    }

    value = icc_hppir1_value(cs, env);
    trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
    return value;
}

static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    bool satinc = false;
    uint64_t bpr;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        return icv_bpr_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        grp = GICV3_G0;
        satinc = true;
    }

    bpr = cs->icc_bpr[grp];
    if (satinc) {
        bpr++;
        bpr = MIN(bpr, 7);
    }

    trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);

    return bpr;
}
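
/* Illustrative example of the saturating increment above: with CBPR set
 * in the Non-secure ICC_CTLR_EL1, a Non-secure EL1 read of ICC_BPR1 when
 * BPR0 holds 2 returns MIN(2 + 1, 7) = 3; if BPR0 held 7 the read would
 * still return 7.
 */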

static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    uint64_t minval;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        icv_bpr_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
                              gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        return;
    }

    minval = (grp == GICV3_G1NS) ? icc_min_bpr_ns(cs) : icc_min_bpr(cs);
    if (value < minval) {
        value = minval;
    }

    cs->icc_bpr[grp] = value & 7;
    gicv3_cpuif_update(cs);
}

static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        return icv_ap_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    value = cs->icc_apr[grp][regno];

    trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
    return value;
}

static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        icv_ap_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    /* It's not possible to claim that a Non-secure interrupt is active
     * at a priority outside the Non-secure range (128..255), since this
     * would otherwise allow malicious NS code to block delivery of S interrupts
     * by writing a bad value to these registers.
     */
    if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
        return;
    }

    cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
    gicv3_cpuif_update(cs);
}
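
/* Note (ours): with 8 priority bits (so 7 preemption bits) each
 * ICC_AP1Rn covers 32 group priorities, so registers 0 and 1 between
 * them cover group priorities 0..63, i.e. the Secure half
 * (0x00..0x7f) of the priority range. That is why the "regno < 2"
 * test above suffices to stop Non-secure code claiming activity in
 * the Secure half.
 */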

static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Deactivate interrupt */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    bool irq_is_secure, single_sec_state, irq_is_grp0;
    bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        icv_dir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);

    if (irq >= cs->gic->num_irq) {
        /* Also catches special interrupt numbers and LPIs */
        return;
    }

    if (!icc_eoi_split(env, cs)) {
        return;
    }

    int grp = gicv3_irq_group(cs->gic, cs, irq);

    single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
    irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
    irq_is_grp0 = grp == GICV3_G0;

    /* Check whether we're allowed to deactivate this interrupt based
     * on its group and the current CPU state.
     * These checks are laid out to correspond to the spec's pseudocode.
     */
    route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
    route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
    /* No need to include !IsSecure in route_*_to_el2 as it's only
     * tested in cases where we know !IsSecure is true.
     */
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);
    route_fiq_to_el2 = hcr_el2 & HCR_FMO;
    route_irq_to_el2 = hcr_el2 & HCR_IMO;

    switch (arm_current_el(env)) {
    case 3:
        break;
    case 2:
        if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
            break;
        }
        if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
            break;
        }
        return;
    case 1:
        if (!arm_is_secure_below_el3(env)) {
            if (single_sec_state && irq_is_grp0 &&
                !route_fiq_to_el3 && !route_fiq_to_el2) {
                break;
            }
            if (!irq_is_secure && !irq_is_grp0 &&
                !route_irq_to_el3 && !route_irq_to_el2) {
                break;
            }
        } else {
            if (irq_is_grp0 && !route_fiq_to_el3) {
                break;
            }
            if (!irq_is_grp0 &&
                (!irq_is_secure || !single_sec_state) &&
                !route_irq_to_el3) {
                break;
            }
        }
        return;
    default:
        g_assert_not_reached();
    }

    icc_deactivate_irq(cs, irq);
}
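
/* Example of the routing checks above (illustrative): at Secure EL1
 * with SCR_EL3.FIQ set, a deactivate of a Group 0 interrupt via
 * ICC_DIR_EL1 is ignored, because Group 0 (FIQ) is then owned by EL3;
 * with SCR_EL3.FIQ clear the deactivate goes ahead.
 */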

static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int prio;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        return icv_rpr_read(env, ri);
    }

    prio = icc_highest_active_prio(cs);

    if (arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS GIC access and Group 0 is inaccessible to NS */
        if ((prio & 0x80) == 0) {
            /* NS mustn't see priorities in the Secure half of the range */
            prio = 0;
        } else if (prio != 0xff) {
            /* Non-idle priority: show the Non-secure view of it */
            prio = (prio << 1) & 0xff;
        }
    }

    trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
    return prio;
}
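
/* Worked example of the Non-secure view above (illustrative): if the
 * highest active priority is 0xd0 (a Secure-view value in the NS half
 * of the range), a Non-secure read of ICC_RPR_EL1 returns
 * (0xd0 << 1) & 0xff = 0xa0, the value the NS world originally wrote;
 * a Secure-half priority such as 0x40 reads as 0.
 */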

static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
                             uint64_t value, int grp, bool ns)
{
    GICv3State *s = cs->gic;

    /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
    uint64_t aff = extract64(value, 48, 8) << 16 |
        extract64(value, 32, 8) << 8 |
        extract64(value, 16, 8);
    uint32_t targetlist = extract64(value, 0, 16);
    uint32_t irq = extract64(value, 24, 4);
    bool irm = extract64(value, 40, 1);
    int i;

    if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
        /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
         * interrupts as Group 0 interrupts and must send Secure Group 0
         * interrupts to the target CPUs.
         */
        grp = GICV3_G0;
    }

    trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
                                 aff, targetlist);

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *ocs = &s->cpu[i];

        if (irm) {
            /* IRM == 1 : route to all CPUs except self */
            if (cs == ocs) {
                continue;
            }
        } else {
            /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
             * where the corresponding bit is set in targetlist
             */
            int aff0;

            if (ocs->gicr_typer >> 40 != aff) {
                continue;
            }
            aff0 = extract64(ocs->gicr_typer, 32, 8);
            if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
                continue;
            }
        }

        /* The redistributor will check against its own GICR_NSACR as needed */
        gicv3_redist_send_sgi(ocs, grp, irq, ns);
    }
}
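
/* Example decode of an SGI register value (illustrative): writing
 * 0x0000000001000005 to ICC_SGI1R_EL1 requests SGI 1 (INTID field,
 * bits [27:24]) for the CPUs with affinity 0.0.0.n where n is 0 or 2
 * (target list 0x0005, bits [15:0]), with IRM (bit 40) clear.
 */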

static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Generate Secure Group 0 SGI. */
    GICv3CPUState *cs = icc_cs_from_env(env);
    bool ns = !arm_is_secure(env);

    icc_generate_sgi(env, cs, value, GICV3_G0, ns);
}

static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value)
{
    /* Generate Group 1 SGI for the current Security state */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp;
    bool ns = !arm_is_secure(env);

    grp = ns ? GICV3_G1NS : GICV3_G1;
    icc_generate_sgi(env, cs, value, grp, ns);
}

static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    /* Generate Group 1 SGI for the Security state that is not
     * the current state
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp;
    bool ns = !arm_is_secure(env);

    grp = ns ? GICV3_G1 : GICV3_G1NS;
    icc_generate_sgi(env, cs, value, grp, ns);
}

static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
    uint64_t value;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        return icv_igrpen_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    value = cs->icc_igrpen[grp];
    trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
                                gicv3_redist_affid(cs), value);
    return value;
}

static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        icv_igrpen_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
                                 gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
    gicv3_cpuif_update(cs);
}

static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
    value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
    trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
    return value;
}

static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                  uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);

    /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
    cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
    cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
    gicv3_cpuif_update(cs);
}

static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
    uint64_t value;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        return icv_ctlr_read(env, ri);
    }

    value = cs->icc_ctlr_el1[bank];
    trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
    return value;
}

static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
    uint64_t mask;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        icv_ctlr_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);

    /* Only CBPR and EOIMODE can be RW;
     * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or
     * the associated priority-based routing of them);
     * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) &&
        ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) {
        mask = ICC_CTLR_EL1_EOIMODE;
    } else {
        mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE;
    }

    cs->icc_ctlr_el1[bank] &= ~mask;
    cs->icc_ctlr_el1[bank] |= (value & mask);
    gicv3_cpuif_update(cs);
}

static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    value = cs->icc_ctlr_el3;
    if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
        value |= ICC_CTLR_EL3_EOIMODE_EL1NS;
    }
    if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
        value |= ICC_CTLR_EL3_CBPR_EL1NS;
    }
    /* The EL1S aliases must reflect the Secure bank, not the NS bank */
    if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE) {
        value |= ICC_CTLR_EL3_EOIMODE_EL1S;
    }
    if (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) {
        value |= ICC_CTLR_EL3_CBPR_EL1S;
    }
    trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
    return value;
}

static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t mask;

    trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);

    /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */
    cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
    if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
        cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
    }
    if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
        cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
    }

    cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
    if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
        cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
    }
    if (value & ICC_CTLR_EL3_CBPR_EL1S) {
        cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
    }

    /* The only bit stored in icc_ctlr_el3 which is writable is EOIMODE_EL3: */
    mask = ICC_CTLR_EL3_EOIMODE_EL3;

    cs->icc_ctlr_el3 &= ~mask;
    cs->icc_ctlr_el3 |= (value & mask);
    gicv3_cpuif_update(cs);
}

static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
        switch (el) {
        case 1:
            /* Note that arm_hcr_el2_eff takes secure state into account. */
            if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        r = CP_ACCESS_TRAP;
    }
    return r;
}

static CPAccessResult gicv3_dir_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
        arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    return gicv3_irqfiq_access(env, ri, isread);
}

static CPAccessResult gicv3_sgi_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    if (arm_current_el(env) == 1 &&
        (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    return gicv3_irqfiq_access(env, ri, isread);
}

static CPAccessResult gicv3_fiq_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if (env->cp15.scr_el3 & SCR_FIQ) {
        switch (el) {
        case 1:
            if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        r = CP_ACCESS_TRAP;
    }
    return r;
}

static CPAccessResult gicv3_irq_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if (env->cp15.scr_el3 & SCR_IRQ) {
        switch (el) {
        case 1:
            if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        r = CP_ACCESS_TRAP;
    }
    return r;
}

static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_pmr_el1 = 0;
    cs->icc_bpr[GICV3_G0] = icc_min_bpr(cs);
    cs->icc_bpr[GICV3_G1] = icc_min_bpr(cs);
    cs->icc_bpr[GICV3_G1NS] = icc_min_bpr_ns(cs);
    memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
    memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
    cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
        (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
        ((cs->pribits - 1) << ICC_CTLR_EL3_PRIBITS_SHIFT);

    memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
    cs->ich_hcr_el2 = 0;
    memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
    cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
        ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) |
        (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
}

static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
    { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_pmr_read,
      .writefn = icc_pmr_write,
      /* We hang the whole cpu interface reset routine off here
       * rather than parcelling it out into one little function
       * per register
       */
      .resetfn = icc_reset,
    },
    { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_fiq_access,
      .readfn = icc_iar0_read,
    },
    { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_fiq_access,
      .writefn = icc_eoir_write,
    },
    { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_fiq_access,
      .readfn = icc_hppir0_read,
    },
    { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_bpr_read,
      .writefn = icc_bpr_write,
    },
    { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    /* All the ICC_AP1R*_EL1 registers are banked */
    { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_dir_access,
      .writefn = icc_dir_write,
    },
    { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_rpr_read,
    },
    { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi1r_write,
    },
    { .name = "ICC_SGI1R",
      .cp = 15, .opc1 = 0, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi1r_write,
    },
    { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_asgi1r_write,
    },
    { .name = "ICC_ASGI1R",
      .cp = 15, .opc1 = 1, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_asgi1r_write,
    },
    { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi0r_write,
    },
    { .name = "ICC_SGI0R",
      .cp = 15, .opc1 = 2, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi0r_write,
    },
    { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_iar1_read,
    },
    { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_irq_access,
      .writefn = icc_eoir_write,
    },
    { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_hppir1_read,
    },
    /* This register is banked */
    { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_bpr_read,
      .writefn = icc_bpr_write,
    },
    /* This register is banked */
    { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_ctlr_el1_read,
      .writefn = icc_ctlr_el1_write,
    },
    { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL1_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       * This register is banked but since it's constant we don't
       * need to do anything special.
       */
      .resetvalue = 0x7,
    },
    { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .fgt = FGT_ICC_IGRPENN_EL1,
      .readfn = icc_igrpen_read,
      .writefn = icc_igrpen_write,
    },
    /* This register is banked */
    { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .fgt = FGT_ICC_IGRPENN_EL1,
      .readfn = icc_igrpen_read,
      .writefn = icc_igrpen_write,
    },
    { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL2_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       */
      .resetvalue = 0xf,
    },
    { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL3_RW,
      .readfn = icc_ctlr_el3_read,
      .writefn = icc_ctlr_el3_write,
    },
    { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL3_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       */
      .resetvalue = 0xf,
    },
    { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL3_RW,
      .readfn = icc_igrpen1_el3_read,
      .writefn = icc_igrpen1_el3_write,
    },
};

static const ARMCPRegInfo gicv3_cpuif_icc_apxr1_reginfo[] = {
    { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
};

static const ARMCPRegInfo gicv3_cpuif_icc_apxr23_reginfo[] = {
    { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
};

static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
    uint64_t value;

    value = cs->ich_apr[grp][regno];
    trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
    return value;
}

static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;

    trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
    gicv3_cpuif_virt_irq_fiq_update(cs);
}

static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value = cs->ich_hcr_el2;

    trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
    return value;
}

static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);

    value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
        ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
        ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
        ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
        ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;

    cs->ich_hcr_el2 = value;
    gicv3_cpuif_virt_update(cs);
}

static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value = cs->ich_vmcr_el2;

    trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
    return value;
}

static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);

    value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
        ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
        ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
    value |= ICH_VMCR_EL2_VFIQEN;

    cs->ich_vmcr_el2 = value;
    /* Enforce "writing BPRs to less than minimum sets them to the minimum"
     * by reading and writing back the fields.
     */
    write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
    write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));

    gicv3_cpuif_virt_update(cs);
}
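
/* Worked example of the clamping above (illustrative): with vprebits = 5
 * the minimum VBPR0 is 7 - 5 = 2 (and the minimum VBPR1 is 3), so a
 * guest write of 1 to the VBPR0 field is read back and rewritten as 2
 * by the write_vbpr() round trip.
 */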

static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 | ((ri->crm & 1) << 3);
    uint64_t value;

    /* This read function handles all of:
     * 64-bit reads of the whole LR
     * 32-bit reads of the low half of the LR
     * 32-bit reads of the high half of the LR
     */
    if (ri->state == ARM_CP_STATE_AA32) {
        if (ri->crm >= 14) {
            value = extract64(cs->ich_lr_el2[regno], 32, 32);
            trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
        } else {
            value = extract64(cs->ich_lr_el2[regno], 0, 32);
            trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
        }
    } else {
        value = cs->ich_lr_el2[regno];
        trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
    }

    return value;
}

static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 | ((ri->crm & 1) << 3);

    /* This write function handles all of:
     * 64-bit writes to the whole LR
     * 32-bit writes to the low half of the LR
     * 32-bit writes to the high half of the LR
     */
    if (ri->state == ARM_CP_STATE_AA32) {
        if (ri->crm >= 14) {
            trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
            value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
        } else {
            trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
            value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
        }
    } else {
        trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
    }

    /* Enforce RES0 bits in priority field */
    if (cs->vpribits < 8) {
        value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
                          8 - cs->vpribits, 0);
    }

    cs->ich_lr_el2[regno] = value;
    gicv3_cpuif_virt_update(cs);
}
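
/* Example of the RES0 enforcement above (illustrative): with
 * vpribits = 5 the low 3 bits of the LR priority field are RES0, so a
 * guest write of priority 0xff is stored as 0xf8.
 */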

static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value;

    value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
        | ICH_VTR_EL2_TDS | ICH_VTR_EL2_A3V
        | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
        | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
        | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);

    if (cs->gic->revision < 4) {
        value |= ICH_VTR_EL2_NV4;
    }

    trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
    return value;
}

static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value = maintenance_interrupt_state(cs);

    trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
    return value;
}

static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);

    trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
    return value;
}

static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t value = 0;
    int i;

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];

        if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
            ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
            value |= (1 << i);
        }
    }

    trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
    return value;
}
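
/* Note (ours): an LR in the invalid state with HW clear and EOI set is
 * deliberately not reported free above, because its EOI maintenance
 * interrupt has not yet been delivered; compare
 * eoi_maintenance_interrupt_state().
 */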

static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
    { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_hcr_read,
      .writefn = ich_hcr_write,
    },
    { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_vtr_read,
    },
    { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_misr_read,
    },
    { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_eisr_read,
    },
    { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_elrsr_read,
    },
    { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_vmcr_read,
      .writefn = ich_vmcr_write,
    },
};

static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
    { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
};

static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
    { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
};

static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
{
    GICv3CPUState *cs = opaque;

    gicv3_cpuif_update(cs);
    /*
     * Because vLPIs are only pending in NonSecure state,
     * an EL change can change the VIRQ/VFIQ status (but
     * cannot affect the maintenance interrupt state)
     */
    gicv3_cpuif_virt_irq_fiq_update(cs);
}

void gicv3_init_cpuif(GICv3State *s)
{
    /* Called from the GICv3 realize function; register our system
     * registers with the CPU
     */
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
        GICv3CPUState *cs = &s->cpu[i];

        /*
         * If the CPU doesn't define a GICv3 configuration, probably because
         * in real hardware it doesn't have one, then we use default values
         * matching the one used by most Arm CPUs. This applies to:
         *  cpu->gic_num_lrs
         *  cpu->gic_vpribits
         *  cpu->gic_vprebits
         *  cpu->gic_pribits
         */

        /* Note that we can't just use the GICv3CPUState as an opaque pointer
         * in define_arm_cp_regs_with_opaque(), because when we're called back
         * it might be with code translated by CPU 0 but run by CPU 1, in
         * which case we'd get the wrong value.
         * So instead we define the regs with no ri->opaque info, and
         * get back to the GICv3CPUState from the CPUARMState.
         *
         * These CP regs callbacks can be called from either TCG or HVF code.
         */
        define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);

        /*
         * The CPU implementation specifies the number of supported
         * bits of physical priority. For backwards compatibility
         * of migration, we have a compat property that forces use
         * of 8 priority bits regardless of what the CPU really has.
         */
        if (s->force_8bit_prio) {
            cs->pribits = 8;
        } else {
            cs->pribits = cpu->gic_pribits ?: 5;
        }

        /*
         * The GICv3 has separate ID register fields for virtual priority
         * and preemption bit values, but only a single ID register field
         * for the physical priority bits. The preemption bit count is
         * always the same as the priority bit count, except that 8 bits
         * of priority means 7 preemption bits. We precalculate the
         * preemption bits because it simplifies the code and makes the
         * parallels between the virtual and physical bits of the GIC
         * a bit clearer.
         */
        cs->prebits = cs->pribits;
        if (cs->prebits == 8) {
            cs->prebits--;
        }
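
        /*
         * Illustrative mapping (ours): pribits 5 gives prebits 5 and a
         * single ICC_AP<n>R0 register; pribits 6 gives prebits 6 and adds
         * AP<n>R1; pribits 7 or 8 give prebits 7 and all four
         * AP<n>R0..3 registers, matching the definitions below.
         */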

        /*
         * Check that CPU code defining pribits didn't violate
         * architectural constraints our implementation relies on.
         */
        g_assert(cs->pribits >= 4 && cs->pribits <= 8);

        /*
         * gicv3_cpuif_reginfo[] defines ICC_AP*R0_EL1; add definitions
         * for ICC_AP*R{1,2,3}_EL1 if the prebits value requires them.
         */
        if (cs->prebits >= 6) {
            define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr1_reginfo);
        }
        if (cs->prebits == 7) {
            define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr23_reginfo);
        }

        if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
            int j;

            cs->num_list_regs = cpu->gic_num_lrs ?: 4;
            cs->vpribits = cpu->gic_vpribits ?: 5;
            cs->vprebits = cpu->gic_vprebits ?: 5;

            /* Check against architectural constraints: getting these
             * wrong would be a bug in the CPU code defining these,
             * and the implementation relies on them holding.
             */
            g_assert(cs->vprebits <= cs->vpribits);
            g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
            g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);

            define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);

            for (j = 0; j < cs->num_list_regs; j++) {
                /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
                 * are split into two cp15 regs, LR (the low part, with the
                 * same encoding as the AArch64 LR) and LRC (the high part).
                 */
                ARMCPRegInfo lr_regset[] = {
                    { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
                      .opc0 = 3, .opc1 = 4, .crn = 12,
                      .crm = 12 + (j >> 3), .opc2 = j & 7,
                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
                      .access = PL2_RW,
                      .readfn = ich_lr_read,
                      .writefn = ich_lr_write,
                    },
                    { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
                      .cp = 15, .opc1 = 4, .crn = 12,
                      .crm = 14 + (j >> 3), .opc2 = j & 7,
                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
                      .access = PL2_RW,
                      .readfn = ich_lr_read,
                      .writefn = ich_lr_write,
                    },
                };

                define_arm_cp_regs(cpu, lr_regset);
            }
            if (cs->vprebits >= 6) {
                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
            }
            if (cs->vprebits == 7) {
                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
            }
        }
        if (tcg_enabled() || qtest_enabled()) {
            /*
             * We can only trap EL changes with TCG. However the GIC interrupt
             * state only changes on EL changes involving EL2 or EL3, so for
             * the non-TCG case this is OK, as EL2 and EL3 can't exist.
             */
            arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
        } else {
            assert(!arm_feature(&cpu->env, ARM_FEATURE_EL2));
            assert(!arm_feature(&cpu->env, ARM_FEATURE_EL3));
        }
    }
}