/* arm_gicv3_cpuif.c */
/*
 * ARM Generic Interrupt Controller v3
 *
 * Copyright (c) 2016 Linaro Limited
 * Written by Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

/* This file contains the code for the system register interface
 * portions of the GICv3.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "gicv3_internal.h"
#include "hw/irq.h"
#include "cpu.h"
  20. void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s)
  21. {
  22. ARMCPU *arm_cpu = ARM_CPU(cpu);
  23. CPUARMState *env = &arm_cpu->env;
  24. env->gicv3state = (void *)s;
  25. };
  26. static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
  27. {
  28. return env->gicv3state;
  29. }
  30. static bool gicv3_use_ns_bank(CPUARMState *env)
  31. {
  32. /* Return true if we should use the NonSecure bank for a banked GIC
  33. * CPU interface register. Note that this differs from the
  34. * access_secure_reg() function because GICv3 banked registers are
  35. * banked even for AArch64, unlike the other CPU system registers.
  36. */
  37. return !arm_is_secure_below_el3(env);
  38. }
  39. /* The minimum BPR for the virtual interface is a configurable property */
  40. static inline int icv_min_vbpr(GICv3CPUState *cs)
  41. {
  42. return 7 - cs->vprebits;
  43. }
  44. /* Simple accessor functions for LR fields */
  45. static uint32_t ich_lr_vintid(uint64_t lr)
  46. {
  47. return extract64(lr, ICH_LR_EL2_VINTID_SHIFT, ICH_LR_EL2_VINTID_LENGTH);
  48. }
  49. static uint32_t ich_lr_pintid(uint64_t lr)
  50. {
  51. return extract64(lr, ICH_LR_EL2_PINTID_SHIFT, ICH_LR_EL2_PINTID_LENGTH);
  52. }
  53. static uint32_t ich_lr_prio(uint64_t lr)
  54. {
  55. return extract64(lr, ICH_LR_EL2_PRIORITY_SHIFT, ICH_LR_EL2_PRIORITY_LENGTH);
  56. }
  57. static int ich_lr_state(uint64_t lr)
  58. {
  59. return extract64(lr, ICH_LR_EL2_STATE_SHIFT, ICH_LR_EL2_STATE_LENGTH);
  60. }
  61. static bool icv_access(CPUARMState *env, int hcr_flags)
  62. {
  63. /* Return true if this ICC_ register access should really be
  64. * directed to an ICV_ access. hcr_flags is a mask of
  65. * HCR_EL2 bits to check: we treat this as an ICV_ access
  66. * if we are in NS EL1 and at least one of the specified
  67. * HCR_EL2 bits is set.
  68. *
  69. * ICV registers fall into four categories:
  70. * * access if NS EL1 and HCR_EL2.FMO == 1:
  71. * all ICV regs with '0' in their name
  72. * * access if NS EL1 and HCR_EL2.IMO == 1:
  73. * all ICV regs with '1' in their name
  74. * * access if NS EL1 and either IMO or FMO == 1:
  75. * CTLR, DIR, PMR, RPR
  76. */
  77. uint64_t hcr_el2 = arm_hcr_el2_eff(env);
  78. bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO);
  79. return flagmatch && arm_current_el(env) == 1
  80. && !arm_is_secure_below_el3(env);
  81. }
  82. static int read_vbpr(GICv3CPUState *cs, int grp)
  83. {
  84. /* Read VBPR value out of the VMCR field (caller must handle
  85. * VCBPR effects if required)
  86. */
  87. if (grp == GICV3_G0) {
  88. return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
  89. ICH_VMCR_EL2_VBPR0_LENGTH);
  90. } else {
  91. return extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
  92. ICH_VMCR_EL2_VBPR1_LENGTH);
  93. }
  94. }
  95. static void write_vbpr(GICv3CPUState *cs, int grp, int value)
  96. {
  97. /* Write new VBPR1 value, handling the "writing a value less than
  98. * the minimum sets it to the minimum" semantics.
  99. */
  100. int min = icv_min_vbpr(cs);
  101. if (grp != GICV3_G0) {
  102. min++;
  103. }
  104. value = MAX(value, min);
  105. if (grp == GICV3_G0) {
  106. cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR0_SHIFT,
  107. ICH_VMCR_EL2_VBPR0_LENGTH, value);
  108. } else {
  109. cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VBPR1_SHIFT,
  110. ICH_VMCR_EL2_VBPR1_LENGTH, value);
  111. }
  112. }
  113. static uint32_t icv_fullprio_mask(GICv3CPUState *cs)
  114. {
  115. /* Return a mask word which clears the unimplemented priority bits
  116. * from a priority value for a virtual interrupt. (Not to be confused
  117. * with the group priority, whose mask depends on the value of VBPR
  118. * for the interrupt group.)
  119. */
  120. return ~0U << (8 - cs->vpribits);
  121. }
  122. static int ich_highest_active_virt_prio(GICv3CPUState *cs)
  123. {
  124. /* Calculate the current running priority based on the set bits
  125. * in the ICH Active Priority Registers.
  126. */
  127. int i;
  128. int aprmax = 1 << (cs->vprebits - 5);
  129. assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
  130. for (i = 0; i < aprmax; i++) {
  131. uint32_t apr = cs->ich_apr[GICV3_G0][i] |
  132. cs->ich_apr[GICV3_G1NS][i];
  133. if (!apr) {
  134. continue;
  135. }
  136. return (i * 32 + ctz32(apr)) << (icv_min_vbpr(cs) + 1);
  137. }
  138. /* No current active interrupts: return idle priority */
  139. return 0xff;
  140. }
  141. static int hppvi_index(GICv3CPUState *cs)
  142. {
  143. /* Return the list register index of the highest priority pending
  144. * virtual interrupt, as per the HighestPriorityVirtualInterrupt
  145. * pseudocode. If no pending virtual interrupts, return -1.
  146. */
  147. int idx = -1;
  148. int i;
  149. /* Note that a list register entry with a priority of 0xff will
  150. * never be reported by this function; this is the architecturally
  151. * correct behaviour.
  152. */
  153. int prio = 0xff;
  154. if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
  155. /* Both groups disabled, definitely nothing to do */
  156. return idx;
  157. }
  158. for (i = 0; i < cs->num_list_regs; i++) {
  159. uint64_t lr = cs->ich_lr_el2[i];
  160. int thisprio;
  161. if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
  162. /* Not Pending */
  163. continue;
  164. }
  165. /* Ignore interrupts if relevant group enable not set */
  166. if (lr & ICH_LR_EL2_GROUP) {
  167. if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
  168. continue;
  169. }
  170. } else {
  171. if (!(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
  172. continue;
  173. }
  174. }
  175. thisprio = ich_lr_prio(lr);
  176. if (thisprio < prio) {
  177. prio = thisprio;
  178. idx = i;
  179. }
  180. }
  181. return idx;
  182. }
  183. static uint32_t icv_gprio_mask(GICv3CPUState *cs, int group)
  184. {
  185. /* Return a mask word which clears the subpriority bits from
  186. * a priority value for a virtual interrupt in the specified group.
  187. * This depends on the VBPR value.
  188. * If using VBPR0 then:
  189. * a BPR of 0 means the group priority bits are [7:1];
  190. * a BPR of 1 means they are [7:2], and so on down to
  191. * a BPR of 7 meaning no group priority bits at all.
  192. * If using VBPR1 then:
  193. * a BPR of 0 is impossible (the minimum value is 1)
  194. * a BPR of 1 means the group priority bits are [7:1];
  195. * a BPR of 2 means they are [7:2], and so on down to
  196. * a BPR of 7 meaning the group priority is [7].
  197. *
  198. * Which BPR to use depends on the group of the interrupt and
  199. * the current ICH_VMCR_EL2.VCBPR settings.
  200. *
  201. * This corresponds to the VGroupBits() pseudocode.
  202. */
  203. int bpr;
  204. if (group == GICV3_G1NS && cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
  205. group = GICV3_G0;
  206. }
  207. bpr = read_vbpr(cs, group);
  208. if (group == GICV3_G1NS) {
  209. assert(bpr > 0);
  210. bpr--;
  211. }
  212. return ~0U << (bpr + 1);
  213. }
static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
{
    /* Return true if we can signal this virtual interrupt defined by
     * the given list register value; see the pseudocode functions
     * CanSignalVirtualInterrupt and CanSignalVirtualInt.
     * Compare also icc_hppi_can_preempt() which is the non-virtual
     * equivalent of these checks.
     */
    int grp;
    uint32_t mask, prio, rprio, vpmr;

    if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
        /* Virtual interface disabled */
        return false;
    }

    /* We don't need to check that this LR is in Pending state because
     * that has already been done in hppvi_index().
     */

    prio = ich_lr_prio(lr);
    /* The virtual priority mask lives in the ICH_VMCR_EL2.VPMR field */
    vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                     ICH_VMCR_EL2_VPMR_LENGTH);

    if (prio >= vpmr) {
        /* Priority mask masks this interrupt */
        return false;
    }

    rprio = ich_highest_active_virt_prio(cs);
    if (rprio == 0xff) {
        /* No running interrupt so we can preempt */
        return true;
    }

    grp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

    mask = icv_gprio_mask(cs, grp);

    /* We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((prio & mask) < (rprio & mask)) {
        return true;
    }

    return false;
}
static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs,
                                                uint32_t *misr)
{
    /* Return a set of bits indicating the EOI maintenance interrupt status
     * for each list register. The EOI maintenance interrupt status is
     * 1 if LR.State == 0 && LR.HW == 0 && LR.EOI == 1
     * (see the GICv3 spec for the ICH_EISR_EL2 register).
     * If misr is not NULL then we should also collect the information
     * about the MISR.EOI, MISR.NP and MISR.U bits.
     */
    uint32_t value = 0;
    int validcount = 0;          /* number of LRs with a non-invalid state */
    bool seenpending = false;    /* any LR in the Pending state? */
    int i;

    for (i = 0; i < cs->num_list_regs; i++) {
        uint64_t lr = cs->ich_lr_el2[i];

        /* EISR bit: State == Invalid, HW clear, EOI set */
        if ((lr & (ICH_LR_EL2_STATE_MASK | ICH_LR_EL2_HW | ICH_LR_EL2_EOI))
            == ICH_LR_EL2_EOI) {
            value |= (1 << i);
        }
        if ((lr & ICH_LR_EL2_STATE_MASK)) {
            validcount++;
        }
        if (ich_lr_state(lr) == ICH_LR_EL2_STATE_PENDING) {
            seenpending = true;
        }
    }

    if (misr) {
        /* U: underflow -- fewer than two valid LR entries, if enabled */
        if (validcount < 2 && (cs->ich_hcr_el2 & ICH_HCR_EL2_UIE)) {
            *misr |= ICH_MISR_EL2_U;
        }
        /* NP: no pending interrupt in any LR, if enabled */
        if (!seenpending && (cs->ich_hcr_el2 & ICH_HCR_EL2_NPIE)) {
            *misr |= ICH_MISR_EL2_NP;
        }
        /* EOI: at least one EISR bit is set */
        if (value) {
            *misr |= ICH_MISR_EL2_EOI;
        }
    }
    return value;
}
  293. static uint32_t maintenance_interrupt_state(GICv3CPUState *cs)
  294. {
  295. /* Return a set of bits indicating the maintenance interrupt status
  296. * (as seen in the ICH_MISR_EL2 register).
  297. */
  298. uint32_t value = 0;
  299. /* Scan list registers and fill in the U, NP and EOI bits */
  300. eoi_maintenance_interrupt_state(cs, &value);
  301. if (cs->ich_hcr_el2 & (ICH_HCR_EL2_LRENPIE | ICH_HCR_EL2_EOICOUNT_MASK)) {
  302. value |= ICH_MISR_EL2_LRENP;
  303. }
  304. if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0EIE) &&
  305. (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0)) {
  306. value |= ICH_MISR_EL2_VGRP0E;
  307. }
  308. if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP0DIE) &&
  309. !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
  310. value |= ICH_MISR_EL2_VGRP0D;
  311. }
  312. if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1EIE) &&
  313. (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
  314. value |= ICH_MISR_EL2_VGRP1E;
  315. }
  316. if ((cs->ich_hcr_el2 & ICH_HCR_EL2_VGRP1DIE) &&
  317. !(cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1)) {
  318. value |= ICH_MISR_EL2_VGRP1D;
  319. }
  320. return value;
  321. }
static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
{
    /* Tell the CPU about any pending virtual interrupts or
     * maintenance interrupts, following a change to the state
     * of the CPU interface relevant to virtual interrupts.
     *
     * CAUTION: this function will call qemu_set_irq() on the
     * CPU maintenance IRQ line, which is typically wired up
     * to the GIC as a per-CPU interrupt. This means that it
     * will recursively call back into the GIC code via
     * gicv3_redist_set_irq() and thus into the CPU interface code's
     * gicv3_cpuif_update(). It is therefore important that this
     * function is only called as the final action of a CPU interface
     * register write implementation, after all the GIC state
     * fields have been updated. gicv3_cpuif_update() also must
     * not cause this function to be called, but that happens
     * naturally as a result of there being no architectural
     * linkage between the physical and virtual GIC logic.
     */
    int idx;
    int irqlevel = 0;
    int fiqlevel = 0;
    int maintlevel = 0;

    idx = hppvi_index(cs);
    trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx);
    if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];

        if (icv_hppi_can_preempt(cs, lr)) {
            /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
            if (lr & ICH_LR_EL2_GROUP) {
                irqlevel = 1;
            } else {
                fiqlevel = 1;
            }
        }
    }

    /* Maintenance interrupts are only delivered while the virtual
     * interface is enabled (ICH_HCR_EL2.En).
     */
    if (cs->ich_hcr_el2 & ICH_HCR_EL2_EN) {
        maintlevel = maintenance_interrupt_state(cs);
    }

    trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel,
                                    irqlevel, maintlevel);

    qemu_set_irq(cs->parent_vfiq, fiqlevel);
    qemu_set_irq(cs->parent_virq, irqlevel);
    qemu_set_irq(cs->maintenance_irq, maintlevel);
}
  367. static uint64_t icv_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
  368. {
  369. GICv3CPUState *cs = icc_cs_from_env(env);
  370. int regno = ri->opc2 & 3;
  371. int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
  372. uint64_t value = cs->ich_apr[grp][regno];
  373. trace_gicv3_icv_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
  374. return value;
  375. }
  376. static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
  377. uint64_t value)
  378. {
  379. GICv3CPUState *cs = icc_cs_from_env(env);
  380. int regno = ri->opc2 & 3;
  381. int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
  382. trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
  383. cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
  384. gicv3_cpuif_virt_update(cs);
  385. return;
  386. }
  387. static uint64_t icv_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
  388. {
  389. GICv3CPUState *cs = icc_cs_from_env(env);
  390. int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
  391. uint64_t bpr;
  392. bool satinc = false;
  393. if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
  394. /* reads return bpr0 + 1 saturated to 7, writes ignored */
  395. grp = GICV3_G0;
  396. satinc = true;
  397. }
  398. bpr = read_vbpr(cs, grp);
  399. if (satinc) {
  400. bpr++;
  401. bpr = MIN(bpr, 7);
  402. }
  403. trace_gicv3_icv_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);
  404. return bpr;
  405. }
  406. static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
  407. uint64_t value)
  408. {
  409. GICv3CPUState *cs = icc_cs_from_env(env);
  410. int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1NS;
  411. trace_gicv3_icv_bpr_write(ri->crm == 8 ? 0 : 1,
  412. gicv3_redist_affid(cs), value);
  413. if (grp == GICV3_G1NS && (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR)) {
  414. /* reads return bpr0 + 1 saturated to 7, writes ignored */
  415. return;
  416. }
  417. write_vbpr(cs, grp, value);
  418. gicv3_cpuif_virt_update(cs);
  419. }
  420. static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
  421. {
  422. GICv3CPUState *cs = icc_cs_from_env(env);
  423. uint64_t value;
  424. value = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
  425. ICH_VMCR_EL2_VPMR_LENGTH);
  426. trace_gicv3_icv_pmr_read(gicv3_redist_affid(cs), value);
  427. return value;
  428. }
  429. static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
  430. uint64_t value)
  431. {
  432. GICv3CPUState *cs = icc_cs_from_env(env);
  433. trace_gicv3_icv_pmr_write(gicv3_redist_affid(cs), value);
  434. value &= icv_fullprio_mask(cs);
  435. cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
  436. ICH_VMCR_EL2_VPMR_LENGTH, value);
  437. gicv3_cpuif_virt_update(cs);
  438. }
  439. static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
  440. {
  441. GICv3CPUState *cs = icc_cs_from_env(env);
  442. int enbit;
  443. uint64_t value;
  444. enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
  445. value = extract64(cs->ich_vmcr_el2, enbit, 1);
  446. trace_gicv3_icv_igrpen_read(ri->opc2 & 1 ? 1 : 0,
  447. gicv3_redist_affid(cs), value);
  448. return value;
  449. }
  450. static void icv_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
  451. uint64_t value)
  452. {
  453. GICv3CPUState *cs = icc_cs_from_env(env);
  454. int enbit;
  455. trace_gicv3_icv_igrpen_write(ri->opc2 & 1 ? 1 : 0,
  456. gicv3_redist_affid(cs), value);
  457. enbit = ri->opc2 & 1 ? ICH_VMCR_EL2_VENG1_SHIFT : ICH_VMCR_EL2_VENG0_SHIFT;
  458. cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, enbit, 1, value);
  459. gicv3_cpuif_virt_update(cs);
  460. }
  461. static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
  462. {
  463. GICv3CPUState *cs = icc_cs_from_env(env);
  464. uint64_t value;
  465. /* Note that the fixed fields here (A3V, SEIS, IDbits, PRIbits)
  466. * should match the ones reported in ich_vtr_read().
  467. */
  468. value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
  469. (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
  470. if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
  471. value |= ICC_CTLR_EL1_EOIMODE;
  472. }
  473. if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VCBPR) {
  474. value |= ICC_CTLR_EL1_CBPR;
  475. }
  476. trace_gicv3_icv_ctlr_read(gicv3_redist_affid(cs), value);
  477. return value;
  478. }
  479. static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
  480. uint64_t value)
  481. {
  482. GICv3CPUState *cs = icc_cs_from_env(env);
  483. trace_gicv3_icv_ctlr_write(gicv3_redist_affid(cs), value);
  484. cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VCBPR_SHIFT,
  485. 1, value & ICC_CTLR_EL1_CBPR ? 1 : 0);
  486. cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT,
  487. 1, value & ICC_CTLR_EL1_EOIMODE ? 1 : 0);
  488. gicv3_cpuif_virt_update(cs);
  489. }
  490. static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
  491. {
  492. GICv3CPUState *cs = icc_cs_from_env(env);
  493. int prio = ich_highest_active_virt_prio(cs);
  494. trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
  495. return prio;
  496. }
  497. static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri)
  498. {
  499. GICv3CPUState *cs = icc_cs_from_env(env);
  500. int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
  501. int idx = hppvi_index(cs);
  502. uint64_t value = INTID_SPURIOUS;
  503. if (idx >= 0) {
  504. uint64_t lr = cs->ich_lr_el2[idx];
  505. int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
  506. if (grp == thisgrp) {
  507. value = ich_lr_vintid(lr);
  508. }
  509. }
  510. trace_gicv3_icv_hppir_read(grp, gicv3_redist_affid(cs), value);
  511. return value;
  512. }
  513. static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
  514. {
  515. /* Activate the interrupt in the specified list register
  516. * by moving it from Pending to Active state, and update the
  517. * Active Priority Registers.
  518. */
  519. uint32_t mask = icv_gprio_mask(cs, grp);
  520. int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
  521. int aprbit = prio >> (8 - cs->vprebits);
  522. int regno = aprbit / 32;
  523. int regbit = aprbit % 32;
  524. cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
  525. cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;
  526. cs->ich_apr[grp][regno] |= (1 << regbit);
  527. }
static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICV_IAR{0,1}: acknowledge the highest priority pending
     * virtual interrupt for the register's group, activating it and
     * returning its INTID, or return INTID_SPURIOUS if there is no
     * suitable pending interrupt.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx = hppvi_index(cs);
    uint64_t intid = INTID_SPURIOUS;

    if (idx >= 0) {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;

        /* Acknowledge only if the group matches and it may preempt */
        if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
            intid = ich_lr_vintid(lr);
            if (intid < INTID_SECURE) {
                icv_activate_irq(cs, idx, grp);
            } else {
                /* Interrupt goes from Pending to Invalid */
                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
                /* We will now return the (bogus) ID from the list register,
                 * as per the pseudocode.
                 */
            }
        }
    }

    trace_gicv3_icv_iar_read(ri->crm == 8 ? 0 : 1,
                             gicv3_redist_affid(cs), intid);
    return intid;
}
  554. static int icc_highest_active_prio(GICv3CPUState *cs)
  555. {
  556. /* Calculate the current running priority based on the set bits
  557. * in the Active Priority Registers.
  558. */
  559. int i;
  560. for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
  561. uint32_t apr = cs->icc_apr[GICV3_G0][i] |
  562. cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];
  563. if (!apr) {
  564. continue;
  565. }
  566. return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
  567. }
  568. /* No current active interrupts: return idle priority */
  569. return 0xff;
  570. }
static uint32_t icc_gprio_mask(GICv3CPUState *cs, int group)
{
    /* Return a mask word which clears the subpriority bits from
     * a priority value for an interrupt in the specified group.
     * This depends on the BPR value. For CBPR0 (S or NS):
     *  a BPR of 0 means the group priority bits are [7:1];
     *  a BPR of 1 means they are [7:2], and so on down to
     *  a BPR of 7 meaning no group priority bits at all.
     * For CBPR1 NS:
     *  a BPR of 0 is impossible (the minimum value is 1)
     *  a BPR of 1 means the group priority bits are [7:1];
     *  a BPR of 2 means they are [7:2], and so on down to
     *  a BPR of 7 meaning the group priority is [7].
     *
     * Which BPR to use depends on the group of the interrupt and
     * the current ICC_CTLR.CBPR settings.
     *
     * This corresponds to the GroupBits() pseudocode.
     */
    int bpr;

    /* With the relevant CBPR bit set, Group 1 interrupts use BPR0 */
    if ((group == GICV3_G1 && cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR) ||
        (group == GICV3_G1NS &&
         cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        group = GICV3_G0;
    }

    bpr = cs->icc_bpr[group] & 7;

    if (group == GICV3_G1NS) {
        /* NS BPR1 encodings are offset by one relative to BPR0 (see above),
         * so normalize before computing the shift. A BPR1 of 0 should have
         * been prevented at write time.
         */
        assert(bpr > 0);
        bpr--;
    }

    return ~0U << (bpr + 1);
}
  603. static bool icc_no_enabled_hppi(GICv3CPUState *cs)
  604. {
  605. /* Return true if there is no pending interrupt, or the
  606. * highest priority pending interrupt is in a group which has been
  607. * disabled at the CPU interface by the ICC_IGRPEN* register enable bits.
  608. */
  609. return cs->hppi.prio == 0xff || (cs->icc_igrpen[cs->hppi.grp] == 0);
  610. }
static bool icc_hppi_can_preempt(GICv3CPUState *cs)
{
    /* Return true if we have a pending interrupt of sufficient
     * priority to preempt.
     */
    int rprio;
    uint32_t mask;

    if (icc_no_enabled_hppi(cs)) {
        /* Nothing pending, or its group is disabled at the CPU interface */
        return false;
    }

    if (cs->hppi.prio >= cs->icc_pmr_el1) {
        /* Priority mask masks this interrupt (lower value = higher prio) */
        return false;
    }

    rprio = icc_highest_active_prio(cs);
    if (rprio == 0xff) {
        /* No currently running interrupt so we can preempt */
        return true;
    }

    mask = icc_gprio_mask(cs, cs->hppi.grp);

    /* We only preempt a running interrupt if the pending interrupt's
     * group priority is sufficient (the subpriorities are not considered).
     */
    if ((cs->hppi.prio & mask) < (rprio & mask)) {
        return true;
    }

    return false;
}
void gicv3_cpuif_update(GICv3CPUState *cs)
{
    /* Tell the CPU about its highest priority pending interrupt */
    int irqlevel = 0;
    int fiqlevel = 0;
    ARMCPU *cpu = ARM_CPU(cs->cpu);
    CPUARMState *env = &cpu->env;

    /* Caller must hold the iothread lock: we touch CPU irq lines */
    g_assert(qemu_mutex_iothread_locked());

    trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq,
                             cs->hppi.grp, cs->hppi.prio);

    if (cs->hppi.grp == GICV3_G1 && !arm_feature(env, ARM_FEATURE_EL3)) {
        /* If a Security-enabled GIC sends a G1S interrupt to a
         * Security-disabled CPU, we must treat it as if it were G0.
         */
        cs->hppi.grp = GICV3_G0;
    }

    if (icc_hppi_can_preempt(cs)) {
        /* We have an interrupt: should we signal it as IRQ or FIQ?
         * This is described in the GICv3 spec section 4.6.2.
         */
        bool isfiq;

        switch (cs->hppi.grp) {
        case GICV3_G0:
            /* Group 0 is always signalled as FIQ */
            isfiq = true;
            break;
        case GICV3_G1:
            /* Secure Group 1: FIQ to Non-secure state or AArch64 EL3,
             * IRQ otherwise
             */
            isfiq = (!arm_is_secure(env) ||
                     (arm_current_el(env) == 3 && arm_el_is_aa64(env, 3)));
            break;
        case GICV3_G1NS:
            /* NS Group 1: FIQ when the CPU is in Secure state */
            isfiq = arm_is_secure(env);
            break;
        default:
            g_assert_not_reached();
        }

        if (isfiq) {
            fiqlevel = 1;
        } else {
            irqlevel = 1;
        }
    }

    trace_gicv3_cpuif_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);

    /* Drive both lines: whichever is not asserted gets deasserted */
    qemu_set_irq(cs->parent_fiq, fiqlevel);
    qemu_set_irq(cs->parent_irq, irqlevel);
}
static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_PMR_EL1, presenting the Non-secure view of the priority
     * range when Secure priorities are hidden from this access.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint32_t value = cs->icc_pmr_el1;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        return icv_pmr_read(env, ri);
    }

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
        (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS access and Group 0 is inaccessible to NS: return the
         * NS view of the current priority
         */
        if ((value & 0x80) == 0) {
            /* Secure priorities not visible to NS */
            value = 0;
        } else if (value != 0xff) {
            /* Expand the NS half of the range back to 0..0xff */
            value = (value << 1) & 0xff;
        }
    }

    trace_gicv3_icc_pmr_read(gicv3_redist_affid(cs), value);

    return value;
}
static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Write ICC_PMR_EL1, folding a Non-secure value into the Secure
     * priority space where required, then re-evaluate interrupt delivery.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        /* Access is redirected to the virtual CPU interface */
        return icv_pmr_write(env, ri, value);
    }

    trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);

    value &= 0xff;

    if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
        (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS access and Group 0 is inaccessible to NS: the written value
         * must be compressed into the NS (upper) half of the priority range
         */
        if (!(cs->icc_pmr_el1 & 0x80)) {
            /* Current PMR in the secure range, don't allow NS to change it */
            return;
        }
        value = (value >> 1) | 0x80;
    }

    cs->icc_pmr_el1 = value;
    gicv3_cpuif_update(cs);
}
static void icc_activate_irq(GICv3CPUState *cs, int irq)
{
    /* Move the interrupt from the Pending state to Active, and update
     * the Active Priority Registers
     */
    uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
    int prio = cs->hppi.prio & mask;  /* group priority, subprio masked off */
    int aprbit = prio >> 1;           /* one APR bit per even priority step */
    int regno = aprbit / 32;          /* which APR word */
    int regbit = aprbit % 32;         /* bit within that word */

    cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);

    if (irq < GIC_INTERNAL) {
        /* SGI/PPI: active and pending state live in this redistributor */
        cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
        cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 0);
        gicv3_redist_update(cs);
    } else {
        /* SPI: state lives in the distributor */
        gicv3_gicd_active_set(cs->gic, irq);
        gicv3_gicd_pending_clear(cs->gic, irq);
        gicv3_update(cs->gic, irq, 1);
    }
}
static uint64_t icc_hppir0_value(GICv3CPUState *cs, CPUARMState *env)
{
    /* Return the highest priority pending interrupt register value
     * for group 0.
     */
    bool irq_is_secure;

    if (cs->hppi.prio == 0xff) {
        /* Idle priority: nothing pending */
        return INTID_SPURIOUS;
    }

    /* Check whether we can return the interrupt or if we should return
     * a special identifier, as per the CheckGroup0ForSpecialIdentifiers
     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
     * is always zero.)
     */

    /* With DS == 0, G0 and G1 interrupts are Secure; only G1NS is not */
    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
                     (cs->hppi.grp != GICV3_G1NS));

    if (cs->hppi.grp != GICV3_G0 && !arm_is_el3_or_mon(env)) {
        /* Group 1 interrupts are only reported via IAR0/HPPIR0 at EL3 */
        return INTID_SPURIOUS;
    }

    if (irq_is_secure && !arm_is_secure(env)) {
        /* Secure interrupts not visible to Nonsecure */
        return INTID_SPURIOUS;
    }

    if (cs->hppi.grp != GICV3_G0) {
        /* Indicate to EL3 that there's a Group 1 interrupt for the other
         * state pending.
         */
        return irq_is_secure ? INTID_SECURE : INTID_NONSECURE;
    }

    return cs->hppi.irq;
}
static uint64_t icc_hppir1_value(GICv3CPUState *cs, CPUARMState *env)
{
    /* Return the highest priority pending interrupt register value
     * for group 1.
     */
    bool irq_is_secure;

    if (cs->hppi.prio == 0xff) {
        /* Idle priority: nothing pending */
        return INTID_SPURIOUS;
    }

    /* Check whether we can return the interrupt or if we should return
     * a special identifier, as per the CheckGroup1ForSpecialIdentifiers
     * pseudocode. (We can simplify a little because for us ICC_SRE_EL1.RM
     * is always zero.)
     */

    /* With DS == 0, G0 and G1 interrupts are Secure; only G1NS is not */
    irq_is_secure = (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
                     (cs->hppi.grp != GICV3_G1NS));

    if (cs->hppi.grp == GICV3_G0) {
        /* Group 0 interrupts not visible via HPPIR1 */
        return INTID_SPURIOUS;
    }

    if (irq_is_secure) {
        if (!arm_is_secure(env)) {
            /* Secure interrupts not visible in Non-secure */
            return INTID_SPURIOUS;
        }
    } else if (!arm_is_el3_or_mon(env) && arm_is_secure(env)) {
        /* Group 1 non-secure interrupts not visible in Secure EL1 */
        return INTID_SPURIOUS;
    }

    return cs->hppi.irq;
}
static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Acknowledge (ICC_IAR0_EL1): return the highest priority pending
     * Group 0 INTID, activating it unless it is a special identifier.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t intid;

    if (icv_access(env, HCR_FMO)) {
        /* FIQs are virtualized: use the ICV register instead */
        return icv_iar_read(env, ri);
    }

    if (!icc_hppi_can_preempt(cs)) {
        /* Masked by PMR/running priority, or nothing enabled pending */
        intid = INTID_SPURIOUS;
    } else {
        intid = icc_hppir0_value(cs, env);
    }

    if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) {
        /* A real INTID (not in the special range 1020..1023): activate it */
        icc_activate_irq(cs, intid);
    }

    trace_gicv3_icc_iar0_read(gicv3_redist_affid(cs), intid);
    return intid;
}
static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Acknowledge (ICC_IAR1_EL1): return the highest priority pending
     * Group 1 INTID, activating it unless it is a special identifier.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    uint64_t intid;

    if (icv_access(env, HCR_IMO)) {
        /* IRQs are virtualized: use the ICV register instead */
        return icv_iar_read(env, ri);
    }

    if (!icc_hppi_can_preempt(cs)) {
        /* Masked by PMR/running priority, or nothing enabled pending */
        intid = INTID_SPURIOUS;
    } else {
        intid = icc_hppir1_value(cs, env);
    }

    if (!(intid >= INTID_SECURE && intid <= INTID_SPURIOUS)) {
        /* A real INTID (not in the special range 1020..1023): activate it */
        icc_activate_irq(cs, intid);
    }

    trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
    return intid;
}
static void icc_drop_prio(GICv3CPUState *cs, int grp)
{
    /* Drop the priority of the currently active interrupt in
     * the specified group.
     *
     * Note that we can guarantee (because of the requirement to nest
     * ICC_IAR reads [which activate an interrupt and raise priority]
     * with ICC_EOIR writes [which drop the priority for the interrupt])
     * that the interrupt we're being called for is the highest priority
     * active interrupt, meaning that it has the lowest set bit in the
     * APR registers.
     *
     * If the guest does not honour the ordering constraints then the
     * behaviour of the GIC is UNPREDICTABLE, which for us means that
     * the values of the APR registers might become incorrect and the
     * running priority will be wrong, so interrupts that should preempt
     * might not do so, and interrupts that should not preempt might do so.
     */
    int i;

    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[grp]); i++) {
        uint64_t *papr = &cs->icc_apr[grp][i];

        if (!*papr) {
            continue;
        }
        /* Clear the lowest set bit */
        *papr &= *papr - 1;
        /* Only one bit is dropped per EOI, so stop at the first word */
        break;
    }

    /* running priority change means we need an update for this cpu i/f */
    gicv3_cpuif_update(cs);
}
  879. static bool icc_eoi_split(CPUARMState *env, GICv3CPUState *cs)
  880. {
  881. /* Return true if we should split priority drop and interrupt
  882. * deactivation, ie whether the relevant EOIMode bit is set.
  883. */
  884. if (arm_is_el3_or_mon(env)) {
  885. return cs->icc_ctlr_el3 & ICC_CTLR_EL3_EOIMODE_EL3;
  886. }
  887. if (arm_is_secure_below_el3(env)) {
  888. return cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_EOIMODE;
  889. } else {
  890. return cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE;
  891. }
  892. }
static int icc_highest_active_group(GICv3CPUState *cs)
{
    /* Return the group with the highest priority active interrupt.
     * We can do this by just comparing the APRs to see which one
     * has the lowest set bit.
     * (If more than one group is active at the same priority then
     * we're in UNPREDICTABLE territory.)
     */
    int i;

    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
        /* ctz32(0) == 32, so a word with no bits set loses every compare */
        int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
        int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
        int g1nsctz = ctz32(cs->icc_apr[GICV3_G1NS][i]);

        /* On equal-priority ties the checks below favour G0, then G1 */
        if (g1nsctz < g0ctz && g1nsctz < g1ctz) {
            return GICV3_G1NS;
        }
        if (g1ctz < g0ctz) {
            return GICV3_G1;
        }
        if (g0ctz < 32) {
            return GICV3_G0;
        }
    }

    /* No set active bits? UNPREDICTABLE; return -1 so the caller
     * ignores the spurious EOI attempt.
     */
    return -1;
}
  921. static void icc_deactivate_irq(GICv3CPUState *cs, int irq)
  922. {
  923. if (irq < GIC_INTERNAL) {
  924. cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 0);
  925. gicv3_redist_update(cs);
  926. } else {
  927. gicv3_gicd_active_clear(cs->gic, irq);
  928. gicv3_update(cs->gic, irq, 1);
  929. }
  930. }
  931. static bool icv_eoi_split(CPUARMState *env, GICv3CPUState *cs)
  932. {
  933. /* Return true if we should split priority drop and interrupt
  934. * deactivation, ie whether the virtual EOIMode bit is set.
  935. */
  936. return cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM;
  937. }
  938. static int icv_find_active(GICv3CPUState *cs, int irq)
  939. {
  940. /* Given an interrupt number for an active interrupt, return the index
  941. * of the corresponding list register, or -1 if there is no match.
  942. * Corresponds to FindActiveVirtualInterrupt pseudocode.
  943. */
  944. int i;
  945. for (i = 0; i < cs->num_list_regs; i++) {
  946. uint64_t lr = cs->ich_lr_el2[i];
  947. if ((lr & ICH_LR_EL2_STATE_ACTIVE_BIT) && ich_lr_vintid(lr) == irq) {
  948. return i;
  949. }
  950. }
  951. return -1;
  952. }
  953. static void icv_deactivate_irq(GICv3CPUState *cs, int idx)
  954. {
  955. /* Deactivate the interrupt in the specified list register index */
  956. uint64_t lr = cs->ich_lr_el2[idx];
  957. if (lr & ICH_LR_EL2_HW) {
  958. /* Deactivate the associated physical interrupt */
  959. int pirq = ich_lr_pintid(lr);
  960. if (pirq < INTID_SECURE) {
  961. icc_deactivate_irq(cs, pirq);
  962. }
  963. }
  964. /* Clear the 'active' part of the state, so ActivePending->Pending
  965. * and Active->Invalid.
  966. */
  967. lr &= ~ICH_LR_EL2_STATE_ACTIVE_BIT;
  968. cs->ich_lr_el2[idx] = lr;
  969. }
  970. static void icv_increment_eoicount(GICv3CPUState *cs)
  971. {
  972. /* Increment the EOICOUNT field in ICH_HCR_EL2 */
  973. int eoicount = extract64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
  974. ICH_HCR_EL2_EOICOUNT_LENGTH);
  975. cs->ich_hcr_el2 = deposit64(cs->ich_hcr_el2, ICH_HCR_EL2_EOICOUNT_SHIFT,
  976. ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
  977. }
static int icv_drop_prio(GICv3CPUState *cs)
{
    /* Drop the priority of the currently active virtual interrupt
     * (favouring group 0 if there is a set active bit at
     * the same priority for both group 0 and group 1).
     * Return the priority value for the bit we just cleared,
     * or 0xff if no bits were set in the AP registers at all.
     * Note that though the ich_apr[] are uint64_t only the low
     * 32 bits are actually relevant.
     */
    int i;
    /* Number of AP register words implied by the virtual priority bits */
    int aprmax = 1 << (cs->vprebits - 5);

    assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));

    for (i = 0; i < aprmax; i++) {
        uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
        uint64_t *papr1 = &cs->ich_apr[GICV3_G1NS][i];
        int apr0count, apr1count;

        if (!*papr0 && !*papr1) {
            continue;
        }

        /* We can't just use the bit-twiddling hack icc_drop_prio() does
         * because we need to return the bit number we cleared so
         * it can be compared against the list register's priority field.
         */

        /* ctz32(0) == 32, so an empty word never wins the comparison */
        apr0count = ctz32(*papr0);
        apr1count = ctz32(*papr1);

        if (apr0count <= apr1count) {
            /* Clear lowest set G0 bit and return its priority */
            *papr0 &= *papr0 - 1;
            return (apr0count + i * 32) << (icv_min_vbpr(cs) + 1);
        } else {
            /* Clear lowest set G1NS bit and return its priority */
            *papr1 &= *papr1 - 1;
            return (apr1count + i * 32) << (icv_min_vbpr(cs) + 1);
        }
    }

    return 0xff;
}
  1014. static void icv_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
  1015. uint64_t value)
  1016. {
  1017. /* Deactivate interrupt */
  1018. GICv3CPUState *cs = icc_cs_from_env(env);
  1019. int idx;
  1020. int irq = value & 0xffffff;
  1021. trace_gicv3_icv_dir_write(gicv3_redist_affid(cs), value);
  1022. if (irq >= cs->gic->num_irq) {
  1023. /* Also catches special interrupt numbers and LPIs */
  1024. return;
  1025. }
  1026. if (!icv_eoi_split(env, cs)) {
  1027. return;
  1028. }
  1029. idx = icv_find_active(cs, irq);
  1030. if (idx < 0) {
  1031. /* No list register matching this, so increment the EOI count
  1032. * (might trigger a maintenance interrupt)
  1033. */
  1034. icv_increment_eoicount(cs);
  1035. } else {
  1036. icv_deactivate_irq(cs, idx);
  1037. }
  1038. gicv3_cpuif_virt_update(cs);
  1039. }
static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* End of Interrupt for the virtual CPU interface: drop the running
     * priority and, if EOIMode is clear, also deactivate the interrupt.
     * Handles both ICV_EOIR0 (crm == 8) and ICV_EOIR1.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
    int idx, dropprio;

    trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
                               gicv3_redist_affid(cs), value);

    if (irq >= cs->gic->num_irq) {
        /* Also catches special interrupt numbers and LPIs */
        return;
    }

    /* We implement the IMPDEF choice of "drop priority before doing
     * error checks" (because that lets us avoid scanning the AP
     * registers twice).
     */
    dropprio = icv_drop_prio(cs);
    if (dropprio == 0xff) {
        /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
         * whether the list registers are checked in this
         * situation; we choose not to.
         */
        return;
    }

    idx = icv_find_active(cs, irq);

    if (idx < 0) {
        /* No valid list register corresponding to EOI ID */
        icv_increment_eoicount(cs);
    } else {
        uint64_t lr = cs->ich_lr_el2[idx];
        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
        int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);

        /* Only deactivate if the LR's group and group-priority match
         * the EOIR register used and the priority we just dropped.
         */
        if (thisgrp == grp && lr_gprio == dropprio) {
            if (!icv_eoi_split(env, cs)) {
                /* Priority drop and deactivate not split: deactivate irq now */
                icv_deactivate_irq(cs, idx);
            }
        }
    }

    gicv3_cpuif_virt_update(cs);
}
static void icc_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
{
    /* End of Interrupt: drop the running priority for the group this
     * EOIR addresses and, if EOIMode is clear, also deactivate the
     * interrupt. Handles both ICC_EOIR0 (crm == 8) and ICC_EOIR1.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    int grp;

    if (icv_access(env, ri->crm == 8 ? HCR_FMO : HCR_IMO)) {
        /* Virtualized: forward to the ICV register */
        icv_eoir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_eoir_write(ri->crm == 8 ? 0 : 1,
                               gicv3_redist_affid(cs), value);

    if (ri->crm == 8) {
        /* EOIR0 */
        grp = GICV3_G0;
    } else {
        /* EOIR1 */
        if (arm_is_secure(env)) {
            grp = GICV3_G1;
        } else {
            grp = GICV3_G1NS;
        }
    }

    if (irq >= cs->gic->num_irq) {
        /* This handles two cases:
         * 1. If software writes the ID of a spurious interrupt [ie 1020-1023]
         * to the GICC_EOIR, the GIC ignores that write.
         * 2. If software writes the number of a non-existent interrupt
         * this must be a subcase of "value written does not match the last
         * valid interrupt value read from the Interrupt Acknowledge
         * register" and so this is UNPREDICTABLE. We choose to ignore it.
         */
        return;
    }

    if (icc_highest_active_group(cs) != grp) {
        /* Writing the "wrong" EOIR for the active group has no effect.
         * NOTE(review): this simple equality check does not cover every
         * Security-state combination in the GICv3 spec's ICC_EOIR
         * pseudocode (e.g. EL3 EOI of the other state's Group 1) --
         * compare against the spec if EOIs appear to be dropped here.
         */
        return;
    }

    icc_drop_prio(cs, grp);

    if (!icc_eoi_split(env, cs)) {
        /* Priority drop and deactivate not split: deactivate irq now */
        icc_deactivate_irq(cs, irq);
    }
}
  1127. static uint64_t icc_hppir0_read(CPUARMState *env, const ARMCPRegInfo *ri)
  1128. {
  1129. GICv3CPUState *cs = icc_cs_from_env(env);
  1130. uint64_t value;
  1131. if (icv_access(env, HCR_FMO)) {
  1132. return icv_hppir_read(env, ri);
  1133. }
  1134. value = icc_hppir0_value(cs, env);
  1135. trace_gicv3_icc_hppir0_read(gicv3_redist_affid(cs), value);
  1136. return value;
  1137. }
  1138. static uint64_t icc_hppir1_read(CPUARMState *env, const ARMCPRegInfo *ri)
  1139. {
  1140. GICv3CPUState *cs = icc_cs_from_env(env);
  1141. uint64_t value;
  1142. if (icv_access(env, HCR_IMO)) {
  1143. return icv_hppir_read(env, ri);
  1144. }
  1145. value = icc_hppir1_value(cs, env);
  1146. trace_gicv3_icc_hppir1_read(gicv3_redist_affid(cs), value);
  1147. return value;
  1148. }
static uint64_t icc_bpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_BPR0 (crm == 8) or ICC_BPR1, applying the NS banking
     * and CBPR aliasing rules.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    bool satinc = false;
    uint64_t bpr;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        return icv_bpr_read(env, ri);
    }

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        grp = GICV3_G0;
        satinc = true;
    }

    bpr = cs->icc_bpr[grp];
    if (satinc) {
        /* Saturating increment for the CBPR_EL1NS aliased read */
        bpr++;
        bpr = MIN(bpr, 7);
    }

    trace_gicv3_icc_bpr_read(ri->crm == 8 ? 0 : 1, gicv3_redist_affid(cs), bpr);

    return bpr;
}
static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Write ICC_BPR0 (crm == 8) or ICC_BPR1, applying NS banking and
     * CBPR aliasing, and clamping to the group's minimum BPR value.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int grp = (ri->crm == 8) ? GICV3_G0 : GICV3_G1;
    uint64_t minval;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        icv_bpr_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_bpr_write(ri->crm == 8 ? 0 : 1,
                              gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    if (grp == GICV3_G1 && !arm_is_el3_or_mon(env) &&
        (cs->icc_ctlr_el1[GICV3_S] & ICC_CTLR_EL1_CBPR)) {
        /* CBPR_EL1S means secure EL1 or AArch32 EL3 !Mon BPR1 accesses
         * modify BPR0
         */
        grp = GICV3_G0;
    }

    if (grp == GICV3_G1NS && arm_current_el(env) < 3 &&
        (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR)) {
        /* reads return bpr0 + 1 sat to 7, writes ignored */
        return;
    }

    /* Writes below the architectural minimum are clamped, not ignored */
    minval = (grp == GICV3_G1NS) ? GIC_MIN_BPR_NS : GIC_MIN_BPR;
    if (value < minval) {
        value = minval;
    }

    cs->icc_bpr[grp] = value & 7;
    gicv3_cpuif_update(cs);
}
  1216. static uint64_t icc_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
  1217. {
  1218. GICv3CPUState *cs = icc_cs_from_env(env);
  1219. uint64_t value;
  1220. int regno = ri->opc2 & 3;
  1221. int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;
  1222. if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
  1223. return icv_ap_read(env, ri);
  1224. }
  1225. if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
  1226. grp = GICV3_G1NS;
  1227. }
  1228. value = cs->icc_apr[grp][regno];
  1229. trace_gicv3_icc_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
  1230. return value;
  1231. }
static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    /* Write one of the ICC_AP0R<n>/ICC_AP1R<n> active-priority registers */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int regno = ri->opc2 & 3;
    int grp = (ri->crm & 1) ? GICV3_G1 : GICV3_G0;

    if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
        icv_ap_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);

    if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
        grp = GICV3_G1NS;
    }

    /* It's not possible to claim that a Non-secure interrupt is active
     * at a priority outside the Non-secure range (128..255), since this
     * would otherwise allow malicious NS code to block delivery of S interrupts
     * by writing a bad value to these registers.
     */
    if (grp == GICV3_G1NS && regno < 2 && arm_feature(env, ARM_FEATURE_EL3)) {
        /* AP1R0/AP1R1 cover the Secure half of the priority space: WI */
        return;
    }

    cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
    gicv3_cpuif_update(cs);
}
static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
{
    /* Deactivate interrupt (ICC_DIR_EL1). Only has an effect when
     * EOIMode splits priority drop from deactivation, and only when the
     * current EL/security state is permitted to deactivate interrupts
     * of this group.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int irq = value & 0xffffff;
    bool irq_is_secure, single_sec_state, irq_is_grp0;
    bool route_fiq_to_el3, route_irq_to_el3, route_fiq_to_el2, route_irq_to_el2;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        /* Virtualized: forward to the ICV register */
        icv_dir_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_dir_write(gicv3_redist_affid(cs), value);

    if (irq >= cs->gic->num_irq) {
        /* Also catches special interrupt numbers and LPIs */
        return;
    }

    if (!icc_eoi_split(env, cs)) {
        /* DIR writes are ignored unless EOIMode is set */
        return;
    }

    int grp = gicv3_irq_group(cs->gic, cs, irq);

    single_sec_state = cs->gic->gicd_ctlr & GICD_CTLR_DS;
    irq_is_secure = !single_sec_state && (grp != GICV3_G1NS);
    irq_is_grp0 = grp == GICV3_G0;

    /* Check whether we're allowed to deactivate this interrupt based
     * on its group and the current CPU state.
     * These checks are laid out to correspond to the spec's pseudocode.
     */
    route_fiq_to_el3 = env->cp15.scr_el3 & SCR_FIQ;
    route_irq_to_el3 = env->cp15.scr_el3 & SCR_IRQ;
    /* No need to include !IsSecure in route_*_to_el2 as it's only
     * tested in cases where we know !IsSecure is true.
     */
    uint64_t hcr_el2 = arm_hcr_el2_eff(env);

    route_fiq_to_el2 = hcr_el2 & HCR_FMO;
    route_irq_to_el2 = hcr_el2 & HCR_IMO;

    switch (arm_current_el(env)) {
    case 3:
        /* EL3 may always deactivate */
        break;
    case 2:
        /* EL2 may deactivate only interrupts it could itself take */
        if (single_sec_state && irq_is_grp0 && !route_fiq_to_el3) {
            break;
        }
        if (!irq_is_secure && !irq_is_grp0 && !route_irq_to_el3) {
            break;
        }
        return;
    case 1:
        if (!arm_is_secure_below_el3(env)) {
            /* NS EL1: interrupt must not be routed to EL2 or EL3 */
            if (single_sec_state && irq_is_grp0 &&
                !route_fiq_to_el3 && !route_fiq_to_el2) {
                break;
            }
            if (!irq_is_secure && !irq_is_grp0 &&
                !route_irq_to_el3 && !route_irq_to_el2) {
                break;
            }
        } else {
            /* Secure EL1: interrupt must not be routed to EL3 */
            if (irq_is_grp0 && !route_fiq_to_el3) {
                break;
            }
            if (!irq_is_grp0 &&
                (!irq_is_secure || !single_sec_state) &&
                !route_irq_to_el3) {
                break;
            }
        }
        return;
    default:
        /* EL0 cannot reach this register via the access checks */
        g_assert_not_reached();
    }

    icc_deactivate_irq(cs, irq);
}
static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Read ICC_RPR_EL1: the running priority, adjusted to the NS view
     * of the priority space when Secure priorities are hidden.
     */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int prio;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        /* Virtualized: present the virtual running priority */
        return icv_rpr_read(env, ri);
    }

    prio = icc_highest_active_prio(cs);

    if (arm_feature(env, ARM_FEATURE_EL3) &&
        !arm_is_secure(env) && (env->cp15.scr_el3 & SCR_FIQ)) {
        /* NS GIC access and Group 0 is inaccessible to NS */
        if ((prio & 0x80) == 0) {
            /* NS mustn't see priorities in the Secure half of the range */
            prio = 0;
        } else if (prio != 0xff) {
            /* Non-idle priority: show the Non-secure view of it */
            prio = (prio << 1) & 0xff;
        }
    }

    trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
    return prio;
}
static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs,
                             uint64_t value, int grp, bool ns)
{
    /* Generate an SGI as described by the ICC_SGI*R value: decode the
     * target affinity/list (or broadcast), then hand the SGI to each
     * matching redistributor.
     */
    GICv3State *s = cs->gic;

    /* Extract Aff3/Aff2/Aff1 and shift into the bottom 24 bits */
    uint64_t aff = extract64(value, 48, 8) << 16 |
        extract64(value, 32, 8) << 8 |
        extract64(value, 16, 8);
    uint32_t targetlist = extract64(value, 0, 16);  /* one bit per Aff0 */
    uint32_t irq = extract64(value, 24, 4);         /* SGI ID 0..15 */
    bool irm = extract64(value, 40, 1);             /* 1 = broadcast */
    int i;

    if (grp == GICV3_G1 && s->gicd_ctlr & GICD_CTLR_DS) {
        /* If GICD_CTLR.DS == 1, the Distributor treats Secure Group 1
         * interrupts as Group 0 interrupts and must send Secure Group 0
         * interrupts to the target CPUs.
         */
        grp = GICV3_G0;
    }

    trace_gicv3_icc_generate_sgi(gicv3_redist_affid(cs), irq, irm,
                                 aff, targetlist);

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *ocs = &s->cpu[i];

        if (irm) {
            /* IRM == 1 : route to all CPUs except self */
            if (cs == ocs) {
                continue;
            }
        } else {
            /* IRM == 0 : route to Aff3.Aff2.Aff1.n for all n in [0..15]
             * where the corresponding bit is set in targetlist
             */
            int aff0;

            /* GICR_TYPER bits [63:40] hold this CPU's Aff3.Aff2.Aff1 */
            if (ocs->gicr_typer >> 40 != aff) {
                continue;
            }
            aff0 = extract64(ocs->gicr_typer, 32, 8);
            if (aff0 > 15 || extract32(targetlist, aff0, 1) == 0) {
                continue;
            }
        }

        /* The redistributor will check against its own GICR_NSACR as needed */
        gicv3_redist_send_sgi(ocs, grp, irq, ns);
    }
}
  1397. static void icc_sgi0r_write(CPUARMState *env, const ARMCPRegInfo *ri,
  1398. uint64_t value)
  1399. {
  1400. /* Generate Secure Group 0 SGI. */
  1401. GICv3CPUState *cs = icc_cs_from_env(env);
  1402. bool ns = !arm_is_secure(env);
  1403. icc_generate_sgi(env, cs, value, GICV3_G0, ns);
  1404. }
  1405. static void icc_sgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
  1406. uint64_t value)
  1407. {
  1408. /* Generate Group 1 SGI for the current Security state */
  1409. GICv3CPUState *cs = icc_cs_from_env(env);
  1410. int grp;
  1411. bool ns = !arm_is_secure(env);
  1412. grp = ns ? GICV3_G1NS : GICV3_G1;
  1413. icc_generate_sgi(env, cs, value, grp, ns);
  1414. }
  1415. static void icc_asgi1r_write(CPUARMState *env, const ARMCPRegInfo *ri,
  1416. uint64_t value)
  1417. {
  1418. /* Generate Group 1 SGI for the Security state that is not
  1419. * the current state
  1420. */
  1421. GICv3CPUState *cs = icc_cs_from_env(env);
  1422. int grp;
  1423. bool ns = !arm_is_secure(env);
  1424. grp = ns ? GICV3_G1 : GICV3_G1NS;
  1425. icc_generate_sgi(env, cs, value, grp, ns);
  1426. }
  1427. static uint64_t icc_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri)
  1428. {
  1429. GICv3CPUState *cs = icc_cs_from_env(env);
  1430. int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
  1431. uint64_t value;
  1432. if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
  1433. return icv_igrpen_read(env, ri);
  1434. }
  1435. if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
  1436. grp = GICV3_G1NS;
  1437. }
  1438. value = cs->icc_igrpen[grp];
  1439. trace_gicv3_icc_igrpen_read(ri->opc2 & 1 ? 1 : 0,
  1440. gicv3_redist_affid(cs), value);
  1441. return value;
  1442. }
  1443. static void icc_igrpen_write(CPUARMState *env, const ARMCPRegInfo *ri,
  1444. uint64_t value)
  1445. {
  1446. GICv3CPUState *cs = icc_cs_from_env(env);
  1447. int grp = ri->opc2 & 1 ? GICV3_G1 : GICV3_G0;
  1448. if (icv_access(env, grp == GICV3_G0 ? HCR_FMO : HCR_IMO)) {
  1449. icv_igrpen_write(env, ri, value);
  1450. return;
  1451. }
  1452. trace_gicv3_icc_igrpen_write(ri->opc2 & 1 ? 1 : 0,
  1453. gicv3_redist_affid(cs), value);
  1454. if (grp == GICV3_G1 && gicv3_use_ns_bank(env)) {
  1455. grp = GICV3_G1NS;
  1456. }
  1457. cs->icc_igrpen[grp] = value & ICC_IGRPEN_ENABLE;
  1458. gicv3_cpuif_update(cs);
  1459. }
  1460. static uint64_t icc_igrpen1_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
  1461. {
  1462. GICv3CPUState *cs = icc_cs_from_env(env);
  1463. uint64_t value;
  1464. /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
  1465. value = cs->icc_igrpen[GICV3_G1NS] | (cs->icc_igrpen[GICV3_G1] << 1);
  1466. trace_gicv3_icc_igrpen1_el3_read(gicv3_redist_affid(cs), value);
  1467. return value;
  1468. }
  1469. static void icc_igrpen1_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
  1470. uint64_t value)
  1471. {
  1472. GICv3CPUState *cs = icc_cs_from_env(env);
  1473. trace_gicv3_icc_igrpen1_el3_write(gicv3_redist_affid(cs), value);
  1474. /* IGRPEN1_EL3 bits 0 and 1 are r/w aliases into IGRPEN1_EL1 NS and S */
  1475. cs->icc_igrpen[GICV3_G1NS] = extract32(value, 0, 1);
  1476. cs->icc_igrpen[GICV3_G1] = extract32(value, 1, 1);
  1477. gicv3_cpuif_update(cs);
  1478. }
  1479. static uint64_t icc_ctlr_el1_read(CPUARMState *env, const ARMCPRegInfo *ri)
  1480. {
  1481. GICv3CPUState *cs = icc_cs_from_env(env);
  1482. int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
  1483. uint64_t value;
  1484. if (icv_access(env, HCR_FMO | HCR_IMO)) {
  1485. return icv_ctlr_read(env, ri);
  1486. }
  1487. value = cs->icc_ctlr_el1[bank];
  1488. trace_gicv3_icc_ctlr_read(gicv3_redist_affid(cs), value);
  1489. return value;
  1490. }
static void icc_ctlr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                               uint64_t value)
{
    /* Write the Security-state-banked ICC_CTLR_EL1 */
    GICv3CPUState *cs = icc_cs_from_env(env);
    int bank = gicv3_use_ns_bank(env) ? GICV3_NS : GICV3_S;
    uint64_t mask;

    if (icv_access(env, HCR_FMO | HCR_IMO)) {
        /* EL2 has virtualized both groups: access the virtual copy */
        icv_ctlr_write(env, ri, value);
        return;
    }

    trace_gicv3_icc_ctlr_write(gicv3_redist_affid(cs), value);

    /* Only CBPR and EOIMODE can be RW;
     * for us PMHE is RAZ/WI (we don't implement 1-of-N interrupts or
     * the associated priority-based routing of them);
     * if EL3 is implemented and GICD_CTLR.DS == 0, then PMHE and CBPR are RO.
     */
    if (arm_feature(env, ARM_FEATURE_EL3) &&
        ((cs->gic->gicd_ctlr & GICD_CTLR_DS) == 0)) {
        mask = ICC_CTLR_EL1_EOIMODE;
    } else {
        mask = ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE;
    }

    cs->icc_ctlr_el1[bank] &= ~mask;
    cs->icc_ctlr_el1[bank] |= (value & mask);
    gicv3_cpuif_update(cs);
}
  1517. static uint64_t icc_ctlr_el3_read(CPUARMState *env, const ARMCPRegInfo *ri)
  1518. {
  1519. GICv3CPUState *cs = icc_cs_from_env(env);
  1520. uint64_t value;
  1521. value = cs->icc_ctlr_el3;
  1522. if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
  1523. value |= ICC_CTLR_EL3_EOIMODE_EL1NS;
  1524. }
  1525. if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
  1526. value |= ICC_CTLR_EL3_CBPR_EL1NS;
  1527. }
  1528. if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_EOIMODE) {
  1529. value |= ICC_CTLR_EL3_EOIMODE_EL1S;
  1530. }
  1531. if (cs->icc_ctlr_el1[GICV3_NS] & ICC_CTLR_EL1_CBPR) {
  1532. value |= ICC_CTLR_EL3_CBPR_EL1S;
  1533. }
  1534. trace_gicv3_icc_ctlr_el3_read(gicv3_redist_affid(cs), value);
  1535. return value;
  1536. }
  1537. static void icc_ctlr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
  1538. uint64_t value)
  1539. {
  1540. GICv3CPUState *cs = icc_cs_from_env(env);
  1541. uint64_t mask;
  1542. trace_gicv3_icc_ctlr_el3_write(gicv3_redist_affid(cs), value);
  1543. /* *_EL1NS and *_EL1S bits are aliases into the ICC_CTLR_EL1 bits. */
  1544. cs->icc_ctlr_el1[GICV3_NS] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
  1545. if (value & ICC_CTLR_EL3_EOIMODE_EL1NS) {
  1546. cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_EOIMODE;
  1547. }
  1548. if (value & ICC_CTLR_EL3_CBPR_EL1NS) {
  1549. cs->icc_ctlr_el1[GICV3_NS] |= ICC_CTLR_EL1_CBPR;
  1550. }
  1551. cs->icc_ctlr_el1[GICV3_S] &= ~(ICC_CTLR_EL1_CBPR | ICC_CTLR_EL1_EOIMODE);
  1552. if (value & ICC_CTLR_EL3_EOIMODE_EL1S) {
  1553. cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_EOIMODE;
  1554. }
  1555. if (value & ICC_CTLR_EL3_CBPR_EL1S) {
  1556. cs->icc_ctlr_el1[GICV3_S] |= ICC_CTLR_EL1_CBPR;
  1557. }
  1558. /* The only bit stored in icc_ctlr_el3 which is writeable is EOIMODE_EL3: */
  1559. mask = ICC_CTLR_EL3_EOIMODE_EL3;
  1560. cs->icc_ctlr_el3 &= ~mask;
  1561. cs->icc_ctlr_el3 |= (value & mask);
  1562. gicv3_cpuif_update(cs);
  1563. }
/* Access check for CPU interface registers trapped by both
 * SCR_EL3.IRQ and SCR_EL3.FIQ, and by ICH_HCR_EL2.TC.
 */
static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
                                          const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    /* ICH_HCR_EL2.TC traps Non-secure EL1 accesses to EL2 */
    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TC) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    /* EL3 trap applies only when both SCR_EL3.IRQ and SCR_EL3.FIQ are set */
    if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
        switch (el) {
        case 1:
            /* Note that arm_hcr_el2_eff takes secure state into account. */
            if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            /* AArch32 at EL3 but not in Monitor mode still traps */
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* With an AArch32 EL3 the trap is taken as an ordinary exception
     * to Monitor mode rather than being routed to EL3.
     */
    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        r = CP_ACCESS_TRAP;
    }
    return r;
}
  1600. static CPAccessResult gicv3_dir_access(CPUARMState *env,
  1601. const ARMCPRegInfo *ri, bool isread)
  1602. {
  1603. GICv3CPUState *cs = icc_cs_from_env(env);
  1604. if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TDIR) &&
  1605. arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
  1606. /* Takes priority over a possible EL3 trap */
  1607. return CP_ACCESS_TRAP_EL2;
  1608. }
  1609. return gicv3_irqfiq_access(env, ri, isread);
  1610. }
  1611. static CPAccessResult gicv3_sgi_access(CPUARMState *env,
  1612. const ARMCPRegInfo *ri, bool isread)
  1613. {
  1614. if (arm_current_el(env) == 1 &&
  1615. (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) {
  1616. /* Takes priority over a possible EL3 trap */
  1617. return CP_ACCESS_TRAP_EL2;
  1618. }
  1619. return gicv3_irqfiq_access(env, ri, isread);
  1620. }
/* Access check for Group 0 (FIQ-related) CPU interface registers,
 * trapped by SCR_EL3.FIQ and by ICH_HCR_EL2.TALL0.
 */
static CPAccessResult gicv3_fiq_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    /* ICH_HCR_EL2.TALL0 traps Non-secure EL1 Group 0 accesses to EL2 */
    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL0) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if (env->cp15.scr_el3 & SCR_FIQ) {
        switch (el) {
        case 1:
            /* No EL3 trap if FIQs are already routed to EL2 */
            if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            /* AArch32 at EL3 but not in Monitor mode still traps */
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* With an AArch32 EL3 the trap goes to Monitor mode */
    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        r = CP_ACCESS_TRAP;
    }
    return r;
}
/* Access check for Group 1 (IRQ-related) CPU interface registers,
 * trapped by SCR_EL3.IRQ and by ICH_HCR_EL2.TALL1.
 */
static CPAccessResult gicv3_irq_access(CPUARMState *env,
                                       const ARMCPRegInfo *ri, bool isread)
{
    CPAccessResult r = CP_ACCESS_OK;
    GICv3CPUState *cs = icc_cs_from_env(env);
    int el = arm_current_el(env);

    /* ICH_HCR_EL2.TALL1 traps Non-secure EL1 Group 1 accesses to EL2 */
    if ((cs->ich_hcr_el2 & ICH_HCR_EL2_TALL1) &&
        el == 1 && !arm_is_secure_below_el3(env)) {
        /* Takes priority over a possible EL3 trap */
        return CP_ACCESS_TRAP_EL2;
    }

    if (env->cp15.scr_el3 & SCR_IRQ) {
        switch (el) {
        case 1:
            /* No EL3 trap if IRQs are already routed to EL2 */
            if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        case 2:
            r = CP_ACCESS_TRAP_EL3;
            break;
        case 3:
            /* AArch32 at EL3 but not in Monitor mode still traps */
            if (!is_a64(env) && !arm_is_el3_or_mon(env)) {
                r = CP_ACCESS_TRAP_EL3;
            }
            break;
        default:
            g_assert_not_reached();
        }
    }

    /* With an AArch32 EL3 the trap goes to Monitor mode */
    if (r == CP_ACCESS_TRAP_EL3 && !arm_el_is_aa64(env, 3)) {
        r = CP_ACCESS_TRAP;
    }
    return r;
}
/* Reset the entire CPU interface state, both physical (ICC_*) and
 * virtual (ICH_*). This is hung off the ICC_PMR_EL1 reginfo entry
 * rather than being split into one reset function per register.
 */
static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    GICv3CPUState *cs = icc_cs_from_env(env);

    /* Both Security banks of ICC_CTLR_EL1 reset with A3V set and
     * the IDbits field = 1, PRIbits field = 7.
     */
    cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
        (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
        (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
    cs->icc_pmr_el1 = 0;
    /* BPRs reset to their minimum supported values */
    cs->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
    cs->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
    cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR_NS;
    memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
    memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
    cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
        (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
        (7 << ICC_CTLR_EL3_PRIBITS_SHIFT);

    /* Virtual (hypervisor) interface state */
    memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
    cs->ich_hcr_el2 = 0;
    memset(cs->ich_lr_el2, 0, sizeof(cs->ich_lr_el2));
    /* VFIQEN is RES1 for us; virtual BPRs reset to their minimums */
    cs->ich_vmcr_el2 = ICH_VMCR_EL2_VFIQEN |
        ((icv_min_vbpr(cs) + 1) << ICH_VMCR_EL2_VBPR1_SHIFT) |
        (icv_min_vbpr(cs) << ICH_VMCR_EL2_VBPR0_SHIFT);
}
/* System register descriptions for the GICv3 CPU interface (ICC_* regs).
 * All are ARM_CP_IO | ARM_CP_NO_RAW: they have side effects and no raw
 * backing state, so migration is handled by the GICv3 device itself.
 */
static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
    { .name = "ICC_PMR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 6, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_pmr_read,
      .writefn = icc_pmr_write,
      /* We hang the whole cpu interface reset routine off here
       * rather than parcelling it out into one little function
       * per register
       */
      .resetfn = icc_reset,
    },
    { .name = "ICC_IAR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_fiq_access,
      .readfn = icc_iar0_read,
    },
    { .name = "ICC_EOIR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_fiq_access,
      .writefn = icc_eoir_write,
    },
    { .name = "ICC_HPPIR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_fiq_access,
      .readfn = icc_hppir0_read,
    },
    { .name = "ICC_BPR0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_bpr_read,
      .writefn = icc_bpr_write,
    },
    { .name = "ICC_AP0R0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    /* All the ICC_AP1R*_EL1 registers are banked */
    { .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_ap_read,
      .writefn = icc_ap_write,
    },
    { .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_dir_access,
      .writefn = icc_dir_write,
    },
    { .name = "ICC_RPR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_rpr_read,
    },
    /* The SGI registers have both a 64-bit AArch64 encoding and a
     * 64-bit (ARM_CP_64BIT) AArch32 cp15 encoding.
     */
    { .name = "ICC_SGI1R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi1r_write,
    },
    { .name = "ICC_SGI1R",
      .cp = 15, .opc1 = 0, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi1r_write,
    },
    { .name = "ICC_ASGI1R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_asgi1r_write,
    },
    { .name = "ICC_ASGI1R",
      .cp = 15, .opc1 = 1, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_asgi1r_write,
    },
    { .name = "ICC_SGI0R_EL1", .state = ARM_CP_STATE_AA64,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi0r_write,
    },
    { .name = "ICC_SGI0R",
      .cp = 15, .opc1 = 2, .crm = 12,
      .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_sgi_access,
      .writefn = icc_sgi0r_write,
    },
    { .name = "ICC_IAR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_iar1_read,
    },
    { .name = "ICC_EOIR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_W, .accessfn = gicv3_irq_access,
      .writefn = icc_eoir_write,
    },
    { .name = "ICC_HPPIR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_R, .accessfn = gicv3_irq_access,
      .readfn = icc_hppir1_read,
    },
    /* This register is banked */
    { .name = "ICC_BPR1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_bpr_read,
      .writefn = icc_bpr_write,
    },
    /* This register is banked */
    { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irqfiq_access,
      .readfn = icc_ctlr_el1_read,
      .writefn = icc_ctlr_el1_write,
    },
    { .name = "ICC_SRE_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL1_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       * This register is banked but since it's constant we don't
       * need to do anything special.
       */
      .resetvalue = 0x7,
    },
    { .name = "ICC_IGRPEN0_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 6,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_fiq_access,
      .readfn = icc_igrpen_read,
      .writefn = icc_igrpen_write,
    },
    /* This register is banked */
    { .name = "ICC_IGRPEN1_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL1_RW, .accessfn = gicv3_irq_access,
      .readfn = icc_igrpen_read,
      .writefn = icc_igrpen_write,
    },
    { .name = "ICC_SRE_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL2_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       */
      .resetvalue = 0xf,
    },
    { .name = "ICC_CTLR_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 4,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL3_RW,
      .readfn = icc_ctlr_el3_read,
      .writefn = icc_ctlr_el3_write,
    },
    { .name = "ICC_SRE_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 5,
      .type = ARM_CP_NO_RAW | ARM_CP_CONST,
      .access = PL3_RW,
      /* We don't support IRQ/FIQ bypass and system registers are
       * always enabled, so all our bits are RAZ/WI or RAO/WI.
       */
      .resetvalue = 0xf,
    },
    { .name = "ICC_IGRPEN1_EL3", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 12, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL3_RW,
      .readfn = icc_igrpen1_el3_read,
      .writefn = icc_igrpen1_el3_write,
    },
    REGINFO_SENTINEL
};
  1953. static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
  1954. {
  1955. GICv3CPUState *cs = icc_cs_from_env(env);
  1956. int regno = ri->opc2 & 3;
  1957. int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
  1958. uint64_t value;
  1959. value = cs->ich_apr[grp][regno];
  1960. trace_gicv3_ich_ap_read(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
  1961. return value;
  1962. }
  1963. static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
  1964. uint64_t value)
  1965. {
  1966. GICv3CPUState *cs = icc_cs_from_env(env);
  1967. int regno = ri->opc2 & 3;
  1968. int grp = (ri->crm & 1) ? GICV3_G1NS : GICV3_G0;
  1969. trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
  1970. cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
  1971. gicv3_cpuif_virt_update(cs);
  1972. }
  1973. static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
  1974. {
  1975. GICv3CPUState *cs = icc_cs_from_env(env);
  1976. uint64_t value = cs->ich_hcr_el2;
  1977. trace_gicv3_ich_hcr_read(gicv3_redist_affid(cs), value);
  1978. return value;
  1979. }
  1980. static void ich_hcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
  1981. uint64_t value)
  1982. {
  1983. GICv3CPUState *cs = icc_cs_from_env(env);
  1984. trace_gicv3_ich_hcr_write(gicv3_redist_affid(cs), value);
  1985. value &= ICH_HCR_EL2_EN | ICH_HCR_EL2_UIE | ICH_HCR_EL2_LRENPIE |
  1986. ICH_HCR_EL2_NPIE | ICH_HCR_EL2_VGRP0EIE | ICH_HCR_EL2_VGRP0DIE |
  1987. ICH_HCR_EL2_VGRP1EIE | ICH_HCR_EL2_VGRP1DIE | ICH_HCR_EL2_TC |
  1988. ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 | ICH_HCR_EL2_TSEI |
  1989. ICH_HCR_EL2_TDIR | ICH_HCR_EL2_EOICOUNT_MASK;
  1990. cs->ich_hcr_el2 = value;
  1991. gicv3_cpuif_virt_update(cs);
  1992. }
  1993. static uint64_t ich_vmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
  1994. {
  1995. GICv3CPUState *cs = icc_cs_from_env(env);
  1996. uint64_t value = cs->ich_vmcr_el2;
  1997. trace_gicv3_ich_vmcr_read(gicv3_redist_affid(cs), value);
  1998. return value;
  1999. }
  2000. static void ich_vmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
  2001. uint64_t value)
  2002. {
  2003. GICv3CPUState *cs = icc_cs_from_env(env);
  2004. trace_gicv3_ich_vmcr_write(gicv3_redist_affid(cs), value);
  2005. value &= ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1 | ICH_VMCR_EL2_VCBPR |
  2006. ICH_VMCR_EL2_VEOIM | ICH_VMCR_EL2_VBPR1_MASK |
  2007. ICH_VMCR_EL2_VBPR0_MASK | ICH_VMCR_EL2_VPMR_MASK;
  2008. value |= ICH_VMCR_EL2_VFIQEN;
  2009. cs->ich_vmcr_el2 = value;
  2010. /* Enforce "writing BPRs to less than minimum sets them to the minimum"
  2011. * by reading and writing back the fields.
  2012. */
  2013. write_vbpr(cs, GICV3_G0, read_vbpr(cs, GICV3_G0));
  2014. write_vbpr(cs, GICV3_G1, read_vbpr(cs, GICV3_G1));
  2015. gicv3_cpuif_virt_update(cs);
  2016. }
  2017. static uint64_t ich_lr_read(CPUARMState *env, const ARMCPRegInfo *ri)
  2018. {
  2019. GICv3CPUState *cs = icc_cs_from_env(env);
  2020. int regno = ri->opc2 | ((ri->crm & 1) << 3);
  2021. uint64_t value;
  2022. /* This read function handles all of:
  2023. * 64-bit reads of the whole LR
  2024. * 32-bit reads of the low half of the LR
  2025. * 32-bit reads of the high half of the LR
  2026. */
  2027. if (ri->state == ARM_CP_STATE_AA32) {
  2028. if (ri->crm >= 14) {
  2029. value = extract64(cs->ich_lr_el2[regno], 32, 32);
  2030. trace_gicv3_ich_lrc_read(regno, gicv3_redist_affid(cs), value);
  2031. } else {
  2032. value = extract64(cs->ich_lr_el2[regno], 0, 32);
  2033. trace_gicv3_ich_lr32_read(regno, gicv3_redist_affid(cs), value);
  2034. }
  2035. } else {
  2036. value = cs->ich_lr_el2[regno];
  2037. trace_gicv3_ich_lr_read(regno, gicv3_redist_affid(cs), value);
  2038. }
  2039. return value;
  2040. }
static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value)
{
    GICv3CPUState *cs = icc_cs_from_env(env);
    /* regno: opc2 supplies bits [2:0], crm bit 0 supplies bit 3 */
    int regno = ri->opc2 | ((ri->crm & 1) << 3);

    /* This write function handles all of:
     * 64-bit writes to the whole LR
     * 32-bit writes to the low half of the LR
     * 32-bit writes to the high half of the LR
     */
    if (ri->state == ARM_CP_STATE_AA32) {
        if (ri->crm >= 14) {
            /* AArch32 ICH_LRC<n>: merge new high half with existing low */
            trace_gicv3_ich_lrc_write(regno, gicv3_redist_affid(cs), value);
            value = deposit64(cs->ich_lr_el2[regno], 32, 32, value);
        } else {
            /* AArch32 ICH_LR<n>: merge new low half with existing high */
            trace_gicv3_ich_lr32_write(regno, gicv3_redist_affid(cs), value);
            value = deposit64(cs->ich_lr_el2[regno], 0, 32, value);
        }
    } else {
        trace_gicv3_ich_lr_write(regno, gicv3_redist_affid(cs), value);
    }

    /* Enforce RES0 bits in priority field */
    if (cs->vpribits < 8) {
        value = deposit64(value, ICH_LR_EL2_PRIORITY_SHIFT,
                          8 - cs->vpribits, 0);
    }

    cs->ich_lr_el2[regno] = value;
    gicv3_cpuif_virt_update(cs);
}
  2070. static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri)
  2071. {
  2072. GICv3CPUState *cs = icc_cs_from_env(env);
  2073. uint64_t value;
  2074. value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT)
  2075. | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V
  2076. | (1 << ICH_VTR_EL2_IDBITS_SHIFT)
  2077. | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT)
  2078. | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT);
  2079. trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value);
  2080. return value;
  2081. }
  2082. static uint64_t ich_misr_read(CPUARMState *env, const ARMCPRegInfo *ri)
  2083. {
  2084. GICv3CPUState *cs = icc_cs_from_env(env);
  2085. uint64_t value = maintenance_interrupt_state(cs);
  2086. trace_gicv3_ich_misr_read(gicv3_redist_affid(cs), value);
  2087. return value;
  2088. }
  2089. static uint64_t ich_eisr_read(CPUARMState *env, const ARMCPRegInfo *ri)
  2090. {
  2091. GICv3CPUState *cs = icc_cs_from_env(env);
  2092. uint64_t value = eoi_maintenance_interrupt_state(cs, NULL);
  2093. trace_gicv3_ich_eisr_read(gicv3_redist_affid(cs), value);
  2094. return value;
  2095. }
  2096. static uint64_t ich_elrsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
  2097. {
  2098. GICv3CPUState *cs = icc_cs_from_env(env);
  2099. uint64_t value = 0;
  2100. int i;
  2101. for (i = 0; i < cs->num_list_regs; i++) {
  2102. uint64_t lr = cs->ich_lr_el2[i];
  2103. if ((lr & ICH_LR_EL2_STATE_MASK) == 0 &&
  2104. ((lr & ICH_LR_EL2_HW) != 0 || (lr & ICH_LR_EL2_EOI) == 0)) {
  2105. value |= (1 << i);
  2106. }
  2107. }
  2108. trace_gicv3_ich_elrsr_read(gicv3_redist_affid(cs), value);
  2109. return value;
  2110. }
/* Virtual-interface (ICH_*) system registers, defined only when the CPU
 * has EL2 and list registers. ICH_AP*R0 are always present; the higher
 * AP registers live in separate tables gated on vprebits.
 */
static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = {
    { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_hcr_read,
      .writefn = ich_hcr_write,
    },
    { .name = "ICH_VTR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_vtr_read,
    },
    { .name = "ICH_MISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_misr_read,
    },
    { .name = "ICH_EISR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_eisr_read,
    },
    { .name = "ICH_ELRSR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 5,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_R,
      .readfn = ich_elrsr_read,
    },
    { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_vmcr_read,
      .writefn = ich_vmcr_write,
    },
    REGINFO_SENTINEL
};
/* ICH_AP*R1 registers: only defined when vprebits >= 6 */
static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = {
    { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    REGINFO_SENTINEL
};
/* ICH_AP*R2 and ICH_AP*R3 registers: only defined when vprebits == 7 */
static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = {
    { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3,
      .type = ARM_CP_IO | ARM_CP_NO_RAW,
      .access = PL2_RW,
      .readfn = ich_ap_read,
      .writefn = ich_ap_write,
    },
    REGINFO_SENTINEL
};
  2214. static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque)
  2215. {
  2216. GICv3CPUState *cs = opaque;
  2217. gicv3_cpuif_update(cs);
  2218. }
void gicv3_init_cpuif(GICv3State *s)
{
    /* Called from the GICv3 realize function; register our system
     * registers with the CPU
     */
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
        GICv3CPUState *cs = &s->cpu[i];

        /* Note that we can't just use the GICv3CPUState as an opaque pointer
         * in define_arm_cp_regs_with_opaque(), because when we're called back
         * it might be with code translated by CPU 0 but run by CPU 1, in
         * which case we'd get the wrong value.
         * So instead we define the regs with no ri->opaque info, and
         * get back to the GICv3CPUState from the CPUARMState.
         */
        define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);

        /* The virtual interface registers exist only if the CPU has
         * EL2 and at least one list register.
         */
        if (arm_feature(&cpu->env, ARM_FEATURE_EL2)
            && cpu->gic_num_lrs) {
            int j;

            cs->maintenance_irq = cpu->gicv3_maintenance_interrupt;

            cs->num_list_regs = cpu->gic_num_lrs;
            cs->vpribits = cpu->gic_vpribits;
            cs->vprebits = cpu->gic_vprebits;

            /* Check against architectural constraints: getting these
             * wrong would be a bug in the CPU code defining these,
             * and the implementation relies on them holding.
             */
            g_assert(cs->vprebits <= cs->vpribits);
            g_assert(cs->vprebits >= 5 && cs->vprebits <= 7);
            g_assert(cs->vpribits >= 5 && cs->vpribits <= 8);

            define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo);

            for (j = 0; j < cs->num_list_regs; j++) {
                /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs
                 * are split into two cp15 regs, LR (the low part, with the
                 * same encoding as the AArch64 LR) and LRC (the high part).
                 */
                ARMCPRegInfo lr_regset[] = {
                    { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH,
                      .opc0 = 3, .opc1 = 4, .crn = 12,
                      .crm = 12 + (j >> 3), .opc2 = j & 7,
                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
                      .access = PL2_RW,
                      .readfn = ich_lr_read,
                      .writefn = ich_lr_write,
                    },
                    { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32,
                      .cp = 15, .opc1 = 4, .crn = 12,
                      .crm = 14 + (j >> 3), .opc2 = j & 7,
                      .type = ARM_CP_IO | ARM_CP_NO_RAW,
                      .access = PL2_RW,
                      .readfn = ich_lr_read,
                      .writefn = ich_lr_write,
                    },
                    REGINFO_SENTINEL
                };
                define_arm_cp_regs(cpu, lr_regset);
            }
            /* Higher active-priority registers depend on preemption bits */
            if (cs->vprebits >= 6) {
                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo);
            }
            if (cs->vprebits == 7) {
                define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo);
            }
        }
        arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs);
    }
}