arm_gicv3.c

/*
 * ARM Generic Interrupt Controller v3 (emulation)
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

/* This file contains implementation code for an interrupt controller
 * which implements the GICv3 architecture. Specifically this is where
 * the device class itself and the functions for handling interrupts
 * coming in and going out live.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
#include "hw/intc/arm_gicv3.h"
#include "gicv3_internal.h"

static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio)
{
    /* Return true if this IRQ at this priority should take
     * precedence over the current recorded highest priority
     * pending interrupt for this CPU. We also return true if
     * the current recorded highest priority pending interrupt
     * is the same as this one (a property which the calling code
     * relies on).
     */
    if (prio < cs->hppi.prio) {
        return true;
    }
    /* If multiple pending interrupts have the same priority then it is an
     * IMPDEF choice which of them to signal to the CPU. We choose to
     * signal the one with the lowest interrupt number.
     */
    if (prio == cs->hppi.prio && irq <= cs->hppi.irq) {
        return true;
    }
    return false;
}
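
/* Worked example of the ordering rule above, with illustrative values only:
 * GIC priorities compare "lower value wins". If cs->hppi currently records
 * prio 0x80 for IRQ 40, a newly pending IRQ 50 at prio 0x40 is better
 * (0x40 < 0x80). At an equal priority of 0x80, IRQ 35 wins the IMPDEF tie
 * against the recorded IRQ 40 because it has the lower interrupt number,
 * whereas IRQ 50 at that priority does not.
 */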

static uint32_t gicd_int_pending(GICv3State *s, int irq)
{
    /* Recalculate which distributor interrupts are actually pending
     * in the group of 32 interrupts starting at irq (which should be a
     * multiple of 32), and return a 32-bit integer which has a bit set for
     * each interrupt that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     *  + the PENDING latch is set OR it is level triggered and the input is 1
     *  + its ENABLE bit is set
     *  + the GICD enable bit for its group is set
     *  + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
    uint32_t pend, grpmask;
    uint32_t pending = *gic_bmp_ptr32(s->pending, irq);
    uint32_t edge_trigger = *gic_bmp_ptr32(s->edge_trigger, irq);
    uint32_t level = *gic_bmp_ptr32(s->level, irq);
    uint32_t group = *gic_bmp_ptr32(s->group, irq);
    uint32_t grpmod = *gic_bmp_ptr32(s->grpmod, irq);
    uint32_t enable = *gic_bmp_ptr32(s->enabled, irq);
    uint32_t active = *gic_bmp_ptr32(s->active, irq);

    pend = pending | (~edge_trigger & level);
    pend &= enable;
    pend &= ~active;

    if (s->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    }

    grpmask = 0;
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= group;
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~group & grpmod);
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~group & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}
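
/* Illustrative walk-through of the bulk calculation above, using made-up
 * register values for a single bit position: suppose for one interrupt
 * pending = 0, edge_trigger = 0, level = 1, enable = 1 and active = 0. Then
 * pend = 0 | (~0 & 1) = 1, i.e. a level-triggered input that is currently
 * high counts as pending, and the bit survives the enable and ~active masks.
 * If its group bit is 1 and GICD_CTLR_EN_GRP1NS is set, grpmask has that bit
 * set too, so the interrupt is reported; with Group 1NS disabled in
 * GICD_CTLR the same bit would be masked out.
 */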

static uint32_t gicr_int_pending(GICv3CPUState *cs)
{
    /* Recalculate which redistributor interrupts are actually pending,
     * and return a 32-bit integer which has a bit set for each interrupt
     * that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     *  + the PENDING latch is set OR it is level triggered and the input is 1
     *  + its ENABLE bit is set
     *  + the GICD enable bit for its group is set
     *  + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
    uint32_t pend, grpmask, grpmod;

    pend = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
    pend &= cs->gicr_ienabler0;
    pend &= ~cs->gicr_iactiver0;

    if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    } else {
        grpmod = cs->gicr_igrpmodr0;
    }

    grpmask = 0;
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= cs->gicr_igroupr0;
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~cs->gicr_igroupr0 & grpmod);
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~cs->gicr_igroupr0 & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}

/* Update the interrupt status after state in a redistributor
 * or CPU interface has changed, but don't tell the CPU i/f.
 */
static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
{
    /* Find the highest priority pending interrupt among the
     * redistributor interrupts (SGIs and PPIs).
     */
    bool seenbetter = false;
    uint8_t prio;
    int i;
    uint32_t pend;

    /* Find out which redistributor interrupts are eligible to be
     * signaled to the CPU interface.
     */
    pend = gicr_int_pending(cs);

    if (pend) {
        for (i = 0; i < GIC_INTERNAL; i++) {
            if (!(pend & (1 << i))) {
                continue;
            }
            prio = cs->gicr_ipriorityr[i];
            if (irqbetter(cs, i, prio)) {
                cs->hppi.irq = i;
                cs->hppi.prio = prio;
                seenbetter = true;
            }
        }
    }

    if (seenbetter) {
        cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
    }

    if ((cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) && cs->gic->lpi_enable &&
        (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) &&
        (cs->hpplpi.prio != 0xff)) {
        if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio)) {
            cs->hppi.irq = cs->hpplpi.irq;
            cs->hppi.prio = cs->hpplpi.prio;
            cs->hppi.grp = cs->hpplpi.grp;
            seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was no
     * previous pending interrupt at all), then that is still valid, and
     * we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    if (!seenbetter && cs->hppi.prio != 0xff &&
        (cs->hppi.irq < GIC_INTERNAL ||
         cs->hppi.irq >= GICV3_LPI_INTID_START)) {
        gicv3_full_update_noirqset(cs->gic);
    }
}
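
/* Concretely, with illustrative values: if the previous best was a PPI at
 * prio 0x90 and this rescan finds one at prio 0x60, seenbetter is true and
 * the new value simply stands. If nothing beat the previous best and that
 * best was an SPI (outside the SGI/PPI and LPI ranges considered here), the
 * cached value is still trustworthy and is left alone. But if the previous
 * best was itself an SGI/PPI or LPI and nothing here matched or beat it, its
 * priority may have been lowered, so only a full recalculation is safe.
 */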

/* Update the GIC status after state in a redistributor or
 * CPU interface has changed, and inform the CPU i/f of
 * its new highest priority pending interrupt.
 */
void gicv3_redist_update(GICv3CPUState *cs)
{
    gicv3_redist_update_noirqset(cs);
    gicv3_cpuif_update(cs);
}

/* Update the GIC status after state in the distributor has
 * changed affecting @len interrupts starting at @start,
 * but don't tell the CPU i/f.
 */
static void gicv3_update_noirqset(GICv3State *s, int start, int len)
{
    int i;
    uint8_t prio;
    uint32_t pend = 0;

    assert(start >= GIC_INTERNAL);
    assert(len > 0);

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].seenbetter = false;
    }

    /* Find the highest priority pending interrupt in this range. */
    for (i = start; i < start + len; i++) {
        GICv3CPUState *cs;

        if (i == start || (i & 0x1f) == 0) {
            /* Calculate the next 32 bits worth of pending status */
            pend = gicd_int_pending(s, i & ~0x1f);
        }

        if (!(pend & (1 << (i & 0x1f)))) {
            continue;
        }
        cs = s->gicd_irouter_target[i];
        if (!cs) {
            /* Interrupts targeting no implemented CPU should remain pending
             * and not be forwarded to any CPU.
             */
            continue;
        }
        prio = s->gicd_ipriority[i];
        if (irqbetter(cs, i, prio)) {
            cs->hppi.irq = i;
            cs->hppi.prio = prio;
            cs->seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was
     * no previous pending interrupt at all), then that
     * is still valid, and we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        if (cs->seenbetter) {
            cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
        }

        if (!cs->seenbetter && cs->hppi.prio != 0xff &&
            cs->hppi.irq >= start && cs->hppi.irq < start + len) {
            gicv3_full_update_noirqset(s);
            break;
        }
    }
}

void gicv3_update(GICv3State *s, int start, int len)
{
    int i;

    gicv3_update_noirqset(s, start, len);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

void gicv3_full_update_noirqset(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, but
     * don't update any outbound IRQ lines.
     */
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].hppi.prio = 0xff;
    }

    /* Note that we can guarantee that these functions will not
     * recursively call back into gicv3_full_update(), because
     * at each point the "previous best" is always outside the
     * range we ask them to update.
     */
    gicv3_update_noirqset(s, GIC_INTERNAL, s->num_irq - GIC_INTERNAL);

    for (i = 0; i < s->num_cpu; i++) {
        gicv3_redist_update_noirqset(&s->cpu[i]);
    }
}

void gicv3_full_update(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, including
     * updating outbound IRQ lines.
     */
    int i;

    gicv3_full_update_noirqset(s);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

/* Process a change in an external IRQ input. */
static void gicv3_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     *  [0..N-1]     : external interrupts
     *  [N..N+31]    : PPI (internal) interrupts for CPU 0
     *  [N+32..N+63] : PPI (internal) interrupts for CPU 1
     *  ...
     */
    GICv3State *s = opaque;

    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* external interrupt (SPI) */
        gicv3_dist_set_irq(s, irq + GIC_INTERNAL, level);
    } else {
        /* per-cpu interrupt (PPI) */
        int cpu;

        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        assert(cpu < s->num_cpu);
        /* Raising SGIs via this function would be a bug in how the board
         * model wires up interrupts.
         */
        assert(irq >= GIC_NR_SGIS);
        gicv3_redist_set_irq(&s->cpu[cpu], irq, level);
    }
}
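
/* Worked example with a hypothetical configuration: if s->num_irq is 288,
 * then input IRQ lines 0..255 are SPIs (mapped to INTIDs 32..287 by adding
 * GIC_INTERNAL), lines 256..287 are the PPIs of CPU 0, lines 288..319 those
 * of CPU 1, and so on. For input line 308: 308 - 256 = 52, so cpu = 52 / 32
 * = 1 and irq = 52 % 32 = 20, i.e. PPI 20 on CPU 1 (and 20 >= GIC_NR_SGIS,
 * so the assertion above holds).
 */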

static void arm_gicv3_post_load(GICv3State *s)
{
    int i;

    /* Recalculate our cached idea of the current highest priority
     * pending interrupt, but don't set IRQ or FIQ lines.
     */
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_redist_update_lpi_only(&s->cpu[i]);
    }
    gicv3_full_update_noirqset(s);
    /* Repopulate the cache of GICv3CPUState pointers for target CPUs */
    gicv3_cache_all_target_cpustates(s);
}

static const MemoryRegionOps gic_ops[] = {
    {
        .read_with_attrs = gicv3_dist_read,
        .write_with_attrs = gicv3_dist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
        .valid.min_access_size = 1,
        .valid.max_access_size = 8,
        .impl.min_access_size = 1,
        .impl.max_access_size = 8,
    },
    {
        .read_with_attrs = gicv3_redist_read,
        .write_with_attrs = gicv3_redist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
        .valid.min_access_size = 1,
        .valid.max_access_size = 8,
        .impl.min_access_size = 1,
        .impl.max_access_size = 8,
    }
};

static void arm_gic_realize(DeviceState *dev, Error **errp)
{
    /* Device instance realize function for the GIC sysbus device */
    GICv3State *s = ARM_GICV3(dev);
    ARMGICv3Class *agc = ARM_GICV3_GET_CLASS(s);
    Error *local_err = NULL;

    agc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops);
    gicv3_init_cpuif(s);
}

static void arm_gicv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
    ARMGICv3Class *agc = ARM_GICV3_CLASS(klass);

    agcc->post_load = arm_gicv3_post_load;
    device_class_set_parent_realize(dc, arm_gic_realize,
                                    &agc->parent_realize);
}

static const TypeInfo arm_gicv3_info = {
    .name = TYPE_ARM_GICV3,
    .parent = TYPE_ARM_GICV3_COMMON,
    .instance_size = sizeof(GICv3State),
    .class_init = arm_gicv3_class_init,
    .class_size = sizeof(ARMGICv3Class),
};

static void arm_gicv3_register_types(void)
{
    type_register_static(&arm_gicv3_info);
}

type_init(arm_gicv3_register_types)