/* gic_internal.h — ARM GIC internal interfaces (QEMU) */
/*
 * ARM GIC support - internal interfaces
 *
 * Copyright (c) 2012 Linaro Limited
 * Written by Peter Maydell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
  20. #ifndef QEMU_ARM_GIC_INTERNAL_H
  21. #define QEMU_ARM_GIC_INTERNAL_H
  22. #include "hw/registerfields.h"
  23. #include "hw/intc/arm_gic.h"
  24. #define ALL_CPU_MASK ((unsigned)(((1 << GIC_NCPU) - 1)))
  25. #define GIC_DIST_SET_ENABLED(irq, cm) (s->irq_state[irq].enabled |= (cm))
  26. #define GIC_DIST_CLEAR_ENABLED(irq, cm) (s->irq_state[irq].enabled &= ~(cm))
  27. #define GIC_DIST_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
  28. #define GIC_DIST_SET_PENDING(irq, cm) (s->irq_state[irq].pending |= (cm))
  29. #define GIC_DIST_CLEAR_PENDING(irq, cm) (s->irq_state[irq].pending &= ~(cm))
  30. #define GIC_DIST_SET_ACTIVE(irq, cm) (s->irq_state[irq].active |= (cm))
  31. #define GIC_DIST_CLEAR_ACTIVE(irq, cm) (s->irq_state[irq].active &= ~(cm))
  32. #define GIC_DIST_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
  33. #define GIC_DIST_SET_MODEL(irq) (s->irq_state[irq].model = true)
  34. #define GIC_DIST_CLEAR_MODEL(irq) (s->irq_state[irq].model = false)
  35. #define GIC_DIST_TEST_MODEL(irq) (s->irq_state[irq].model)
  36. #define GIC_DIST_SET_LEVEL(irq, cm) (s->irq_state[irq].level |= (cm))
  37. #define GIC_DIST_CLEAR_LEVEL(irq, cm) (s->irq_state[irq].level &= ~(cm))
  38. #define GIC_DIST_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
  39. #define GIC_DIST_SET_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger = true)
  40. #define GIC_DIST_CLEAR_EDGE_TRIGGER(irq) \
  41. (s->irq_state[irq].edge_trigger = false)
  42. #define GIC_DIST_TEST_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger)
  43. #define GIC_DIST_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ? \
  44. s->priority1[irq][cpu] : \
  45. s->priority2[(irq) - GIC_INTERNAL])
  46. #define GIC_DIST_TARGET(irq) (s->irq_target[irq])
  47. #define GIC_DIST_CLEAR_GROUP(irq, cm) (s->irq_state[irq].group &= ~(cm))
  48. #define GIC_DIST_SET_GROUP(irq, cm) (s->irq_state[irq].group |= (cm))
  49. #define GIC_DIST_TEST_GROUP(irq, cm) ((s->irq_state[irq].group & (cm)) != 0)
  50. #define GICD_CTLR_EN_GRP0 (1U << 0)
  51. #define GICD_CTLR_EN_GRP1 (1U << 1)
  52. #define GICC_CTLR_EN_GRP0 (1U << 0)
  53. #define GICC_CTLR_EN_GRP1 (1U << 1)
  54. #define GICC_CTLR_ACK_CTL (1U << 2)
  55. #define GICC_CTLR_FIQ_EN (1U << 3)
  56. #define GICC_CTLR_CBPR (1U << 4) /* GICv1: SBPR */
  57. #define GICC_CTLR_EOIMODE (1U << 9)
  58. #define GICC_CTLR_EOIMODE_NS (1U << 10)
  59. REG32(GICH_HCR, 0x0)
  60. FIELD(GICH_HCR, EN, 0, 1)
  61. FIELD(GICH_HCR, UIE, 1, 1)
  62. FIELD(GICH_HCR, LRENPIE, 2, 1)
  63. FIELD(GICH_HCR, NPIE, 3, 1)
  64. FIELD(GICH_HCR, VGRP0EIE, 4, 1)
  65. FIELD(GICH_HCR, VGRP0DIE, 5, 1)
  66. FIELD(GICH_HCR, VGRP1EIE, 6, 1)
  67. FIELD(GICH_HCR, VGRP1DIE, 7, 1)
  68. FIELD(GICH_HCR, EOICount, 27, 5)
  69. #define GICH_HCR_MASK \
  70. (R_GICH_HCR_EN_MASK | R_GICH_HCR_UIE_MASK | \
  71. R_GICH_HCR_LRENPIE_MASK | R_GICH_HCR_NPIE_MASK | \
  72. R_GICH_HCR_VGRP0EIE_MASK | R_GICH_HCR_VGRP0DIE_MASK | \
  73. R_GICH_HCR_VGRP1EIE_MASK | R_GICH_HCR_VGRP1DIE_MASK | \
  74. R_GICH_HCR_EOICount_MASK)
  75. REG32(GICH_VTR, 0x4)
  76. FIELD(GICH_VTR, ListRegs, 0, 6)
  77. FIELD(GICH_VTR, PREbits, 26, 3)
  78. FIELD(GICH_VTR, PRIbits, 29, 3)
  79. REG32(GICH_VMCR, 0x8)
  80. FIELD(GICH_VMCR, VMCCtlr, 0, 10)
  81. FIELD(GICH_VMCR, VMABP, 18, 3)
  82. FIELD(GICH_VMCR, VMBP, 21, 3)
  83. FIELD(GICH_VMCR, VMPriMask, 27, 5)
  84. REG32(GICH_MISR, 0x10)
  85. FIELD(GICH_MISR, EOI, 0, 1)
  86. FIELD(GICH_MISR, U, 1, 1)
  87. FIELD(GICH_MISR, LRENP, 2, 1)
  88. FIELD(GICH_MISR, NP, 3, 1)
  89. FIELD(GICH_MISR, VGrp0E, 4, 1)
  90. FIELD(GICH_MISR, VGrp0D, 5, 1)
  91. FIELD(GICH_MISR, VGrp1E, 6, 1)
  92. FIELD(GICH_MISR, VGrp1D, 7, 1)
  93. REG32(GICH_EISR0, 0x20)
  94. REG32(GICH_EISR1, 0x24)
  95. REG32(GICH_ELRSR0, 0x30)
  96. REG32(GICH_ELRSR1, 0x34)
  97. REG32(GICH_APR, 0xf0)
  98. REG32(GICH_LR0, 0x100)
  99. FIELD(GICH_LR0, VirtualID, 0, 10)
  100. FIELD(GICH_LR0, PhysicalID, 10, 10)
  101. FIELD(GICH_LR0, CPUID, 10, 3)
  102. FIELD(GICH_LR0, EOI, 19, 1)
  103. FIELD(GICH_LR0, Priority, 23, 5)
  104. FIELD(GICH_LR0, State, 28, 2)
  105. FIELD(GICH_LR0, Grp1, 30, 1)
  106. FIELD(GICH_LR0, HW, 31, 1)
  107. /* Last LR register */
  108. REG32(GICH_LR63, 0x1fc)
  109. #define GICH_LR_MASK \
  110. (R_GICH_LR0_VirtualID_MASK | R_GICH_LR0_PhysicalID_MASK | \
  111. R_GICH_LR0_CPUID_MASK | R_GICH_LR0_EOI_MASK | \
  112. R_GICH_LR0_Priority_MASK | R_GICH_LR0_State_MASK | \
  113. R_GICH_LR0_Grp1_MASK | R_GICH_LR0_HW_MASK)
  114. #define GICH_LR_STATE_INVALID 0
  115. #define GICH_LR_STATE_PENDING 1
  116. #define GICH_LR_STATE_ACTIVE 2
  117. #define GICH_LR_STATE_ACTIVE_PENDING 3
  118. #define GICH_LR_VIRT_ID(entry) (FIELD_EX32(entry, GICH_LR0, VirtualID))
  119. #define GICH_LR_PHYS_ID(entry) (FIELD_EX32(entry, GICH_LR0, PhysicalID))
  120. #define GICH_LR_CPUID(entry) (FIELD_EX32(entry, GICH_LR0, CPUID))
  121. #define GICH_LR_EOI(entry) (FIELD_EX32(entry, GICH_LR0, EOI))
  122. #define GICH_LR_PRIORITY(entry) (FIELD_EX32(entry, GICH_LR0, Priority) << 3)
  123. #define GICH_LR_STATE(entry) (FIELD_EX32(entry, GICH_LR0, State))
  124. #define GICH_LR_GROUP(entry) (FIELD_EX32(entry, GICH_LR0, Grp1))
  125. #define GICH_LR_HW(entry) (FIELD_EX32(entry, GICH_LR0, HW))
  126. #define GICH_LR_CLEAR_PENDING(entry) \
  127. ((entry) &= ~(GICH_LR_STATE_PENDING << R_GICH_LR0_State_SHIFT))
  128. #define GICH_LR_SET_ACTIVE(entry) \
  129. ((entry) |= (GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))
  130. #define GICH_LR_CLEAR_ACTIVE(entry) \
  131. ((entry) &= ~(GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))
  132. /* Valid bits for GICC_CTLR for GICv1, v1 with security extensions,
  133. * GICv2 and GICv2 with security extensions:
  134. */
  135. #define GICC_CTLR_V1_MASK 0x1
  136. #define GICC_CTLR_V1_S_MASK 0x1f
  137. #define GICC_CTLR_V2_MASK 0x21f
  138. #define GICC_CTLR_V2_S_MASK 0x61f
  139. /* The special cases for the revision property: */
  140. #define REV_11MPCORE 0
  141. uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs);
  142. void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val,
  143. MemTxAttrs attrs);
  144. static inline bool gic_test_pending(GICState *s, int irq, int cm)
  145. {
  146. if (s->revision == REV_11MPCORE) {
  147. return s->irq_state[irq].pending & cm;
  148. } else {
  149. /* Edge-triggered interrupts are marked pending on a rising edge, but
  150. * level-triggered interrupts are either considered pending when the
  151. * level is active or if software has explicitly written to
  152. * GICD_ISPENDR to set the state pending.
  153. */
  154. return (s->irq_state[irq].pending & cm) ||
  155. (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_LEVEL(irq, cm));
  156. }
  157. }
  158. static inline bool gic_is_vcpu(int cpu)
  159. {
  160. return cpu >= GIC_NCPU;
  161. }
  162. static inline int gic_get_vcpu_real_id(int cpu)
  163. {
  164. return (cpu >= GIC_NCPU) ? (cpu - GIC_NCPU) : cpu;
  165. }
  166. /* Return true if the given vIRQ state exists in a LR and is either active or
  167. * pending and active.
  168. *
  169. * This function is used to check that a guest's `end of interrupt' or
  170. * `interrupts deactivation' request is valid, and matches with a LR of an
  171. * already acknowledged vIRQ (i.e. has the active bit set in its state).
  172. */
  173. static inline bool gic_virq_is_valid(GICState *s, int irq, int vcpu)
  174. {
  175. int cpu = gic_get_vcpu_real_id(vcpu);
  176. int lr_idx;
  177. for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
  178. uint32_t *entry = &s->h_lr[lr_idx][cpu];
  179. if ((GICH_LR_VIRT_ID(*entry) == irq) &&
  180. (GICH_LR_STATE(*entry) & GICH_LR_STATE_ACTIVE)) {
  181. return true;
  182. }
  183. }
  184. return false;
  185. }
  186. /* Return a pointer on the LR entry matching the given vIRQ.
  187. *
  188. * This function is used to retrieve an LR for which we know for sure that the
  189. * corresponding vIRQ exists in the current context (i.e. its current state is
  190. * not `invalid'):
  191. * - Either the corresponding vIRQ has been validated with gic_virq_is_valid()
  192. * so it is `active' or `active and pending',
  193. * - Or it was pending and has been selected by gic_get_best_virq(). It is now
  194. * `pending', `active' or `active and pending', depending on what the guest
  195. * already did with this vIRQ.
  196. *
  197. * Having multiple LRs with the same VirtualID leads to UNPREDICTABLE
  198. * behaviour in the GIC. We choose to return the first one that matches.
  199. */
  200. static inline uint32_t *gic_get_lr_entry(GICState *s, int irq, int vcpu)
  201. {
  202. int cpu = gic_get_vcpu_real_id(vcpu);
  203. int lr_idx;
  204. for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
  205. uint32_t *entry = &s->h_lr[lr_idx][cpu];
  206. if ((GICH_LR_VIRT_ID(*entry) == irq) &&
  207. (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID)) {
  208. return entry;
  209. }
  210. }
  211. g_assert_not_reached();
  212. }
  213. static inline bool gic_test_group(GICState *s, int irq, int cpu)
  214. {
  215. if (gic_is_vcpu(cpu)) {
  216. uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
  217. return GICH_LR_GROUP(*entry);
  218. } else {
  219. return GIC_DIST_TEST_GROUP(irq, 1 << cpu);
  220. }
  221. }
  222. static inline void gic_clear_pending(GICState *s, int irq, int cpu)
  223. {
  224. if (gic_is_vcpu(cpu)) {
  225. uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
  226. GICH_LR_CLEAR_PENDING(*entry);
  227. } else {
  228. /* Clear pending state for both level and edge triggered
  229. * interrupts. (level triggered interrupts with an active line
  230. * remain pending, see gic_test_pending)
  231. */
  232. GIC_DIST_CLEAR_PENDING(irq, GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK
  233. : (1 << cpu));
  234. }
  235. }
  236. static inline void gic_set_active(GICState *s, int irq, int cpu)
  237. {
  238. if (gic_is_vcpu(cpu)) {
  239. uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
  240. GICH_LR_SET_ACTIVE(*entry);
  241. } else {
  242. GIC_DIST_SET_ACTIVE(irq, 1 << cpu);
  243. }
  244. }
  245. static inline void gic_clear_active(GICState *s, int irq, int cpu)
  246. {
  247. unsigned int cm;
  248. if (gic_is_vcpu(cpu)) {
  249. uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
  250. GICH_LR_CLEAR_ACTIVE(*entry);
  251. if (GICH_LR_HW(*entry)) {
  252. /* Hardware interrupt. We must forward the deactivation request to
  253. * the distributor.
  254. */
  255. int phys_irq = GICH_LR_PHYS_ID(*entry);
  256. int rcpu = gic_get_vcpu_real_id(cpu);
  257. if (phys_irq < GIC_NR_SGIS || phys_irq >= GIC_MAXIRQ) {
  258. /* UNPREDICTABLE behaviour, we choose to ignore the request */
  259. return;
  260. }
  261. /* This is equivalent to a NS write to DIR on the physical CPU
  262. * interface. Hence group0 interrupt deactivation is ignored if
  263. * the GIC is secure.
  264. */
  265. if (!s->security_extn || GIC_DIST_TEST_GROUP(phys_irq, 1 << rcpu)) {
  266. cm = phys_irq < GIC_INTERNAL ? 1 << rcpu : ALL_CPU_MASK;
  267. GIC_DIST_CLEAR_ACTIVE(phys_irq, cm);
  268. }
  269. }
  270. } else {
  271. cm = irq < GIC_INTERNAL ? 1 << cpu : ALL_CPU_MASK;
  272. GIC_DIST_CLEAR_ACTIVE(irq, cm);
  273. }
  274. }
  275. static inline int gic_get_priority(GICState *s, int irq, int cpu)
  276. {
  277. if (gic_is_vcpu(cpu)) {
  278. uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
  279. return GICH_LR_PRIORITY(*entry);
  280. } else {
  281. return GIC_DIST_GET_PRIORITY(irq, cpu);
  282. }
  283. }
  284. #endif /* QEMU_ARM_GIC_INTERNAL_H */