/*
 * ITS emulation for a GICv3-based system
 *
 * Copyright Linaro.org 2021
 *
 * Authors:
 *  Shashi Mallela <shashi.mallela@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at your
 * option) any later version. See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "hw/qdev-properties.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "gicv3_internal.h"
#include "qom/object.h"
#include "qapi/error.h"

typedef struct GICv3ITSClass GICv3ITSClass;
/* This is reusing the GICv3ITSState typedef from ARM_GICV3_ITS_COMMON */
DECLARE_OBJ_CHECKERS(GICv3ITSState, GICv3ITSClass,
                     ARM_GICV3_ITS, TYPE_ARM_GICV3_ITS)

struct GICv3ITSClass {
    GICv3ITSCommonClass parent_class;
    ResettablePhases parent_phases;
};

/*
 * This is an internal enum used to distinguish between LPI triggered
 * via command queue and LPI triggered via gits_translater write.
 */
typedef enum ItsCmdType {
    NONE = 0, /* internal indication for GITS_TRANSLATER write */
    CLEAR = 1,
    DISCARD = 2,
    INTERRUPT = 3,
} ItsCmdType;

typedef struct DTEntry {
    bool valid;
    unsigned size;
    uint64_t ittaddr;
} DTEntry;

typedef struct CTEntry {
    bool valid;
    uint32_t rdbase;
} CTEntry;

typedef struct ITEntry {
    bool valid;
    int inttype;
    uint32_t intid;
    uint32_t doorbell;
    uint32_t icid;
    uint32_t vpeid;
} ITEntry;

typedef struct VTEntry {
    bool valid;
    unsigned vptsize;
    uint32_t rdbase;
    uint64_t vptaddr;
} VTEntry;

/*
 * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options
 * if a command parameter is not correct. These include both "stall
 * processing of the command queue" and "ignore this command, and
 * keep processing the queue". In our implementation we choose that
 * memory transaction errors reading the command packet provoke a
 * stall, but errors in parameters cause us to ignore the command
 * and continue processing.
 * The process_* functions which handle individual ITS commands all
 * return an ItsCmdResult which tells process_cmdq() whether it should
 * stall, keep going because of an error, or keep going because the
 * command was a success.
 */
typedef enum ItsCmdResult {
    CMD_STALL = 0,
    CMD_CONTINUE = 1,
    CMD_CONTINUE_OK = 2,
} ItsCmdResult;

/* True if the ITS supports the GICv4 virtual LPI feature */
static bool its_feature_virtual(GICv3ITSState *s)
{
    return s->typer & R_GITS_TYPER_VIRTUAL_MASK;
}

static inline bool intid_in_lpi_range(uint32_t id)
{
    return id >= GICV3_LPI_INTID_START &&
        id < (1 << (GICD_TYPER_IDBITS + 1));
}

static inline bool valid_doorbell(uint32_t id)
{
    /* Doorbell fields may be an LPI, or 1023 to mean "no doorbell" */
    return id == INTID_SPURIOUS || intid_in_lpi_range(id);
}

static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz)
{
    uint64_t result = 0;

    switch (page_sz) {
    case GITS_PAGE_SIZE_4K:
    case GITS_PAGE_SIZE_16K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDR) << 12;
        break;
    case GITS_PAGE_SIZE_64K:
        result = FIELD_EX64(value, GITS_BASER, PHYADDRL_64K) << 16;
        result |= FIELD_EX64(value, GITS_BASER, PHYADDRH_64K) << 48;
        break;
    default:
        break;
    }
    return result;
}

static uint64_t table_entry_addr(GICv3ITSState *s, TableDesc *td,
                                 uint32_t idx, MemTxResult *res)
{
    /*
     * Given a TableDesc describing one of the ITS in-guest-memory
     * tables and an index into it, return the guest address
     * corresponding to that table entry.
     * If there was a memory error reading the L1 table of an
     * indirect table, *res is set accordingly, and we return -1.
     * If the L1 table entry is marked not valid, we return -1 with
     * *res set to MEMTX_OK.
     *
     * The specification defines the format of level 1 entries of a
     * 2-level table, but the format of level 2 entries and the format
     * of flat-mapped tables is IMPDEF.
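     *
     * Worked example for the two-level case below (illustrative,
     * assuming 64K pages and the 8-byte entries this implementation
     * uses for its tables): for idx = 20000, each L2 page holds
     * 65536 / 8 = 8192 entries, so l2idx = 20000 / 8192 = 2; we read
     * the L1 entry at base_addr + 2 * 8, and the entry itself lives
     * at byte offset (20000 % 8192) * 8 = 28928 into that L2 page.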
     */
    AddressSpace *as = &s->gicv3->dma_as;
    uint32_t l2idx;
    uint64_t l2;
    uint32_t num_l2_entries;

    *res = MEMTX_OK;

    if (!td->indirect) {
        /* Single level table */
        return td->base_addr + idx * td->entry_sz;
    }

    /* Two level table */
    l2idx = idx / (td->page_sz / L1TABLE_ENTRY_SIZE);
    l2 = address_space_ldq_le(as,
                              td->base_addr + (l2idx * L1TABLE_ENTRY_SIZE),
                              MEMTXATTRS_UNSPECIFIED, res);
    if (*res != MEMTX_OK) {
        return -1;
    }
    if (!(l2 & L2_TABLE_VALID_MASK)) {
        return -1;
    }

    num_l2_entries = td->page_sz / td->entry_sz;
    return (l2 & ((1ULL << 51) - 1)) + (idx % num_l2_entries) * td->entry_sz;
}

/*
 * Read the Collection Table entry at index @icid. On success (including
 * successfully determining that there is no valid CTE for this index),
 * we return MEMTX_OK and populate the CTEntry struct @cte accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_cte(GICv3ITSState *s, uint16_t icid, CTEntry *cte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    uint64_t cteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid CTE, or a memory error */
        cte->valid = false;
        goto out;
    }

    cteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        goto out;
    }
    cte->valid = FIELD_EX64(cteval, CTE, VALID);
    cte->rdbase = FIELD_EX64(cteval, CTE, RDBASE);
out:
    if (res != MEMTX_OK) {
        trace_gicv3_its_cte_read_fault(icid);
    } else {
        trace_gicv3_its_cte_read(icid, cte->valid, cte->rdbase);
    }
    return res;
}

/*
 * Update the Interrupt Table entry at index @eventid in the table specified
 * by the DTE @dte. Returns true on success, false if there was a memory
 * access error.
 */
static bool update_ite(GICv3ITSState *s, uint32_t eventid, const DTEntry *dte,
                       const ITEntry *ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    hwaddr iteaddr = dte->ittaddr + eventid * s->itt_entry_size;
    uint64_t itel = 0;
    uint32_t iteh = 0;

    trace_gicv3_its_ite_write(dte->ittaddr, eventid, ite->valid,
                              ite->inttype, ite->intid, ite->icid,
                              ite->vpeid, ite->doorbell);

    if (ite->valid) {
        itel = FIELD_DP64(itel, ITE_L, VALID, 1);
        itel = FIELD_DP64(itel, ITE_L, INTTYPE, ite->inttype);
        itel = FIELD_DP64(itel, ITE_L, INTID, ite->intid);
        itel = FIELD_DP64(itel, ITE_L, ICID, ite->icid);
        itel = FIELD_DP64(itel, ITE_L, VPEID, ite->vpeid);
        iteh = FIELD_DP32(iteh, ITE_H, DOORBELL, ite->doorbell);
    }

    address_space_stq_le(as, iteaddr, itel, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        return false;
    }
    address_space_stl_le(as, iteaddr + 8, iteh, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

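/*
 * In guest memory each ITE occupies s->itt_entry_size bytes (12 in this
 * implementation, as update_ite() above and get_ite() below show): a
 * 64-bit little-endian word holding the ITE_L fields (valid bit,
 * interrupt type, INTID, ICID, vPEID) at offset 0, followed by a 32-bit
 * word holding the ITE_H doorbell field at offset 8.
 */
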
/*
 * Read the Interrupt Table entry at index @eventid from the table specified
 * by the DTE @dte. On success, we return MEMTX_OK and populate the ITEntry
 * struct @ite accordingly. If there is an error reading memory then we return
 * the error code.
 */
static MemTxResult get_ite(GICv3ITSState *s, uint32_t eventid,
                           const DTEntry *dte, ITEntry *ite)
{
    AddressSpace *as = &s->gicv3->dma_as;
    MemTxResult res = MEMTX_OK;
    uint64_t itel;
    uint32_t iteh;
    hwaddr iteaddr = dte->ittaddr + eventid * s->itt_entry_size;

    itel = address_space_ldq_le(as, iteaddr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    iteh = address_space_ldl_le(as, iteaddr + 8, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_ite_read_fault(dte->ittaddr, eventid);
        return res;
    }

    ite->valid = FIELD_EX64(itel, ITE_L, VALID);
    ite->inttype = FIELD_EX64(itel, ITE_L, INTTYPE);
    ite->intid = FIELD_EX64(itel, ITE_L, INTID);
    ite->icid = FIELD_EX64(itel, ITE_L, ICID);
    ite->vpeid = FIELD_EX64(itel, ITE_L, VPEID);
    ite->doorbell = FIELD_EX64(iteh, ITE_H, DOORBELL);
    trace_gicv3_its_ite_read(dte->ittaddr, eventid, ite->valid,
                             ite->inttype, ite->intid, ite->icid,
                             ite->vpeid, ite->doorbell);
    return MEMTX_OK;
}

/*
 * Read the Device Table entry at index @devid. On success (including
 * successfully determining that there is no valid DTE for this index),
 * we return MEMTX_OK and populate the DTEntry struct accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_dte(GICv3ITSState *s, uint32_t devid, DTEntry *dte)
{
    MemTxResult res = MEMTX_OK;
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    uint64_t dteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid DTE, or a memory error */
        dte->valid = false;
        goto out;
    }

    dteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        goto out;
    }
    dte->valid = FIELD_EX64(dteval, DTE, VALID);
    dte->size = FIELD_EX64(dteval, DTE, SIZE);
    /* DTE word field stores bits [51:8] of the ITT address */
    dte->ittaddr = FIELD_EX64(dteval, DTE, ITTADDR) << ITTADDR_SHIFT;
out:
    if (res != MEMTX_OK) {
        trace_gicv3_its_dte_read_fault(devid);
    } else {
        trace_gicv3_its_dte_read(devid, dte->valid, dte->size, dte->ittaddr);
    }
    return res;
}

/*
 * Read the vPE Table entry at index @vpeid. On success (including
 * successfully determining that there is no valid entry for this index),
 * we return MEMTX_OK and populate the VTEntry struct accordingly.
 * If there is an error reading memory then we return the error code.
 */
static MemTxResult get_vte(GICv3ITSState *s, uint32_t vpeid, VTEntry *vte)
{
    MemTxResult res = MEMTX_OK;
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res);
    uint64_t vteval;

    if (entry_addr == -1) {
        /* No L2 table entry, i.e. no valid VTE, or a memory error */
        vte->valid = false;
        trace_gicv3_its_vte_read_fault(vpeid);
        return MEMTX_OK;
    }

    vteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        trace_gicv3_its_vte_read_fault(vpeid);
        return res;
    }
    vte->valid = FIELD_EX64(vteval, VTE, VALID);
    vte->vptsize = FIELD_EX64(vteval, VTE, VPTSIZE);
    vte->vptaddr = FIELD_EX64(vteval, VTE, VPTADDR);
    vte->rdbase = FIELD_EX64(vteval, VTE, RDBASE);
    trace_gicv3_its_vte_read(vpeid, vte->valid, vte->vptsize,
                             vte->vptaddr, vte->rdbase);
    return res;
}

/*
 * Given a (DeviceID, EventID), look up the corresponding ITE, including
 * checking for the various invalid-value cases. If we find a valid ITE,
 * fill in @ite and @dte and return CMD_CONTINUE_OK. Otherwise return
 * CMD_STALL or CMD_CONTINUE as appropriate (and the contents of @ite
 * should not be relied on).
 *
 * The string @who is purely for the LOG_GUEST_ERROR messages,
 * and should indicate the name of the calling function or similar.
 */
static ItsCmdResult lookup_ite(GICv3ITSState *s, const char *who,
                               uint32_t devid, uint32_t eventid, ITEntry *ite,
                               DTEntry *dte)
{
    uint64_t num_eventids;

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d\n",
                      who, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }
    if (get_dte(s, devid, dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!dte->valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: "
                      "invalid dte for %d\n", who, devid);
        return CMD_CONTINUE;
    }

    num_eventids = 1ULL << (dte->size + 1);
    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: eventid %d >= %"
                      PRId64 "\n", who, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (get_ite(s, eventid, dte, ite) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!ite->valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n", who);
        return CMD_CONTINUE;
    }

    return CMD_CONTINUE_OK;
}

/*
 * Given an ICID, look up the corresponding CTE, including checking for various
 * invalid-value cases. If we find a valid CTE, fill in @cte and return
 * CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE (and the
 * contents of @cte should not be relied on).
 *
 * The string @who is purely for the LOG_GUEST_ERROR messages,
 * and should indicate the name of the calling function or similar.
 */
static ItsCmdResult lookup_cte(GICv3ITSState *s, const char *who,
                               uint32_t icid, CTEntry *cte)
{
    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid ICID 0x%x\n", who, icid);
        return CMD_CONTINUE;
    }
    if (get_cte(s, icid, cte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!cte->valid) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid CTE\n", who);
        return CMD_CONTINUE;
    }
    if (cte->rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }
    return CMD_CONTINUE_OK;
}

/*
 * Given a VPEID, look up the corresponding VTE, including checking
 * for various invalid-value cases. If we find a valid VTE, fill in @vte
 * and return CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE
 * (and the contents of @vte should not be relied on).
 *
 * The string @who is purely for the LOG_GUEST_ERROR messages,
 * and should indicate the name of the calling function or similar.
 */
static ItsCmdResult lookup_vte(GICv3ITSState *s, const char *who,
                               uint32_t vpeid, VTEntry *vte)
{
    if (vpeid >= s->vpet.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid VPEID 0x%x\n", who, vpeid);
        return CMD_CONTINUE;
    }

    if (get_vte(s, vpeid, vte) != MEMTX_OK) {
        return CMD_STALL;
    }
    if (!vte->valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid VTE for VPEID 0x%x\n", who, vpeid);
        return CMD_CONTINUE;
    }

    if (vte->rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }
    return CMD_CONTINUE_OK;
}

static ItsCmdResult process_its_cmd_phys(GICv3ITSState *s, const ITEntry *ite,
                                         int irqlevel)
{
    CTEntry cte = {};
    ItsCmdResult cmdres;

    cmdres = lookup_cte(s, __func__, ite->icid, &cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }
    gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite->intid, irqlevel);
    return CMD_CONTINUE_OK;
}

static ItsCmdResult process_its_cmd_virt(GICv3ITSState *s, const ITEntry *ite,
                                         int irqlevel)
{
    VTEntry vte = {};
    ItsCmdResult cmdres;

    cmdres = lookup_vte(s, __func__, ite->vpeid, &vte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (!intid_in_lpi_range(ite->intid) ||
        ite->intid >= (1ULL << (vte.vptsize + 1))) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: intid 0x%x out of range\n",
                      __func__, ite->intid);
        return CMD_CONTINUE;
    }

    /*
     * For QEMU the actual pending of the vLPI is handled in the
     * redistributor code
     */
    gicv3_redist_process_vlpi(&s->gicv3->cpu[vte.rdbase], ite->intid,
                              vte.vptaddr << 16, ite->doorbell, irqlevel);
    return CMD_CONTINUE_OK;
}

/*
 * This function handles the processing of the following commands, based
 * on the ItsCmdType parameter passed in:
 * 1. triggering of LPI interrupt translation via ITS INT command
 * 2. triggering of LPI interrupt translation via gits_translater register
 * 3. handling of ITS CLEAR command
 * 4. handling of ITS DISCARD command
 */
static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid,
                                       uint32_t eventid, ItsCmdType cmd)
{
    DTEntry dte = {};
    ITEntry ite = {};
    ItsCmdResult cmdres;
    int irqlevel;

    cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    irqlevel = (cmd == CLEAR || cmd == DISCARD) ? 0 : 1;

    switch (ite.inttype) {
    case ITE_INTTYPE_PHYSICAL:
        cmdres = process_its_cmd_phys(s, &ite, irqlevel);
        break;
    case ITE_INTTYPE_VIRTUAL:
        if (!its_feature_virtual(s)) {
            /* Can't happen unless guest is illegally writing to table memory */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid type %d in ITE (table corrupted?)\n",
                          __func__, ite.inttype);
            return CMD_CONTINUE;
        }
        cmdres = process_its_cmd_virt(s, &ite, irqlevel);
        break;
    default:
        g_assert_not_reached();
    }

    if (cmdres == CMD_CONTINUE_OK && cmd == DISCARD) {
        ITEntry i = {};
        /* remove mapping from interrupt translation table */
        i.valid = false;
        return update_ite(s, eventid, &dte, &i) ? CMD_CONTINUE_OK : CMD_STALL;
    }
    return CMD_CONTINUE_OK;
}

static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt,
                                    ItsCmdType cmd)
{
    uint32_t devid, eventid;

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    eventid = cmdpkt[1] & EVENTID_MASK;
    switch (cmd) {
    case INTERRUPT:
        trace_gicv3_its_cmd_int(devid, eventid);
        break;
    case CLEAR:
        trace_gicv3_its_cmd_clear(devid, eventid);
        break;
    case DISCARD:
        trace_gicv3_its_cmd_discard(devid, eventid);
        break;
    default:
        g_assert_not_reached();
    }
    return do_process_its_cmd(s, devid, eventid, cmd);
}

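/*
 * Sketch of the MAPTI/MAPI command packet layout assumed by the field
 * extraction in process_mapti() below (per the GICv3 architecture
 * spec; DWn is the nth 64-bit doubleword of the 32-byte packet):
 *
 *   DW0: bits [63:32] DeviceID, bits [7:0] command number
 *   DW1: bits [63:32] pINTID (absent for MAPI), bits [31:0] EventID
 *   DW2: bits [15:0] ICID
 *
 * For MAPI (ignore_pInt == true) the EventID doubles as the pINTID.
 */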
static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt,
                                  bool ignore_pInt)
{
    uint32_t devid, eventid;
    uint32_t pIntid = 0;
    uint64_t num_eventids;
    uint16_t icid = 0;
    DTEntry dte = {};
    ITEntry ite = {};

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    eventid = cmdpkt[1] & EVENTID_MASK;
    icid = cmdpkt[2] & ICID_MASK;

    if (ignore_pInt) {
        pIntid = eventid;
        trace_gicv3_its_cmd_mapi(devid, eventid, icid);
    } else {
        pIntid = (cmdpkt[1] & pINTID_MASK) >> pINTID_SHIFT;
        trace_gicv3_its_cmd_mapti(devid, eventid, icid, pIntid);
    }

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: devid %d>=%d\n",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }
    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }
    num_eventids = 1ULL << (dte.size + 1);

    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid ICID 0x%x >= 0x%x\n",
                      __func__, icid, s->ct.num_entries);
        return CMD_CONTINUE;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: no valid DTE for devid 0x%x\n", __func__, devid);
        return CMD_CONTINUE;
    }

    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid event ID 0x%x >= 0x%" PRIx64 "\n",
                      __func__, eventid, num_eventids);
        return CMD_CONTINUE;
    }

    if (!intid_in_lpi_range(pIntid)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid interrupt ID 0x%x\n", __func__, pIntid);
        return CMD_CONTINUE;
    }

    /* add ite entry to interrupt translation table */
    ite.valid = true;
    ite.inttype = ITE_INTTYPE_PHYSICAL;
    ite.intid = pIntid;
    ite.icid = icid;
    ite.doorbell = INTID_SPURIOUS;
    ite.vpeid = 0;
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
}

static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt,
                                   bool ignore_vintid)
{
    uint32_t devid, eventid, vintid, doorbell, vpeid;
    uint64_t num_eventids;
    DTEntry dte = {};
    ITEntry ite = {};

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    devid = FIELD_EX64(cmdpkt[0], VMAPTI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], VMAPTI_1, EVENTID);
    vpeid = FIELD_EX64(cmdpkt[1], VMAPTI_1, VPEID);
    doorbell = FIELD_EX64(cmdpkt[2], VMAPTI_2, DOORBELL);

    if (ignore_vintid) {
        vintid = eventid;
        trace_gicv3_its_cmd_vmapi(devid, eventid, vpeid, doorbell);
    } else {
        vintid = FIELD_EX64(cmdpkt[2], VMAPTI_2, VINTID);
        trace_gicv3_its_cmd_vmapti(devid, eventid, vpeid, vintid, doorbell);
    }

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid DeviceID 0x%x (must be less than 0x%x)\n",
                      __func__, devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (get_dte(s, devid, &dte) != MEMTX_OK) {
        return CMD_STALL;
    }

    if (!dte.valid) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: no entry in device table for DeviceID 0x%x\n",
                      __func__, devid);
        return CMD_CONTINUE;
    }

    num_eventids = 1ULL << (dte.size + 1);

    if (eventid >= num_eventids) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: EventID 0x%x too large for DeviceID 0x%x "
                      "(must be less than 0x%" PRIx64 ")\n",
                      __func__, eventid, devid, num_eventids);
        return CMD_CONTINUE;
    }
    if (!intid_in_lpi_range(vintid)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VIntID 0x%x not a valid LPI\n",
                      __func__, vintid);
        return CMD_CONTINUE;
    }
    if (!valid_doorbell(doorbell)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Doorbell %d not 1023 and not a valid LPI\n",
                      __func__, doorbell);
        return CMD_CONTINUE;
    }
    if (vpeid >= s->vpet.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
                      __func__, vpeid, s->vpet.num_entries);
        return CMD_CONTINUE;
    }

    /* add ite entry to interrupt translation table */
    ite.valid = true;
    ite.inttype = ITE_INTTYPE_VIRTUAL;
    ite.intid = vintid;
    ite.icid = 0;
    ite.doorbell = doorbell;
    ite.vpeid = vpeid;
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
}

/*
 * Update the Collection Table entry for @icid to @cte. Returns true
 * on success, false if there was a memory access error.
 */
static bool update_cte(GICv3ITSState *s, uint16_t icid, const CTEntry *cte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t cteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_cte_write(icid, cte->valid, cte->rdbase);

    if (cte->valid) {
        /* add mapping entry to collection table */
        cteval = FIELD_DP64(cteval, CTE, VALID, 1);
        cteval = FIELD_DP64(cteval, CTE, RDBASE, cte->rdbase);
    }

    entry_addr = table_entry_addr(s, &s->ct, icid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }

    address_space_stq_le(as, entry_addr, cteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

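/*
 * Sketch of the MAPC command packet layout assumed by process_mapc()
 * below (per the GICv3 architecture spec): DW2 carries the ICID in
 * bits [15:0], RDbase in bits [50:16] and the Valid bit in bit [63].
 * Since this implementation reports GITS_TYPER.PTA == 0, RDbase is a
 * redistributor (CPU) number rather than a physical address, which is
 * why the extracted value is further masked with RDBASE_PROCNUM_MASK.
 */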
static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint16_t icid;
    CTEntry cte = {};

    icid = cmdpkt[2] & ICID_MASK;
    cte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;
    if (cte.valid) {
        cte.rdbase = (cmdpkt[2] & R_MAPC_RDBASE_MASK) >> R_MAPC_RDBASE_SHIFT;
        cte.rdbase &= RDBASE_PROCNUM_MASK;
    } else {
        cte.rdbase = 0;
    }
    trace_gicv3_its_cmd_mapc(icid, cte.rdbase, cte.valid);

    if (icid >= s->ct.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITS MAPC: invalid ICID 0x%x\n", icid);
        return CMD_CONTINUE;
    }
    if (cte.valid && cte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPC: invalid RDBASE %u\n", cte.rdbase);
        return CMD_CONTINUE;
    }

    return update_cte(s, icid, &cte) ? CMD_CONTINUE_OK : CMD_STALL;
}

/*
 * Update the Device Table entry for @devid to @dte. Returns true
 * on success, false if there was a memory access error.
 */
static bool update_dte(GICv3ITSState *s, uint32_t devid, const DTEntry *dte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t dteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_dte_write(devid, dte->valid, dte->size, dte->ittaddr);

    if (dte->valid) {
        /* add mapping entry to device table */
        dteval = FIELD_DP64(dteval, DTE, VALID, 1);
        dteval = FIELD_DP64(dteval, DTE, SIZE, dte->size);
        dteval = FIELD_DP64(dteval, DTE, ITTADDR, dte->ittaddr);
    }

    entry_addr = table_entry_addr(s, &s->dt, devid, &res);
    if (res != MEMTX_OK) {
        /* memory access error: stall */
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }
    address_space_stq_le(as, entry_addr, dteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid;
    DTEntry dte = {};

    devid = (cmdpkt[0] & DEVID_MASK) >> DEVID_SHIFT;
    dte.size = cmdpkt[1] & SIZE_MASK;
    dte.ittaddr = (cmdpkt[2] & ITTADDR_MASK) >> ITTADDR_SHIFT;
    dte.valid = cmdpkt[2] & CMD_FIELD_VALID_MASK;

    trace_gicv3_its_cmd_mapd(devid, dte.size, dte.ittaddr, dte.valid);

    if (devid >= s->dt.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid device ID field 0x%x >= 0x%x\n",
                      devid, s->dt.num_entries);
        return CMD_CONTINUE;
    }

    if (dte.size > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "ITS MAPD: invalid size %d\n", dte.size);
        return CMD_CONTINUE;
    }

    return update_dte(s, devid, &dte) ? CMD_CONTINUE_OK : CMD_STALL;
}

static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint64_t rd1, rd2;

    rd1 = FIELD_EX64(cmdpkt[2], MOVALL_2, RDBASE1);
    rd2 = FIELD_EX64(cmdpkt[3], MOVALL_3, RDBASE2);

    trace_gicv3_its_cmd_movall(rd1, rd2);

    if (rd1 >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: RDBASE1 %" PRId64
                      " out of range (must be less than %d)\n",
                      __func__, rd1, s->gicv3->num_cpu);
        return CMD_CONTINUE;
    }
    if (rd2 >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: RDBASE2 %" PRId64
                      " out of range (must be less than %d)\n",
                      __func__, rd2, s->gicv3->num_cpu);
        return CMD_CONTINUE;
    }

    if (rd1 == rd2) {
        /* Move to same target must succeed as a no-op */
        return CMD_CONTINUE_OK;
    }

    /* Move all pending LPIs from redistributor 1 to redistributor 2 */
    gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]);

    return CMD_CONTINUE_OK;
}

static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    uint16_t new_icid;
    DTEntry dte = {};
    CTEntry old_cte = {}, new_cte = {};
    ITEntry old_ite = {};
    ItsCmdResult cmdres;

    devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID);
    new_icid = FIELD_EX64(cmdpkt[2], MOVI_2, ICID);

    trace_gicv3_its_cmd_movi(devid, eventid, new_icid);

    cmdres = lookup_ite(s, __func__, devid, eventid, &old_ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (old_ite.inttype != ITE_INTTYPE_PHYSICAL) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid command attributes: invalid ITE\n",
                      __func__);
        return CMD_CONTINUE;
    }

    cmdres = lookup_cte(s, __func__, old_ite.icid, &old_cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }
    cmdres = lookup_cte(s, __func__, new_icid, &new_cte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (old_cte.rdbase != new_cte.rdbase) {
        /* Move the LPI from the old redistributor to the new one */
        gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase],
                             &s->gicv3->cpu[new_cte.rdbase],
                             old_ite.intid);
    }

    /* Update the ICID field in the interrupt translation table entry */
    old_ite.icid = new_icid;
    return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE_OK : CMD_STALL;
}

/*
 * Update the vPE Table entry at index @vpeid with the entry @vte.
 * Returns true on success, false if there was a memory access error.
 */
static bool update_vte(GICv3ITSState *s, uint32_t vpeid, const VTEntry *vte)
{
    AddressSpace *as = &s->gicv3->dma_as;
    uint64_t entry_addr;
    uint64_t vteval = 0;
    MemTxResult res = MEMTX_OK;

    trace_gicv3_its_vte_write(vpeid, vte->valid, vte->vptsize, vte->vptaddr,
                              vte->rdbase);

    if (vte->valid) {
        vteval = FIELD_DP64(vteval, VTE, VALID, 1);
        vteval = FIELD_DP64(vteval, VTE, VPTSIZE, vte->vptsize);
        vteval = FIELD_DP64(vteval, VTE, VPTADDR, vte->vptaddr);
        vteval = FIELD_DP64(vteval, VTE, RDBASE, vte->rdbase);
    }

    entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res);
    if (res != MEMTX_OK) {
        return false;
    }
    if (entry_addr == -1) {
        /* No L2 table for this index: discard write and continue */
        return true;
    }
    address_space_stq_le(as, entry_addr, vteval, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}

static ItsCmdResult process_vmapp(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    VTEntry vte = {};
    uint32_t vpeid;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    vpeid = FIELD_EX64(cmdpkt[1], VMAPP_1, VPEID);
    vte.rdbase = FIELD_EX64(cmdpkt[2], VMAPP_2, RDBASE);
    vte.valid = FIELD_EX64(cmdpkt[2], VMAPP_2, V);
    vte.vptsize = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTSIZE);
    vte.vptaddr = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTADDR);

    trace_gicv3_its_cmd_vmapp(vpeid, vte.rdbase, vte.valid,
                              vte.vptaddr, vte.vptsize);

    /*
     * For GICv4.0 the VPT_size field is only 5 bits, whereas we
     * define our field macros to include the full GICv4.1 8 bits.
     * The range check on VPT_size will catch the cases where
     * the guest set the RES0-in-GICv4.0 bits [7:6].
     */
    if (vte.vptsize > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid VPT_size 0x%x\n", __func__, vte.vptsize);
        return CMD_CONTINUE;
    }

    if (vte.valid && vte.rdbase >= s->gicv3->num_cpu) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid rdbase 0x%x\n", __func__, vte.rdbase);
        return CMD_CONTINUE;
    }

    if (vpeid >= s->vpet.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: VPEID 0x%x out of range (must be less than 0x%x)\n",
                      __func__, vpeid, s->vpet.num_entries);
        return CMD_CONTINUE;
    }

    return update_vte(s, vpeid, &vte) ? CMD_CONTINUE_OK : CMD_STALL;
}

typedef struct VmovpCallbackData {
    uint64_t rdbase;
    uint32_t vpeid;
    /*
     * Overall command result. If more than one callback finds an
     * error, STALL beats CONTINUE.
     */
    ItsCmdResult result;
} VmovpCallbackData;

static void vmovp_callback(gpointer data, gpointer opaque)
{
    /*
     * This function is called to update the VPE table entry for
     * @vpeid on this ITS. This might be because of a VMOVP command
     * executed on any ITS that is connected to the same GIC as this
     * ITS. We need to read the VPE table entry for the VPEID and
     * update its RDBASE field.
     */
    GICv3ITSState *s = data;
    VmovpCallbackData *cbdata = opaque;
    VTEntry vte = {};
    ItsCmdResult cmdres;

    cmdres = lookup_vte(s, __func__, cbdata->vpeid, &vte);
    switch (cmdres) {
    case CMD_STALL:
        cbdata->result = CMD_STALL;
        return;
    case CMD_CONTINUE:
        if (cbdata->result != CMD_STALL) {
            cbdata->result = CMD_CONTINUE;
        }
        return;
    case CMD_CONTINUE_OK:
        break;
    }

    vte.rdbase = cbdata->rdbase;
    if (!update_vte(s, cbdata->vpeid, &vte)) {
        cbdata->result = CMD_STALL;
    }
}

static ItsCmdResult process_vmovp(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    VmovpCallbackData cbdata;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    cbdata.vpeid = FIELD_EX64(cmdpkt[1], VMOVP_1, VPEID);
    cbdata.rdbase = FIELD_EX64(cmdpkt[2], VMOVP_2, RDBASE);

    trace_gicv3_its_cmd_vmovp(cbdata.vpeid, cbdata.rdbase);

    if (cbdata.rdbase >= s->gicv3->num_cpu) {
        return CMD_CONTINUE;
    }

    /*
     * Our ITS implementation reports GITS_TYPER.VMOVP == 1, which means
     * that when a VMOVP command executed on any ITS changes the target
     * redistributor (the RDBASE field) in a VPE table entry, the change
     * must be propagated to all the ITSes connected to the same GIC.
     */
    cbdata.result = CMD_CONTINUE_OK;
    gicv3_foreach_its(s->gicv3, vmovp_callback, &cbdata);
    return cbdata.result;
}

static ItsCmdResult process_vmovi(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid, vpeid, doorbell;
    bool doorbell_valid;
    DTEntry dte = {};
    ITEntry ite = {};
    VTEntry old_vte = {}, new_vte = {};
    ItsCmdResult cmdres;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    devid = FIELD_EX64(cmdpkt[0], VMOVI_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], VMOVI_1, EVENTID);
    vpeid = FIELD_EX64(cmdpkt[1], VMOVI_1, VPEID);
    doorbell_valid = FIELD_EX64(cmdpkt[2], VMOVI_2, D);
    doorbell = FIELD_EX64(cmdpkt[2], VMOVI_2, DOORBELL);

    trace_gicv3_its_cmd_vmovi(devid, eventid, vpeid, doorbell_valid, doorbell);

    if (doorbell_valid && !valid_doorbell(doorbell)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid doorbell 0x%x\n", __func__, doorbell);
        return CMD_CONTINUE;
    }

    cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (ite.inttype != ITE_INTTYPE_VIRTUAL) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: ITE is not for virtual interrupt\n",
                      __func__);
        return CMD_CONTINUE;
    }

    cmdres = lookup_vte(s, __func__, ite.vpeid, &old_vte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }
    cmdres = lookup_vte(s, __func__, vpeid, &new_vte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    if (!intid_in_lpi_range(ite.intid) ||
        ite.intid >= (1ULL << (old_vte.vptsize + 1)) ||
        ite.intid >= (1ULL << (new_vte.vptsize + 1))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: ITE intid 0x%x out of range\n",
                      __func__, ite.intid);
        return CMD_CONTINUE;
    }

    ite.vpeid = vpeid;
    if (doorbell_valid) {
        ite.doorbell = doorbell;
    }

    /*
     * Move the LPI from the old redistributor to the new one. We don't
     * need to do anything if the guest somehow specified the
     * same pending table for source and destination.
     */
    if (old_vte.vptaddr != new_vte.vptaddr) {
        gicv3_redist_mov_vlpi(&s->gicv3->cpu[old_vte.rdbase],
                              old_vte.vptaddr << 16,
                              &s->gicv3->cpu[new_vte.rdbase],
                              new_vte.vptaddr << 16,
                              ite.intid,
                              ite.doorbell);
    }

    /* Update the ITE to the new VPEID and possibly doorbell values */
    return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL;
}

static ItsCmdResult process_vinvall(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    VTEntry vte;
    uint32_t vpeid;
    ItsCmdResult cmdres;

    if (!its_feature_virtual(s)) {
        return CMD_CONTINUE;
    }

    vpeid = FIELD_EX64(cmdpkt[1], VINVALL_1, VPEID);

    trace_gicv3_its_cmd_vinvall(vpeid);

    cmdres = lookup_vte(s, __func__, vpeid, &vte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    gicv3_redist_vinvall(&s->gicv3->cpu[vte.rdbase], vte.vptaddr << 16);
    return CMD_CONTINUE_OK;
}

static ItsCmdResult process_inv(GICv3ITSState *s, const uint64_t *cmdpkt)
{
    uint32_t devid, eventid;
    ITEntry ite = {};
    DTEntry dte = {};
    CTEntry cte = {};
    VTEntry vte = {};
    ItsCmdResult cmdres;

    devid = FIELD_EX64(cmdpkt[0], INV_0, DEVICEID);
    eventid = FIELD_EX64(cmdpkt[1], INV_1, EVENTID);

    trace_gicv3_its_cmd_inv(devid, eventid);

    cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte);
    if (cmdres != CMD_CONTINUE_OK) {
        return cmdres;
    }

    switch (ite.inttype) {
    case ITE_INTTYPE_PHYSICAL:
        cmdres = lookup_cte(s, __func__, ite.icid, &cte);
        if (cmdres != CMD_CONTINUE_OK) {
            return cmdres;
        }
        gicv3_redist_inv_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid);
        break;
    case ITE_INTTYPE_VIRTUAL:
        if (!its_feature_virtual(s)) {
            /* Can't happen unless guest is illegally writing to table memory */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid type %d in ITE (table corrupted?)\n",
                          __func__, ite.inttype);
            return CMD_CONTINUE;
        }

        cmdres = lookup_vte(s, __func__, ite.vpeid, &vte);
        if (cmdres != CMD_CONTINUE_OK) {
            return cmdres;
        }

        if (!intid_in_lpi_range(ite.intid) ||
            ite.intid >= (1ULL << (vte.vptsize + 1))) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: intid 0x%x out of range\n",
                          __func__, ite.intid);
            return CMD_CONTINUE;
        }

        gicv3_redist_inv_vlpi(&s->gicv3->cpu[vte.rdbase], ite.intid,
                              vte.vptaddr << 16);
        break;
    default:
        g_assert_not_reached();
    }
    return CMD_CONTINUE_OK;
}

/*
 * Current implementation blocks until all
 * commands are processed
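 *
 * The command queue itself is a ring of 32-byte command packets in
 * guest memory: GITS_CWRITER.Offset is the index the guest will write
 * the next command to, GITS_CREADR.Offset is the next command for us
 * to process, and the queue is empty when the two offsets are equal.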
 */
static void process_cmdq(GICv3ITSState *s)
{
    uint32_t wr_offset = 0;
    uint32_t rd_offset = 0;
    uint32_t cq_offset = 0;
    AddressSpace *as = &s->gicv3->dma_as;
    uint8_t cmd;
    int i;

    if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
        return;
    }

    wr_offset = FIELD_EX64(s->cwriter, GITS_CWRITER, OFFSET);

    if (wr_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid write offset "
                      "%d\n", __func__, wr_offset);
        return;
    }

    rd_offset = FIELD_EX64(s->creadr, GITS_CREADR, OFFSET);

    if (rd_offset >= s->cq.num_entries) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid read offset "
                      "%d\n", __func__, rd_offset);
        return;
    }

    while (wr_offset != rd_offset) {
        ItsCmdResult result = CMD_CONTINUE_OK;
        void *hostmem;
        hwaddr buflen;
        uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS];

        cq_offset = (rd_offset * GITS_CMDQ_ENTRY_SIZE);

        buflen = GITS_CMDQ_ENTRY_SIZE;
        hostmem = address_space_map(as, s->cq.base_addr + cq_offset,
                                    &buflen, false, MEMTXATTRS_UNSPECIFIED);
        if (!hostmem || buflen != GITS_CMDQ_ENTRY_SIZE) {
            if (hostmem) {
                address_space_unmap(as, hostmem, buflen, false, 0);
            }
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: could not read command at 0x%" PRIx64 "\n",
                          __func__, s->cq.base_addr + cq_offset);
            break;
        }
        for (i = 0; i < ARRAY_SIZE(cmdpkt); i++) {
            cmdpkt[i] = ldq_le_p(hostmem + i * sizeof(uint64_t));
        }
        address_space_unmap(as, hostmem, buflen, false, 0);

        cmd = cmdpkt[0] & CMD_MASK;

        trace_gicv3_its_process_command(rd_offset, cmd);

        switch (cmd) {
        case GITS_CMD_INT:
            result = process_its_cmd(s, cmdpkt, INTERRUPT);
            break;
        case GITS_CMD_CLEAR:
            result = process_its_cmd(s, cmdpkt, CLEAR);
            break;
        case GITS_CMD_SYNC:
            /*
             * Current implementation makes a blocking synchronous call
             * for every command issued earlier, so the internal state
             * is already consistent by the time a SYNC command is
             * executed and no further processing is required for it.
             */
            trace_gicv3_its_cmd_sync();
            break;
        case GITS_CMD_VSYNC:
            /*
             * VSYNC is also a nop, because our implementation is always
             * in sync.
             */
            if (!its_feature_virtual(s)) {
                result = CMD_CONTINUE;
                break;
            }
            trace_gicv3_its_cmd_vsync();
            break;
        case GITS_CMD_MAPD:
            result = process_mapd(s, cmdpkt);
            break;
        case GITS_CMD_MAPC:
            result = process_mapc(s, cmdpkt);
            break;
        case GITS_CMD_MAPTI:
            result = process_mapti(s, cmdpkt, false);
            break;
        case GITS_CMD_MAPI:
            result = process_mapti(s, cmdpkt, true);
            break;
        case GITS_CMD_DISCARD:
            result = process_its_cmd(s, cmdpkt, DISCARD);
            break;
        case GITS_CMD_INV:
            result = process_inv(s, cmdpkt);
            break;
        case GITS_CMD_INVALL:
            /*
             * The current implementation doesn't cache any ITS tables,
             * except for the calculated LPI priority information. We only
             * need to trigger LPI priority re-calculation to be in
             * sync with LPI config table or pending table changes.
             * INVALL operates on a collection specified by ICID so
             * it only affects physical LPIs.
             */
            trace_gicv3_its_cmd_invall();
            for (i = 0; i < s->gicv3->num_cpu; i++) {
                gicv3_redist_update_lpi(&s->gicv3->cpu[i]);
            }
            break;
        case GITS_CMD_MOVI:
            result = process_movi(s, cmdpkt);
            break;
        case GITS_CMD_MOVALL:
            result = process_movall(s, cmdpkt);
            break;
        case GITS_CMD_VMAPTI:
            result = process_vmapti(s, cmdpkt, false);
            break;
        case GITS_CMD_VMAPI:
            result = process_vmapti(s, cmdpkt, true);
            break;
        case GITS_CMD_VMAPP:
            result = process_vmapp(s, cmdpkt);
            break;
        case GITS_CMD_VMOVP:
            result = process_vmovp(s, cmdpkt);
            break;
        case GITS_CMD_VMOVI:
            result = process_vmovi(s, cmdpkt);
            break;
        case GITS_CMD_VINVALL:
            result = process_vinvall(s, cmdpkt);
            break;
        default:
            trace_gicv3_its_cmd_unknown(cmd);
            break;
        }
        if (result != CMD_STALL) {
            /* CMD_CONTINUE or CMD_CONTINUE_OK */
            rd_offset++;
            rd_offset %= s->cq.num_entries;
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset);
        } else {
            /* CMD_STALL */
            s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, STALLED, 1);
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: 0x%x cmd processing failed, stalling\n",
                          __func__, cmd);
            break;
        }
    }
}

/*
 * This function extracts the ITS Device, Collection, and vPE table
 * specific parameters (like base_addr, size, etc.) from the GITS_BASER
 * registers. It is called during ITS enable and also during post_load
 * migration.
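 *
 * Worked example of the capacity computation at the bottom of the loop
 * (illustrative, assuming a table occupying two 64K pages with 8-byte
 * entries): a flat table gives num_entries = (2 * 65536) / 8 = 16384,
 * while an indirect table's two pages instead hold 131072 / 8 = 16384
 * L1 pointers, each covering an L2 page of 65536 / 8 = 8192 entries.
 * Either way the result is then capped at 1 << idbits.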
 */
static void extract_table_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint8_t page_sz_type;
    uint8_t type;
    uint32_t page_sz = 0;
    uint64_t value;

    for (int i = 0; i < 8; i++) {
        TableDesc *td;
        int idbits;

        value = s->baser[i];

        if (!value) {
            continue;
        }

        page_sz_type = FIELD_EX64(value, GITS_BASER, PAGESIZE);

        switch (page_sz_type) {
        case 0:
            page_sz = GITS_PAGE_SIZE_4K;
            break;
        case 1:
            page_sz = GITS_PAGE_SIZE_16K;
            break;
        case 2:
        case 3:
            page_sz = GITS_PAGE_SIZE_64K;
            break;
        default:
            g_assert_not_reached();
        }

        num_pages = FIELD_EX64(value, GITS_BASER, SIZE) + 1;

        type = FIELD_EX64(value, GITS_BASER, TYPE);

        switch (type) {
        case GITS_BASER_TYPE_DEVICE:
            td = &s->dt;
            idbits = FIELD_EX64(s->typer, GITS_TYPER, DEVBITS) + 1;
            break;
        case GITS_BASER_TYPE_COLLECTION:
            td = &s->ct;
            if (FIELD_EX64(s->typer, GITS_TYPER, CIL)) {
                idbits = FIELD_EX64(s->typer, GITS_TYPER, CIDBITS) + 1;
            } else {
                /* 16-bit CollectionId supported when CIL == 0 */
                idbits = 16;
            }
            break;
        case GITS_BASER_TYPE_VPE:
            td = &s->vpet;
            /*
             * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an
             * implementation to implement fewer bits and report this
             * via GICD_TYPER2.)
             */
            idbits = 16;
            break;
        default:
            /*
             * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK
             * ensures we will only see type values corresponding to
             * the values set up in gicv3_its_reset().
             */
            g_assert_not_reached();
        }

        memset(td, 0, sizeof(*td));
        /*
         * If GITS_BASER<n>.Valid is 0 for any <n> then we will not process
         * interrupts. (GITS_TYPER.HCC is 0 for this implementation, so we
         * do not have a special case where the GITS_BASER<n>.Valid bit is 0
         * for the register corresponding to the Collection table but we
         * still have to process interrupts using non-memory-backed
         * Collection table entries.)
         * The specification makes it UNPREDICTABLE to enable the ITS without
         * marking each BASER<n> as valid. We choose to handle these as if
         * the table was zero-sized, so commands using the table will fail
         * and interrupts requested via GITS_TRANSLATER writes will be ignored.
         * This happens automatically by leaving the num_entries field at
         * zero, which will be caught by the bounds checks we have before
         * every table lookup anyway.
         */
        if (!FIELD_EX64(value, GITS_BASER, VALID)) {
            continue;
        }

        td->page_sz = page_sz;
        td->indirect = FIELD_EX64(value, GITS_BASER, INDIRECT);
        td->entry_sz = FIELD_EX64(value, GITS_BASER, ENTRYSIZE) + 1;
        td->base_addr = baser_base_addr(value, page_sz);
        if (!td->indirect) {
            td->num_entries = (num_pages * page_sz) / td->entry_sz;
        } else {
            td->num_entries = (((num_pages * page_sz) /
                                L1TABLE_ENTRY_SIZE) *
                               (page_sz / td->entry_sz));
        }
        td->num_entries = MIN(td->num_entries, 1ULL << idbits);
    }
}

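/*
 * Extract the command queue parameters from the GITS_CBASER register.
 * Unlike the GITS_BASER<n> tables the command queue is always made of
 * 4K pages, and each command packet is GITS_CMDQ_ENTRY_SIZE (32) bytes,
 * so e.g. GITS_CBASER.Size == 0 (one page) yields 4096 / 32 = 128
 * command slots.
 */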
static void extract_cmdq_params(GICv3ITSState *s)
{
    uint16_t num_pages = 0;
    uint64_t value = s->cbaser;

    num_pages = FIELD_EX64(value, GITS_CBASER, SIZE) + 1;

    memset(&s->cq, 0, sizeof(s->cq));

    if (FIELD_EX64(value, GITS_CBASER, VALID)) {
        s->cq.num_entries = (num_pages * GITS_PAGE_SIZE_4K) /
                            GITS_CMDQ_ENTRY_SIZE;
        s->cq.base_addr = FIELD_EX64(value, GITS_CBASER, PHYADDR);
        s->cq.base_addr <<= R_GITS_CBASER_PHYADDR_SHIFT;
    }
}
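
/*
 * MMIO handlers for the ITS translation register frame; the only
 * architected register in this frame is the write-only GITS_TRANSLATER.
 */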
static MemTxResult gicv3_its_translation_read(void *opaque, hwaddr offset,
                                              uint64_t *data, unsigned size,
                                              MemTxAttrs attrs)
{
    /*
     * GITS_TRANSLATER is write-only, and all other addresses
     * in the interrupt translation space frame are RES0.
     */
    *data = 0;
    return MEMTX_OK;
}

static MemTxResult gicv3_its_translation_write(void *opaque, hwaddr offset,
                                               uint64_t data, unsigned size,
                                               MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result = true;

    trace_gicv3_its_translation_write(offset, data, size, attrs.requester_id);

    switch (offset) {
    case GITS_TRANSLATER:
        if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
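            /*
             * The EventID is the data written; the DeviceID is taken
             * from the requester ID in the transaction attributes.
             */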
            result = do_process_its_cmd(s, attrs.requester_id, data, NONE);
        }
        break;
    default:
        break;
    }

    if (result) {
        return MEMTX_OK;
    } else {
        return MEMTX_ERROR;
    }
}

static bool its_writel(GICv3ITSState *s, hwaddr offset,
                       uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
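    /*
     * Enabling the ITS snapshots the table and command queue geometry
     * from the BASER/CBASER registers and processes any commands the
     * guest has already queued.
     */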
    case GITS_CTLR:
        if (value & R_GITS_CTLR_ENABLED_MASK) {
            s->ctlr |= R_GITS_CTLR_ENABLED_MASK;
            extract_table_params(s);
            extract_cmdq_params(s);
            process_cmdq(s);
        } else {
            s->ctlr &= ~R_GITS_CTLR_ENABLED_MASK;
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice: GITS_CBASER becomes RO if the ITS is
         * already enabled.
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 0, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CBASER + 4:
        /*
         * IMPDEF choice: GITS_CBASER becomes RO if the ITS is
         * already enabled.
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = deposit64(s->cbaser, 32, 32, value);
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = deposit64(s->cwriter, 0, 32,
                               (value & ~R_GITS_CWRITER_RETRY_MASK));
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CWRITER + 4:
        s->cwriter = deposit64(s->cwriter, 32, 32, value);
        break;
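    /*
     * GITS_CREADR is normally read-only, but the architecture makes it
     * writable when GICD_CTLR.DS is set, so that software can restore
     * a saved command queue state.
     */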
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 0, 32,
                                  (value & ~R_GITS_CREADR_STALLED_MASK));
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          HWADDR_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_CREADR + 4:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = deposit64(s->creadr, 32, 32, value);
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          HWADDR_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice: GITS_BASER<n> becomes RO if the ITS is
         * already enabled.
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;

            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
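            /*
             * A 32-bit access with bit 2 of the offset set addresses
             * the high word of the 64-bit register.
             */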
            if (offset & 7) {
                value <<= 32;
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(0, 32);
                s->baser[index] |= value;
            } else {
                value &= ~GITS_BASER_RO_MASK;
                s->baser[index] &= GITS_BASER_RO_MASK | MAKE_64BIT_MASK(32, 32);
                s->baser[index] |= value;
            }
        }
        break;
    case GITS_IIDR:
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}
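
/*
 * 32-bit reads of the control frame; the 64-bit registers are read one
 * word at a time, selected by the low bits of the offset.
 */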
static bool its_readl(GICv3ITSState *s, hwaddr offset,
                      uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_CTLR:
        *data = s->ctlr;
        break;
    case GITS_IIDR:
        *data = gicv3_iidr();
        break;
    case GITS_IDREGS ... GITS_IDREGS + 0x2f:
        /* ID registers */
        *data = gicv3_idreg(s->gicv3, offset - GITS_IDREGS, GICV3_PIDR0_ITS);
        break;
    case GITS_TYPER:
        *data = extract64(s->typer, 0, 32);
        break;
    case GITS_TYPER + 4:
        *data = extract64(s->typer, 32, 32);
        break;
    case GITS_CBASER:
        *data = extract64(s->cbaser, 0, 32);
        break;
    case GITS_CBASER + 4:
        *data = extract64(s->cbaser, 32, 32);
        break;
    case GITS_CREADR:
        *data = extract64(s->creadr, 0, 32);
        break;
    case GITS_CREADR + 4:
        *data = extract64(s->creadr, 32, 32);
        break;
    case GITS_CWRITER:
        *data = extract64(s->cwriter, 0, 32);
        break;
    case GITS_CWRITER + 4:
        *data = extract64(s->cwriter, 32, 32);
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        if (offset & 7) {
            *data = extract64(s->baser[index], 32, 32);
        } else {
            *data = extract64(s->baser[index], 0, 32);
        }
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_writell(GICv3ITSState *s, hwaddr offset,
                        uint64_t value, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_BASER ... GITS_BASER + 0x3f:
        /*
         * IMPDEF choice: GITS_BASER<n> becomes RO if the ITS is
         * already enabled.
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            index = (offset - GITS_BASER) / 8;
            if (s->baser[index] == 0) {
                /* Unimplemented GITS_BASERn: RAZ/WI */
                break;
            }
            s->baser[index] &= GITS_BASER_RO_MASK;
            s->baser[index] |= (value & ~GITS_BASER_RO_MASK);
        }
        break;
    case GITS_CBASER:
        /*
         * IMPDEF choice: GITS_CBASER becomes RO if the ITS is
         * already enabled.
         */
        if (!(s->ctlr & R_GITS_CTLR_ENABLED_MASK)) {
            s->cbaser = value;
            s->creadr = 0;
        }
        break;
    case GITS_CWRITER:
        s->cwriter = value & ~R_GITS_CWRITER_RETRY_MASK;
        if (s->cwriter != s->creadr) {
            process_cmdq(s);
        }
        break;
    case GITS_CREADR:
        if (s->gicv3->gicd_ctlr & GICD_CTLR_DS) {
            s->creadr = value & ~R_GITS_CREADR_STALLED_MASK;
        } else {
            /* RO register, ignore the write */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: invalid guest write to RO register at offset "
                          HWADDR_FMT_plx "\n", __func__, offset);
        }
        break;
    case GITS_TYPER:
        /* RO register, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      HWADDR_FMT_plx "\n", __func__, offset);
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static bool its_readll(GICv3ITSState *s, hwaddr offset,
                       uint64_t *data, MemTxAttrs attrs)
{
    bool result = true;
    int index;

    switch (offset) {
    case GITS_TYPER:
        *data = s->typer;
        break;
    case GITS_BASER ... GITS_BASER + 0x3f:
        index = (offset - GITS_BASER) / 8;
        *data = s->baser[index];
        break;
    case GITS_CBASER:
        *data = s->cbaser;
        break;
    case GITS_CREADR:
        *data = s->creadr;
        break;
    case GITS_CWRITER:
        *data = s->cwriter;
        break;
    default:
        result = false;
        break;
    }
    return result;
}

static MemTxResult gicv3_its_read(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_readl(s, offset, data, attrs);
        break;
    case 8:
        result = its_readll(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_its_badread(offset, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        *data = 0;
    } else {
        trace_gicv3_its_read(offset, *data, size);
    }
    return MEMTX_OK;
}

static MemTxResult gicv3_its_write(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    GICv3ITSState *s = (GICv3ITSState *)opaque;
    bool result;

    switch (size) {
    case 4:
        result = its_writel(s, offset, data, attrs);
        break;
    case 8:
        result = its_writell(s, offset, data, attrs);
        break;
    default:
        result = false;
        break;
    }

    if (!result) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " HWADDR_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_its_badwrite(offset, data, size);
        /*
         * The spec requires that reserved registers are RAZ/WI;
         * so use false returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
    } else {
        trace_gicv3_its_write(offset, data, size);
    }
    return MEMTX_OK;
}

static const MemoryRegionOps gicv3_its_control_ops = {
    .read_with_attrs = gicv3_its_read,
    .write_with_attrs = gicv3_its_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 8,
    .impl.min_access_size = 4,
    .impl.max_access_size = 8,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
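
/*
 * The translation frame accepts 16-bit as well as 32-bit accesses,
 * since a device may deliver its EventID as a 16-bit write to
 * GITS_TRANSLATER.
 */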
static const MemoryRegionOps gicv3_its_translation_ops = {
    .read_with_attrs = gicv3_its_translation_read,
    .write_with_attrs = gicv3_its_translation_write,
    .valid.min_access_size = 2,
    .valid.max_access_size = 4,
    .impl.min_access_size = 2,
    .impl.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(dev);
    int i;
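
    /*
     * The ITS delivers interrupts as physical LPIs, so every
     * redistributor connected to this GIC must support them.
     */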
    for (i = 0; i < s->gicv3->num_cpu; i++) {
        if (!(s->gicv3->cpu[i].gicr_typer & GICR_TYPER_PLPIS)) {
            error_setg(errp, "Physical LPI not supported by CPU %d", i);
            return;
        }
    }

    if (s->itt_entry_size < MIN_ITS_ITT_ENTRY_SIZE) {
        error_setg(errp, "ITT entry size must be at least %d",
                   MIN_ITS_ITT_ENTRY_SIZE);
        return;
    }

    gicv3_add_its(s->gicv3, dev);

    gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops);

    /* Advertise the default ITS feature set in GITS_TYPER */
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, PHYSICAL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, ITT_ENTRY_SIZE,
                          s->itt_entry_size - 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, IDBITS, ITS_IDBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1);
    s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS);
    if (s->gicv3->revision >= 4) {
        /* Our VMOVP handles cross-ITS synchronization itself */
        s->typer = FIELD_DP64(s->typer, GITS_TYPER, VMOVP, 1);
        s->typer = FIELD_DP64(s->typer, GITS_TYPER, VIRTUAL, 1);
    }
}

static void gicv3_its_reset_hold(Object *obj, ResetType type)
{
    GICv3ITSState *s = ARM_GICV3_ITS_COMMON(obj);
    GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);

    if (c->parent_phases.hold) {
        c->parent_phases.hold(obj, type);
    }

    /* Quiescent bit reset to 1 */
    s->ctlr = FIELD_DP32(s->ctlr, GITS_CTLR, QUIESCENT, 1);

    /*
     * Set GITS_BASER0.Type = 0b001 (Device table),
     * GITS_BASER1.Type = 0b100 (Collection table),
     * GITS_BASER2.Type = 0b010 (vPE table) for GICv4 and later, and
     * GITS_BASER<n>.Type = 0b000 (Unimplemented) for n = 3 to 7.
     * GITS_BASER<0,1>.Page_Size defaults to 64KB, and the Entry_Size
     * fields match our in-memory table entry sizes.
     */
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_DEVICE);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[0] = FIELD_DP64(s->baser[0], GITS_BASER, ENTRYSIZE,
                             GITS_DTE_SIZE - 1);

    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, TYPE,
                             GITS_BASER_TYPE_COLLECTION);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, PAGESIZE,
                             GITS_BASER_PAGESIZE_64K);
    s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE,
                             GITS_CTE_SIZE - 1);

    if (its_feature_virtual(s)) {
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE,
                                 GITS_BASER_TYPE_VPE);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE,
                                 GITS_BASER_PAGESIZE_64K);
        s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE,
                                 GITS_VPE_SIZE - 1);
    }
}
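
/*
 * Re-derive the cached table and command queue parameters after
 * migration: they are reconstructed from the register state rather
 * than being migrated directly.
 */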
static void gicv3_its_post_load(GICv3ITSState *s)
{
    if (s->ctlr & R_GITS_CTLR_ENABLED_MASK) {
        extract_table_params(s);
        extract_cmdq_params(s);
    }
}

static const Property gicv3_its_props[] = {
    DEFINE_PROP_LINK("parent-gicv3", GICv3ITSState, gicv3, "arm-gicv3",
                     GICv3State *),
    DEFINE_PROP_UINT8("itt-entry-size", GICv3ITSState, itt_entry_size, 16),
};

static void gicv3_its_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    GICv3ITSClass *ic = ARM_GICV3_ITS_CLASS(klass);
    GICv3ITSCommonClass *icc = ARM_GICV3_ITS_COMMON_CLASS(klass);

    dc->realize = gicv3_arm_its_realize;
    device_class_set_props(dc, gicv3_its_props);
    resettable_class_set_parent_phases(rc, NULL, gicv3_its_reset_hold, NULL,
                                       &ic->parent_phases);
    icc->post_load = gicv3_its_post_load;
}

static const TypeInfo gicv3_its_info = {
    .name = TYPE_ARM_GICV3_ITS,
    .parent = TYPE_ARM_GICV3_ITS_COMMON,
    .instance_size = sizeof(GICv3ITSState),
    .class_init = gicv3_its_class_init,
    .class_size = sizeof(GICv3ITSClass),
};

static void gicv3_its_register_types(void)
{
    type_register_static(&gicv3_its_info);
}

type_init(gicv3_its_register_types)