/*
 * QEMU PowerPC XIVE interrupt controller model
 *
 * Copyright (c) 2017-2024, IBM Corporation.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "system/cpus.h"
#include "system/dma.h"
#include "system/reset.h"
#include "hw/ppc/fdt.h"
#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_chip.h"
#include "hw/ppc/pnv_core.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_xive.h"
#include "hw/ppc/xive_regs.h"
#include "hw/qdev-properties.h"
#include "hw/ppc/ppc.h"
#include "trace.h"

#include <libfdt.h>

#include "pnv_xive_regs.h"

#undef XIVE_DEBUG

/*
 * Virtual structures table (VST)
 */
#define SBE_PER_BYTE   4

typedef struct XiveVstInfo {
    const char *name;
    uint32_t   size;
    uint32_t   max_blocks;
} XiveVstInfo;

static const XiveVstInfo vst_infos[] = {
    [VST_TSEL_IVT]  = { "EAT",  sizeof(XiveEAS), 16 },
    [VST_TSEL_SBE]  = { "SBE",  1,               16 },
    [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 },
    [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 },

    /*
     * Interrupt fifo backing store table (not modeled):
     *
     *  0 - IPI,
     *  1 - HWD,
     *  2 - First escalate,
     *  3 - Second escalate,
     *  4 - Redistribution,
     *  5 - IPI cascaded queue?
     */
    [VST_TSEL_IRQ]  = { "IRQ",  1,               6  },
};

#define xive_error(xive, fmt, ...)                                      \
    qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n",              \
                  (xive)->chip->chip_id, ## __VA_ARGS__);

/*
 * When PC_TCTXT_CHIPID_OVERRIDE is configured, the PC_TCTXT_CHIPID
 * field overrides the hardwired chip ID in the Powerbus operations
 * and for CAM compares
 */
static uint8_t pnv_xive_block_id(PnvXive *xive)
{
    uint8_t blk = xive->chip->chip_id;
    uint64_t cfg_val = xive->regs[PC_TCTXT_CFG >> 3];

    if (cfg_val & PC_TCTXT_CHIPID_OVERRIDE) {
        blk = GETFIELD(PC_TCTXT_CHIPID, cfg_val);
    }

    return blk;
}

/*
 * VST accessors for SBE, EAT, ENDT, NVT
 *
 * Indirect VST tables are arrays of VSDs pointing to a page (of the
 * same size). Each page is a direct VST table.
 */

#define XIVE_VSD_SIZE 8

/* Indirect page size can be 4K, 64K, 2M, 16M. */
static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift)
{
    return page_shift == 12 || page_shift == 16 ||
        page_shift == 21 || page_shift == 24;
}

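/*
 * Direct table lookup. The VSD points at a single contiguous table:
 * the entry address is base + idx * entry size, bounds-checked
 * against the table size encoded in VSD_TSIZE (4K << tsize).
 */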
static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
    uint32_t idx_max;

    idx_max = vst_tsize / info->size - 1;
    if (idx > idx_max) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
                   info->name, idx, idx_max);
#endif
        return 0;
    }

    return vst_addr + idx * info->size;
}

static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type,
                                           uint64_t vsd, uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t vsd_idx;
    uint32_t page_shift;
    uint32_t vst_per_page;

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry %x @0x%" PRIx64,
                   info->name, idx, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    vst_per_page = (1ull << page_shift) / info->size;
    vsd_idx = idx / vst_per_page;

    /* Load the VSD we are looking for, if not already done */
    if (vsd_idx) {
        vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
        if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                       MEMTXATTRS_UNSPECIFIED)) {
            xive_error(xive, "VST: failed to access %s entry %x @0x%"
                       PRIx64, info->name, vsd_idx, vsd_addr);
            return 0;
        }

        if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
            xive_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
#endif
            return 0;
        }

        /*
         * Check that the pages have a consistent size across the
         * indirect table
         */
        if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
            xive_error(xive, "VST: %s entry %x indirect page size differs !?",
                       info->name, idx);
            return 0;
        }
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
}

/*
 * This is a simplified model of operation forwarding on a remote IC.
 *
 * A PC MMIO address is built to identify the NVT structure. The load
 * on the remote IC will return the address of the structure in RAM,
 * which will then be used by pnv_xive_vst_write/read to perform the
 * RAM operation.
 */
static uint64_t pnv_xive_vst_addr_remote(PnvXive *xive, uint32_t type,
                                         uint64_t vsd, uint8_t blk,
                                         uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t remote_addr = vsd & VSD_ADDRESS_MASK;
    uint64_t vst_addr;
    MemTxResult result;

    if (type != VST_TSEL_VPDT) {
        xive_error(xive, "VST: invalid access on remote VST %s %x/%x !?",
                   info->name, blk, idx);
        return 0;
    }

    remote_addr |= ((uint64_t)idx) << xive->pc_shift;

    vst_addr = address_space_ldq_be(&address_space_memory, remote_addr,
                                    MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                   " for NVT %x/%x\n", remote_addr, blk, idx);
        return 0;
    }

    return vst_addr;
}

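/*
 * Resolve the RAM address of a VST entry, dispatching on the VSD
 * mode: forwarded tables are resolved on the remote IC, indirect
 * tables go through the VSD page directory, and direct tables are a
 * plain array lookup.
 */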
static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk,
                                  uint32_t idx)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd;

    if (blk >= info->max_blocks) {
        xive_error(xive, "VST: invalid block id %d for VST %s %d !?",
                   blk, info->name, idx);
        return 0;
    }

    vsd = xive->vsds[type][blk];

    /* Remote VST access */
    if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
        return pnv_xive_vst_addr_remote(xive, type, vsd, blk, idx);
    }

    if (VSD_INDIRECT & vsd) {
        return pnv_xive_vst_addr_indirect(xive, type, vsd, idx);
    }

    return pnv_xive_vst_addr_direct(xive, type, vsd, idx);
}

static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk,
                             uint32_t idx, void *data)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    result = address_space_read(&address_space_memory, addr,
                                MEMTXATTRS_UNSPECIFIED, data,
                                info->size);
    if (result != MEMTX_OK) {
        xive_error(xive, "VST: read failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x\n", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

#define XIVE_VST_WORD_ALL -1

static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk,
                              uint32_t idx, void *data, uint32_t word_number)
{
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx);
    MemTxResult result;

    if (!addr) {
        return -1;
    }

    if (word_number == XIVE_VST_WORD_ALL) {
        result = address_space_write(&address_space_memory, addr,
                                     MEMTXATTRS_UNSPECIFIED, data,
                                     info->size);
    } else {
        result = address_space_write(&address_space_memory,
                                     addr + word_number * 4,
                                     MEMTXATTRS_UNSPECIFIED,
                                     data + word_number * 4, 4);
    }

    if (result != MEMTX_OK) {
        xive_error(xive, "VST: write failed at @0x%" HWADDR_PRIx
                   " for VST %s %x/%x\n", addr, info->name, blk, idx);
        return -1;
    }
    return 0;
}

static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEND *end)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: END %x/%x is remote !?", blk, idx);
        return -1;
    }

    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end);
}

static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveEND *end, uint8_t word_number)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: END %x/%x is remote !?", blk, idx);
        return -1;
    }

    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end,
                              word_number);
}

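/*
 * END cache watch update. Firmware selects the target END with the
 * block/index fields of VC_EQC_CWATCH_SPEC and provides the four END
 * words in VC_EQC_CWATCH_DAT0-3; the update is written back to the
 * END table in RAM.
 */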
static int pnv_xive_end_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t eqc_watch[4];

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_end_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(VC_EQC_CWATCH_BLOCKID,
                           xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(VC_EQC_CWATCH_OFFSET,
                            xive->regs[(VC_EQC_CWATCH_SPEC >> 3)]);
    uint64_t eqc_watch[4] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_EQDT, blk, idx, eqc_watch)) {
        xive_error(xive, "VST: no END entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) {
        xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(eqc_watch[i]);
    }
}

static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveNVT *nvt)
{
    return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt);
}

static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                              XiveNVT *nvt, uint8_t word_number)
{
    return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt,
                              word_number);
}

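/*
 * NVT cache watch update, the PC-side counterpart of the END watch:
 * PC_VPC_CWATCH_SPEC selects the NVT and PC_VPC_CWATCH_DAT0-7 carry
 * the eight NVT words written back to RAM.
 */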
static int pnv_xive_nvt_update(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    int i;
    uint64_t vpc_watch[8];

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]);
    }

    return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch,
                              XIVE_VST_WORD_ALL);
}

static void pnv_xive_nvt_cache_load(PnvXive *xive)
{
    uint8_t blk = GETFIELD(PC_VPC_CWATCH_BLOCKID,
                           xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint32_t idx = GETFIELD(PC_VPC_CWATCH_OFFSET,
                            xive->regs[(PC_VPC_CWATCH_SPEC >> 3)]);
    uint64_t vpc_watch[8] = { 0 };
    int i;

    if (pnv_xive_vst_read(xive, VST_TSEL_VPDT, blk, idx, vpc_watch)) {
        xive_error(xive, "VST: no NVT entry %x/%x !?", blk, idx);
    }

    for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) {
        xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i] = be64_to_cpu(vpc_watch[i]);
    }
}

static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                            XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    /*
     * EAT lookups should be local to the IC
     */
    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}

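/*
 * The PQ bits of the IPIs are served by the XiveSource model and not
 * by their RAM-backed SBE table (see VST_TSEL_SBE below).
 */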
static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_get(&xive->ipi_source, idx);
    return 0;
}

static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
                           uint8_t *pq)
{
    PnvXive *xive = PNV_XIVE(xrtr);

    if (pnv_xive_block_id(xive) != blk) {
        xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
        return -1;
    }

    *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
    return 0;
}

/*
 * One bit per thread id. The first register PC_THREAD_EN_REG0 covers
 * cores 0-15 (normal) or 0-7 (fused) of the chip. The second register
 * covers cores 16-23 (normal) or 8-11 (fused).
 */
static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    uint32_t fc = PNV9_PIR2FUSEDCORE(pir);
    uint64_t reg = fc < 8 ? PC_THREAD_EN_REG0 : PC_THREAD_EN_REG1;
    uint32_t bit = pir & 0x3f;

    return xive->regs[reg >> 3] & PPC_BIT(bit);
}

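/*
 * Scan all enabled HW threads of the chip and match the NVT against
 * their thread context CAM lines. A duplicate match is reported as a
 * guest error since the model does not support it.
 */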
static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
                              uint8_t nvt_blk, uint32_t nvt_idx,
                              bool crowd, bool cam_ignore, uint8_t priority,
                              uint32_t logic_serv, XiveTCTXMatch *match)
{
    PnvXive *xive = PNV_XIVE(xptr);
    PnvChip *chip = xive->chip;
    int count = 0;
    int i, j;

    for (i = 0; i < chip->nr_cores; i++) {
        PnvCore *pc = chip->cores[i];
        CPUCore *cc = CPU_CORE(pc);

        for (j = 0; j < cc->nr_threads; j++) {
            PowerPCCPU *cpu = pc->threads[j];
            XiveTCTX *tctx;
            int ring;

            if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
                continue;
            }

            tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

            /*
             * Check the thread context CAM lines and record matches.
             */
            ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
                                             nvt_idx, cam_ignore,
                                             logic_serv);
            /*
             * Save the context and keep on searching to catch
             * duplicates, which we don't support yet.
             */
            if (ring != -1) {
                if (match->tctx) {
                    qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                  "thread context NVT %x/%x\n",
                                  nvt_blk, nvt_idx);
                    return -1;
                }

                match->ring = ring;
                match->tctx = tctx;
                count++;
            }
        }
    }

    return count;
}

static uint32_t pnv_xive_presenter_get_config(XivePresenter *xptr)
{
    uint32_t cfg = 0;

    /* TIMA GEN1 is all P9 knows */
    cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;

    return cfg;
}

static uint8_t pnv_xive_get_block_id(XiveRouter *xrtr)
{
    return pnv_xive_block_id(PNV_XIVE(xrtr));
}

/*
 * The TIMA MMIO space is shared among the chips. To identify the chip
 * from which the access is being done, we extract the chip id from
 * the PIR.
 */
static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
{
    int pir = ppc_cpu_pir(cpu);
    XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
    PnvXive *xive = PNV_XIVE(xptr);

    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }
    return xive;
}

/*
 * The internal sources (IPIs) of the interrupt controller have no
 * knowledge of the XIVE chip on which they reside. Encode the block
 * id in the source interrupt number before forwarding the source
 * event notification to the Router. This is required on a multichip
 * system.
 */
static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
    PnvXive *xive = PNV_XIVE(xn);
    uint8_t blk = pnv_xive_block_id(xive);

    xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}

/*
 * XIVE helpers
 */

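/*
 * The BARM registers hold the complement of the window size mask, so
 * taking the two's complement recovers the BAR size.
 */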
static uint64_t pnv_xive_vc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK;
}

static uint64_t pnv_xive_edt_shift(PnvXive *xive)
{
    return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX);
}

static uint64_t pnv_xive_pc_size(PnvXive *xive)
{
    return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK;
}

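/*
 * The SBE table backs 2 PQ bits per interrupt, i.e. SBE_PER_BYTE (4)
 * interrupts per byte, so a direct table of tsize bytes covers
 * tsize * SBE_PER_BYTE IPIs. Sizing from an indirect table is not
 * supported and yields 0.
 */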
static uint32_t pnv_xive_nr_ipis(PnvXive *xive, uint8_t blk)
{
    uint64_t vsd = xive->vsds[VST_TSEL_SBE][blk];
    uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);

    return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
}

/*
 * Compute the number of entries per indirect subpage.
 */
static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type)
{
    uint8_t blk = pnv_xive_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    if (ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
                   MEMTXATTRS_UNSPECIFIED)) {
        xive_error(xive, "VST: failed to access %s entry @0x%" PRIx64,
                   info->name, vsd_addr);
        return 0;
    }

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE_DEBUG
        /* no entry index in this context, report the table name only */
        xive_error(xive, "VST: invalid %s entry !?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive_vst_page_size_allowed(page_shift)) {
        xive_error(xive, "VST: invalid %s page shift %d", info->name,
                   page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}

/*
 * EDT Table
 *
 * The Virtualization Controller MMIO region containing the IPI ESB
 * pages and END ESB pages is sub-divided into "sets" which map
 * portions of the VC region to the different ESB pages. It is
 * configured at runtime through the EDT "Domain Table" to let the
 * firmware decide how to split the VC address space between IPI ESB
 * pages and END ESB pages.
 */

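/*
 * Each set spans vc_size / XIVE_TABLE_EDT_MAX bytes of the VC region
 * (see pnv_xive_edt_shift). For instance, a 4GB VC region split into
 * 64 sets would give 64MB per set; the numbers are only illustrative.
 */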
/*
 * Computes the overall size of the IPI or the END ESB pages
 */
static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type)
{
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t size = 0;
    int i;

    for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type == type) {
            size += edt_size;
        }
    }

    return size;
}

/*
 * Maps an offset of the VC region in the IPI or END region using the
 * layout defined by the EDT "Domain Table"
 */
static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset,
                                    uint64_t type)
{
    int i;
    uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive);
    uint64_t edt_offset = vc_offset;

    for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) {
        uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]);

        if (edt_type != type) {
            edt_offset -= edt_size;
        }
    }

    return edt_offset;
}

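/*
 * Called when the EDT is fully configured: resize the IPI and END
 * EDT windows to their share of the VC region and map them in.
 */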
static void pnv_xive_edt_resize(PnvXive *xive)
{
    uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI);
    uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ);

    memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size);
    memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio);

    memory_region_set_size(&xive->end_edt_mmio, end_edt_size);
    memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio);
}

/*
 * XIVE Table configuration. Only EDT is supported.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_tsize = 1ull << page_shift;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * END ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own.
         *
         * If the table is direct, we can compute the number of PQ
         * entries provisioned by FW (such as skiboot) and resize the
         * ESB window accordingly.
         */
        if (!(VSD_INDIRECT & vsd)) {
            memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
                                   * (1ull << xsrc->esb_shift));
        }
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contain the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both the PC and VC sub-engines are configured, as each makes use of
 * the Virtual Structure Tables: SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region. The layout is compatible with
 * both 4K and 64K pages:
 *
 * Page 0           sub-engine BARs
 *  0x000 - 0x3FF   IC registers
 *  0x400 - 0x7FF   PC registers
 *  0x800 - 0xFFF   VC registers
 *
 * Page 1           Notify page (writes only)
 *  0x000 - 0x7FF   HW interrupt triggers (PSI, PHB)
 *  0x800 - 0xFFF   forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */

/*
 * IC - registers MMIO
 */

static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         */
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firmware (skiboot has removed
         * support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            xive->ipi_source.esb_flags |= XIVE_SRC_STORE_EOI;
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio, 0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
    case VC_IVC_SCRUB_TRIG:
        break;

    case VC_EQC_CWATCH_SPEC:
        val &= ~VC_EQC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_end_update(xive);
        break;
    case VC_EQC_SCRUB_MASK:
    case VC_EQC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    case PC_VPC_CWATCH_SPEC:
        val &= ~PC_VPC_CWATCH_CONFLICT; /* HW resets this bit */
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_CWATCH_DAT0:
        /* writing to DATA0 triggers the cache write */
        xive->regs[reg] = val;
        pnv_xive_nvt_update(xive);
        break;
    case PC_VPC_SCRUB_MASK:
    case PC_VPC_SCRUB_TRIG:
        /*
         * The scrubbing registers flush the cache in RAM and can also
         * invalidate.
         */
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_EQC_CWATCH_SPEC:
        xive->regs[reg] = ~(VC_EQC_CWATCH_FULL | VC_EQC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_end_cache_load(xive);
        val = xive->regs[reg];
        break;
    case VC_EQC_CWATCH_DAT1 ... VC_EQC_CWATCH_DAT3:
        val = xive->regs[reg];
        break;

    case PC_VPC_CWATCH_SPEC:
        xive->regs[reg] = ~(PC_VPC_CWATCH_FULL | PC_VPC_CWATCH_CONFLICT);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive_nvt_cache_load(xive);
        val = xive->regs[reg];
        break;
    case PC_VPC_CWATCH_DAT1 ... PC_VPC_CWATCH_DAT7:
        val = xive->regs[reg];
        break;

    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI    0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW     0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC 0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC 0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS  0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5      0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6      0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7      0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI       0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW        0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC    0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC    0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS     0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL      0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH      0xf00 /* Sync push context */
#define PNV_XIVE_SYNC_VPC       0xf80 /* Sync remove VPC store */

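/*
 * An END trigger is handled locally when this IC owns the END table
 * (exclusive mode) and is forwarded as a store on the notify port of
 * the remote IC otherwise (forward mode).
 */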
static void pnv_xive_end_notify(XiveRouter *xrtr, XiveEAS *eas)
{
    PnvXive *xive = PNV_XIVE(xrtr);
    uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
    uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
    uint32_t end_data = xive_get_field64(EAS_END_DATA, eas->w);
    uint64_t end_vsd = xive->vsds[VST_TSEL_EQDT][end_blk];

    switch (GETFIELD(VSD_MODE, end_vsd)) {
    case VSD_MODE_EXCLUSIVE:
        /* Perform the END notification on the local IC. */
        xive_router_end_notify(xrtr, eas);
        break;

    case VSD_MODE_FORWARD: {
        MemTxResult result;
        uint64_t notif_port = end_vsd & VSD_ADDRESS_MASK;
        uint64_t data = XIVE_TRIGGER_END | XIVE_TRIGGER_PQ |
            be64_to_cpu(eas->w);

        /* Forward the store on the remote IC notify page. */
        address_space_stq_be(&address_space_memory, notif_port, data,
                             MEMTXATTRS_UNSPECIFIED, &result);
        if (result != MEMTX_OK) {
            xive_error(xive, "IC: Forward notif END %x/%x [%x] failed @%"
                       HWADDR_PRIx, end_blk, end_idx, end_data, notif_port);
            return;
        }
        break;
    }

    case VSD_MODE_INVALID:
    default:
        /* Set FIR */
        xive_error(xive, "IC: Invalid END VSD for block %x", end_blk);
        return;
    }
}

/*
 * The notify page can either be used to receive trigger events from
 * the HW controllers (PHB, PSI) or to reroute interrupts between
 * Interrupt controllers.
 */
static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    trace_pnv_xive_ic_hw_trigger(addr, val);

    if (val & XIVE_TRIGGER_END) {
        val = cpu_to_be64(val);
        pnv_xive_end_notify(XIVE_ROUTER(xive), (XiveEAS *) &val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                       !!(val & XIVE_TRIGGER_PQ));
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */
static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id of
 * the target CPU is configured in the PC_TCTXT_INDIR0 register before
 * use. This is used for resets and also for debug purposes.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    PnvChip *chip = xive->chip;
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = (chip->chip_id << 8) | GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir);
    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!pnv_xive_is_cpu_enabled(xive, cpu)) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

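/*
 * Direct TIMA accesses: the target thread context is the one of the
 * accessing CPU (current_cpu), unlike the indirect handlers above.
 */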
static void pnv_xive_tm_write(void *opaque, hwaddr offset,
                              uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    xive_tctx_tm_write(XIVE_PRESENTER(xive), tctx, offset, value, size);
}

static uint64_t pnv_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive *xive = pnv_xive_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);

    return xive_tctx_tm_read(XIVE_PRESENTER(xive), tctx, offset, size);
}

const MemoryRegionOps pnv_xive_tm_ops = {
    .read = pnv_xive_tm_read,
    .write = pnv_xive_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};
/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
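/*
 * The VC BAR is divided into equal-sized segments, each backed by an
 * EDT entry. The entry type (CQ_TDR_EDT_TYPE) routes the access to
 * the IPI or the END ESB address space after pnv_xive_edt_offset()
 * has remapped the offset. For illustration, assuming a 64K segment
 * size, an access at VC offset 0x30000 would be dispatched through
 * EDT entry 3.
 */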
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
/*
 * Presenter Controller MMIO region. Points to the NVT sets.
 *
 * HW implements all possible mem ops to the underlying NVT structure
 * but QEMU does not need to be so precise. The model implementation
 * simply returns the RAM address of the NVT structure which is then
 * used by pnv_xive_vst_write/read to perform the RAM operation.
 */
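/*
 * For illustration: a load at PC offset (nvt_idx << pc_shift) returns
 * the guest RAM address of NVT 'nvt_idx' in the VPDT, which
 * pnv_xive_vst_write/read then use to access the entry directly.
 */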
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint32_t nvt_idx = offset >> xive->pc_shift;
    uint8_t blk = pnv_xive_block_id(xive); /* TODO: VDT -> block xlate */

    return pnv_xive_vst_addr(xive, VST_TSEL_VPDT, blk, nvt_idx);
}
static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "PC: invalid write @%"HWADDR_PRIx, addr);
}
static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
static void xive_nvt_pic_print_info(XiveNVT *nvt, uint32_t nvt_idx,
                                    GString *buf)
{
    uint8_t eq_blk = xive_get_field32(NVT_W1_EQ_BLOCK, nvt->w1);
    uint32_t eq_idx = xive_get_field32(NVT_W1_EQ_INDEX, nvt->w1);

    if (!xive_nvt_is_valid(nvt)) {
        return;
    }

    g_string_append_printf(buf, "  %08x end:%02x/%04x IPB:%02x\n",
                           nvt_idx, eq_blk, eq_idx,
                           xive_get_field32(NVT_W4_IPB, nvt->w4));
}

void pnv_xive_pic_print_info(PnvXive *xive, GString *buf)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = pnv_xive_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive, blk);
    XiveEAS eas;
    XiveEND end;
    XiveNVT nvt;
    int i;
    uint64_t xive_nvt_per_subpage;

    g_string_append_printf(buf, "XIVE[%x] #%d Source %08x .. %08x\n",
                           chip_id, blk, srcno0, srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, buf);

    g_string_append_printf(buf, "XIVE[%x] #%d EAT %08x .. %08x\n",
                           chip_id, blk, srcno0, srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, buf);
        }
    }

    g_string_append_printf(buf, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_pic_print_info(&end, i++, buf);
    }

    g_string_append_printf(buf, "XIVE[%x] #%d END Escalation EAT\n",
                           chip_id, blk);
    i = 0;
    while (!xive_router_get_end(xrtr, blk, i, &end)) {
        xive_end_eas_pic_print_info(&end, i++, buf);
    }

    g_string_append_printf(buf, "XIVE[%x] #%d NVTT %08x .. %08x\n",
                           chip_id, blk, 0, XIVE_NVT_COUNT - 1);
    xive_nvt_per_subpage = pnv_xive_vst_per_subpage(xive, VST_TSEL_VPDT);
    for (i = 0; i < XIVE_NVT_COUNT; i += xive_nvt_per_subpage) {
        while (!xive_router_get_nvt(xrtr, blk, i, &nvt)) {
            xive_nvt_pic_print_info(&nvt, i++, buf);
        }
    }
}
static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /* Default page size (should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}
static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE_END_SOURCE);
}

/*
 *  Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
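/*
 * Sizing sketch (assuming a 32 GiB PNV9_XIVE_VC_SIZE and two 64K ESB
 * pages per interrupt, i.e. XIVE_ESB_64K_2PAGE == 17):
 *
 *   2^35 / 2^17 = 2^18 = 256K interrupts, and as many ENDs
 */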
static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    PnvXiveClass *pxc = PNV_XIVE_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    assert(xive->chip);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
    if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);
    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);
    xive->ic_notify_mmio.disable_reentrancy_guard = true;

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev), &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);

    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);
    xive->pc_mmio.disable_reentrancy_guard = true;

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &pnv_xive_tm_ops,
                          xive, "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}
static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}
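/*
 * Resulting device tree node, for illustration (the actual address
 * and size come from PNV9_XSCOM_XIVE_BASE/SIZE):
 *
 *   xive@<base> {
 *       reg = <base size>;
 *       compatible = "ibm,power9-xive-x";
 *   };
 */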
static const Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    /* The PnvChip id identifies the XIVE interrupt controller. */
    DEFINE_PROP_LINK("chip", PnvXive, chip, TYPE_PNV_CHIP, PnvChip *),
};
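/*
 * The BARs and the chip link are expected to be set by the chip model
 * before realize, along these lines (sketch, not the exact call site):
 *
 *   object_property_set_int(OBJECT(xive), "ic-bar",
 *                           PNV9_XIVE_IC_BASE(chip), &error_fatal);
 *   object_property_set_link(OBJECT(xive), "chip", OBJECT(chip),
 *                            &error_abort);
 */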
static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
    XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
    PnvXiveClass *pxc = PNV_XIVE_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    /* dc->realize is set here; the chained parent ends up in pxc */
    device_class_set_parent_realize(dc, pnv_xive_realize,
                                    &pxc->parent_realize);
    device_class_set_props(dc, pnv_xive_properties);

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_pq = pnv_xive_get_pq;
    xrc->set_pq = pnv_xive_set_pq;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_block_id = pnv_xive_get_block_id;
    xrc->end_notify = pnv_xive_end_notify;

    xnc->notify = pnv_xive_notify;

    xpc->match_nvt = pnv_xive_match_nvt;
    xpc->get_config = pnv_xive_presenter_get_config;
}
static const TypeInfo pnv_xive_info = {
    .name = TYPE_PNV_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init = pnv_xive_class_init,
    .class_size = sizeof(PnvXiveClass),
    .interfaces = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)