pnv_phb3.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184
  1. /*
  2. * QEMU PowerPC PowerNV (POWER8) PHB3 model
  3. *
  4. * Copyright (c) 2014-2020, IBM Corporation.
  5. *
  6. * This code is licensed under the GPL version 2 or later. See the
  7. * COPYING file in the top-level directory.
  8. */
  9. #include "qemu/osdep.h"
  10. #include "qemu/log.h"
  11. #include "qapi/visitor.h"
  12. #include "qapi/error.h"
  13. #include "hw/pci-host/pnv_phb3_regs.h"
  14. #include "hw/pci-host/pnv_phb.h"
  15. #include "hw/pci-host/pnv_phb3.h"
  16. #include "hw/pci/pcie_host.h"
  17. #include "hw/pci/pcie_port.h"
  18. #include "hw/ppc/pnv.h"
  19. #include "hw/ppc/pnv_chip.h"
  20. #include "hw/irq.h"
  21. #include "hw/qdev-properties.h"
  22. #include "qom/object.h"
  23. #include "sysemu/sysemu.h"
/* Log a guest-triggerable PHB3 error, tagged with chip and PHB ids */
#define phb3_error(phb, fmt, ...)                                       \
    qemu_log_mask(LOG_GUEST_ERROR, "phb3[%d:%d]: " fmt "\n",            \
                  (phb)->chip_id, (phb)->phb_id, ## __VA_ARGS__)
  27. static PCIDevice *pnv_phb3_find_cfg_dev(PnvPHB3 *phb)
  28. {
  29. PCIHostState *pci = PCI_HOST_BRIDGE(phb->phb_base);
  30. uint64_t addr = phb->regs[PHB_CONFIG_ADDRESS >> 3];
  31. uint8_t bus, devfn;
  32. if (!(addr >> 63)) {
  33. return NULL;
  34. }
  35. bus = (addr >> 52) & 0xff;
  36. devfn = (addr >> 44) & 0xff;
  37. return pci_find_device(pci->bus, bus, devfn);
  38. }
/*
 * The CONFIG_DATA register expects little endian accesses, but as the
 * region is big endian, we have to swap the value.
 */
static void pnv_phb3_config_write(PnvPHB3 *phb, unsigned off,
                                  unsigned size, uint64_t val)
{
    uint32_t cfg_addr, limit;
    PCIDevice *pdev;

    pdev = pnv_phb3_find_cfg_dev(phb);
    if (!pdev) {
        return;
    }
    /* Config offset: high bits from CONFIG_ADDRESS, low bits from 'off' */
    cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc;
    cfg_addr |= off;
    limit = pci_config_size(pdev);
    if (limit <= cfg_addr) {
        /*
         * conventional pci device can be behind pcie-to-pci bridge.
         * 256 <= addr < 4K has no effects.
         */
        return;
    }
    /* Undo the big-endian region's swap so the device sees LE data */
    switch (size) {
    case 1:
        break;
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    default:
        g_assert_not_reached();
    }
    pci_host_config_write_common(pdev, cfg_addr, limit, val, size);
}
  76. static uint64_t pnv_phb3_config_read(PnvPHB3 *phb, unsigned off,
  77. unsigned size)
  78. {
  79. uint32_t cfg_addr, limit;
  80. PCIDevice *pdev;
  81. uint64_t val;
  82. pdev = pnv_phb3_find_cfg_dev(phb);
  83. if (!pdev) {
  84. return ~0ull;
  85. }
  86. cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc;
  87. cfg_addr |= off;
  88. limit = pci_config_size(pdev);
  89. if (limit <= cfg_addr) {
  90. /*
  91. * conventional pci device can be behind pcie-to-pci bridge.
  92. * 256 <= addr < 4K has no effects.
  93. */
  94. return ~0ull;
  95. }
  96. val = pci_host_config_read_common(pdev, cfg_addr, limit, size);
  97. switch (size) {
  98. case 1:
  99. return val;
  100. case 2:
  101. return bswap16(val);
  102. case 4:
  103. return bswap32(val);
  104. default:
  105. g_assert_not_reached();
  106. }
  107. }
/*
 * (Re)map the M32 window: an alias into the PCI MMIO space placed
 * inside whichever enabled PBCQ MMIO BAR contains its base address.
 */
static void pnv_phb3_check_m32(PnvPHB3 *phb)
{
    uint64_t base, start, size;
    MemoryRegion *parent;
    PnvPBCQState *pbcq = &phb->pbcq;

    /* Tear down any previous mapping before re-evaluating */
    if (memory_region_is_mapped(&phb->mr_m32)) {
        memory_region_del_subregion(phb->mr_m32.container, &phb->mr_m32);
    }

    /* Window disabled: leave it unmapped */
    if (!(phb->regs[PHB_PHB3_CONFIG >> 3] & PHB_PHB3C_M32_EN)) {
        return;
    }

    /* Grab geometry from registers */
    base = phb->regs[PHB_M32_BASE_ADDR >> 3];
    start = phb->regs[PHB_M32_START_ADDR >> 3];
    /* Size is the two's complement of the (high-bits) base mask */
    size = ~(phb->regs[PHB_M32_BASE_MASK >> 3] | 0xfffc000000000000ull) + 1;

    /* Check if it matches an enabled MMIO region in the PBCQ */
    if (memory_region_is_mapped(&pbcq->mmbar0) &&
        base >= pbcq->mmio0_base &&
        (base + size) <= (pbcq->mmio0_base + pbcq->mmio0_size)) {
        parent = &pbcq->mmbar0;
        base -= pbcq->mmio0_base;
    } else if (memory_region_is_mapped(&pbcq->mmbar1) &&
               base >= pbcq->mmio1_base &&
               (base + size) <= (pbcq->mmio1_base + pbcq->mmio1_size)) {
        parent = &pbcq->mmbar1;
        base -= pbcq->mmio1_base;
    } else {
        /* Doesn't land in an enabled BAR: nothing to map */
        return;
    }

    /* Create alias at the BAR-relative base */
    memory_region_init_alias(&phb->mr_m32, OBJECT(phb), "phb3-m32",
                             &phb->pci_mmio, start, size);
    memory_region_add_subregion(parent, base, &phb->mr_m32);
}
/*
 * (Re)map one M64 window (IODA2 M64BT entry 'index') as an alias into
 * the PCI MMIO space, parented into the matching PBCQ MMIO BAR.
 */
static void pnv_phb3_check_m64(PnvPHB3 *phb, uint32_t index)
{
    uint64_t base, start, size, m64;
    MemoryRegion *parent;
    PnvPBCQState *pbcq = &phb->pbcq;

    if (memory_region_is_mapped(&phb->mr_m64[index])) {
        /* Should we destroy it in RCU friendly way... ? */
        memory_region_del_subregion(phb->mr_m64[index].container,
                                    &phb->mr_m64[index]);
    }

    /* Get table entry */
    m64 = phb->ioda_M64BT[index];
    if (!(m64 & IODA2_M64BT_ENABLE)) {
        return;
    }

    /* Grab geometry from registers */
    base = GETFIELD(IODA2_M64BT_BASE, m64) << 20;
    if (m64 & IODA2_M64BT_SINGLE_PE) {
        /* Single-PE mode: base is aligned down to 32M */
        base &= ~0x1ffffffull;
    }
    size = GETFIELD(IODA2_M64BT_MASK, m64) << 20;
    size |= 0xfffc000000000000ull;
    size = ~size + 1;           /* two's complement of the mask */
    start = base | (phb->regs[PHB_M64_UPPER_BITS >> 3]);

    /* Check if it matches an enabled MMIO region in the PBCQ */
    if (memory_region_is_mapped(&pbcq->mmbar0) &&
        base >= pbcq->mmio0_base &&
        (base + size) <= (pbcq->mmio0_base + pbcq->mmio0_size)) {
        parent = &pbcq->mmbar0;
        base -= pbcq->mmio0_base;
    } else if (memory_region_is_mapped(&pbcq->mmbar1) &&
               base >= pbcq->mmio1_base &&
               (base + size) <= (pbcq->mmio1_base + pbcq->mmio1_size)) {
        parent = &pbcq->mmbar1;
        base -= pbcq->mmio1_base;
    } else {
        return;
    }

    /* Create alias */
    memory_region_init_alias(&phb->mr_m64[index], OBJECT(phb), "phb3-m64",
                             &phb->pci_mmio, start, size);
    memory_region_add_subregion(parent, base, &phb->mr_m64[index]);
}
  185. static void pnv_phb3_check_all_m64s(PnvPHB3 *phb)
  186. {
  187. uint64_t i;
  188. for (i = 0; i < PNV_PHB3_NUM_M64; i++) {
  189. pnv_phb3_check_m64(phb, i);
  190. }
  191. }
/*
 * Write one LXIVT entry: cache the (server, priority, node) fields and
 * forward the routing to the LSI interrupt source.
 */
static void pnv_phb3_lxivt_write(PnvPHB3 *phb, unsigned idx, uint64_t val)
{
    uint8_t server, prio;

    phb->ioda_LXIVT[idx] = val & (IODA2_LXIVT_SERVER |
                                  IODA2_LXIVT_PRIORITY |
                                  IODA2_LXIVT_NODE_ID);
    server = GETFIELD(IODA2_LXIVT_SERVER, val);
    prio = GETFIELD(IODA2_LXIVT_PRIORITY, val);

    /*
     * The low order 2 bits are the link pointer (Type II interrupts).
     * Shift back to get a valid IRQ server.
     */
    server >>= 2;

    ics_write_xive(&phb->lsis, idx, server, prio, prio);
}
/*
 * Decode the IODA_ADDR register into a pointer to the selected IODA
 * table entry. Returns NULL for tables that are valid but not backed
 * by state in this model (IVC_CAM, RBA, RCAM, MRT, PEST, TCAM, TDR).
 * Optionally reports the table id and index, and handles the
 * auto-increment mode of the address register.
 */
static uint64_t *pnv_phb3_ioda_access(PnvPHB3 *phb,
                                      unsigned *out_table, unsigned *out_idx)
{
    uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3];
    unsigned int index = GETFIELD(PHB_IODA_AD_TADR, adreg);
    unsigned int table = GETFIELD(PHB_IODA_AD_TSEL, adreg);
    unsigned int mask;
    uint64_t *tptr = NULL;

    /* Per-table backing array (if any) and index mask (table size - 1) */
    switch (table) {
    case IODA2_TBL_LIST:
        tptr = phb->ioda_LIST;
        mask = 7;
        break;
    case IODA2_TBL_LXIVT:
        tptr = phb->ioda_LXIVT;
        mask = 7;
        break;
    case IODA2_TBL_IVC_CAM:
    case IODA2_TBL_RBA:
        mask = 31;
        break;
    case IODA2_TBL_RCAM:
        mask = 63;
        break;
    case IODA2_TBL_MRT:
        mask = 7;
        break;
    case IODA2_TBL_PESTA:
    case IODA2_TBL_PESTB:
        mask = 255;
        break;
    case IODA2_TBL_TVT:
        tptr = phb->ioda_TVT;
        mask = 511;
        break;
    case IODA2_TBL_TCAM:
    case IODA2_TBL_TDR:
        mask = 63;
        break;
    case IODA2_TBL_M64BT:
        tptr = phb->ioda_M64BT;
        mask = 15;
        break;
    case IODA2_TBL_M32DT:
        tptr = phb->ioda_MDT;
        mask = 255;
        break;
    case IODA2_TBL_PEEV:
        tptr = phb->ioda_PEEV;
        mask = 3;
        break;
    default:
        phb3_error(phb, "invalid IODA table %d", table);
        return NULL;
    }
    index &= mask;
    if (out_idx) {
        *out_idx = index;
    }
    if (out_table) {
        *out_table = table;
    }
    if (tptr) {
        tptr += index;
    }
    /* Auto-increment: bump the index field for the next access */
    if (adreg & PHB_IODA_AD_AUTOINC) {
        index = (index + 1) & mask;
        adreg = SETFIELD(PHB_IODA_AD_TADR, adreg, index);
    }
    phb->regs[PHB_IODA_ADDR >> 3] = adreg;
    return tptr;
}
  279. static uint64_t pnv_phb3_ioda_read(PnvPHB3 *phb)
  280. {
  281. unsigned table;
  282. uint64_t *tptr;
  283. tptr = pnv_phb3_ioda_access(phb, &table, NULL);
  284. if (!tptr) {
  285. /* Return 0 on unsupported tables, not ff's */
  286. return 0;
  287. }
  288. return *tptr;
  289. }
  290. static void pnv_phb3_ioda_write(PnvPHB3 *phb, uint64_t val)
  291. {
  292. unsigned table, idx;
  293. uint64_t *tptr;
  294. tptr = pnv_phb3_ioda_access(phb, &table, &idx);
  295. if (!tptr) {
  296. return;
  297. }
  298. /* Handle side effects */
  299. switch (table) {
  300. case IODA2_TBL_LXIVT:
  301. pnv_phb3_lxivt_write(phb, idx, val);
  302. break;
  303. case IODA2_TBL_M64BT:
  304. *tptr = val;
  305. pnv_phb3_check_m64(phb, idx);
  306. break;
  307. default:
  308. *tptr = val;
  309. }
  310. }
/*
 * This is called whenever the PHB LSI, MSI source ID register or
 * the PBCQ irq filters are written.
 */
void pnv_phb3_remap_irqs(PnvPHB3 *phb)
{
    ICSState *ics = &phb->lsis;
    uint32_t local, global, count, mask, comp;
    uint64_t baren;
    PnvPBCQState *pbcq = &phb->pbcq;

    /*
     * First check if we are enabled. Unlike real HW we don't separate
     * TX and RX so we enable if both are set
     */
    baren = pbcq->nest_regs[PBCQ_NEST_BAR_EN];
    if (!(baren & PBCQ_NEST_BAR_EN_IRSN_RX) ||
        !(baren & PBCQ_NEST_BAR_EN_IRSN_TX)) {
        ics->offset = 0;
        return;
    }

    /* Grab local LSI source ID */
    local = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]) << 3;

    /* Grab global one and compare */
    global = GETFIELD(PBCQ_NEST_LSI_SRC,
                      pbcq->nest_regs[PBCQ_NEST_LSI_SRC_ID]) << 3;
    if (global != local) {
        /*
         * This happens during initialization, let's come back when we
         * are properly configured
         */
        ics->offset = 0;
        return;
    }

    /* Get the base on the powerbus */
    comp = GETFIELD(PBCQ_NEST_IRSN_COMP,
                    pbcq->nest_regs[PBCQ_NEST_IRSN_COMPARE]);
    mask = GETFIELD(PBCQ_NEST_IRSN_COMP,
                    pbcq->nest_regs[PBCQ_NEST_IRSN_MASK]);
    /* IRSN mask is top-encoded: its complement + 1 is the IRQ count */
    count = ((~mask) + 1) & 0x7ffff;
    phb->total_irq = count;

    /* Sanity checks */
    if ((global + PNV_PHB3_NUM_LSI) > count) {
        phb3_error(phb, "LSIs out of reach: LSI base=%d total irq=%d", global,
                   count);
    }
    if (count > 2048) {
        phb3_error(phb, "More interrupts than supported: %d", count);
    }
    if ((comp & mask) != comp) {
        phb3_error(phb, "IRQ compare bits not in mask: comp=0x%x mask=0x%x",
                   comp, mask);
        comp &= mask;
    }

    /* Setup LSI offset */
    ics->offset = comp + global;

    /* Setup MSI offset */
    pnv_phb3_msi_update_config(&phb->msis, comp, count - PNV_PHB3_NUM_LSI);
}
  369. static void pnv_phb3_lsi_src_id_write(PnvPHB3 *phb, uint64_t val)
  370. {
  371. /* Sanitize content */
  372. val &= PHB_LSI_SRC_ID;
  373. phb->regs[PHB_LSI_SOURCE_ID >> 3] = val;
  374. pnv_phb3_remap_irqs(phb);
  375. }
/*
 * RTC invalidation: drop every DMA space's cached PE number so the
 * next access re-resolves it from the RTT. 'val' (the selective
 * invalidate request) is currently ignored.
 */
static void pnv_phb3_rtc_invalidate(PnvPHB3 *phb, uint64_t val)
{
    PnvPhb3DMASpace *ds;

    /* Always invalidate all for now ... */
    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        ds->pe_num = PHB_INVALID_PE;
    }
}
  384. static void pnv_phb3_update_msi_regions(PnvPhb3DMASpace *ds)
  385. {
  386. uint64_t cfg = ds->phb->regs[PHB_PHB3_CONFIG >> 3];
  387. if (cfg & PHB_PHB3C_32BIT_MSI_EN) {
  388. if (!memory_region_is_mapped(&ds->msi32_mr)) {
  389. memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr),
  390. 0xffff0000, &ds->msi32_mr);
  391. }
  392. } else {
  393. if (memory_region_is_mapped(&ds->msi32_mr)) {
  394. memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr),
  395. &ds->msi32_mr);
  396. }
  397. }
  398. if (cfg & PHB_PHB3C_64BIT_MSI_EN) {
  399. if (!memory_region_is_mapped(&ds->msi64_mr)) {
  400. memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr),
  401. (1ull << 60), &ds->msi64_mr);
  402. }
  403. } else {
  404. if (memory_region_is_mapped(&ds->msi64_mr)) {
  405. memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr),
  406. &ds->msi64_mr);
  407. }
  408. }
  409. }
/* Apply the MSI window configuration to every DMA space of this PHB */
static void pnv_phb3_update_all_msi_regions(PnvPHB3 *phb)
{
    PnvPhb3DMASpace *ds;

    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        pnv_phb3_update_msi_regions(ds);
    }
}
/*
 * MMIO write handler for the PHB register space: filters/masks the
 * value per register, caches it in phb->regs[], then applies side
 * effects (window remapping, IODA/RTC/FFI operations, IRQ remap).
 */
void pnv_phb3_reg_write(void *opaque, hwaddr off, uint64_t val, unsigned size)
{
    PnvPHB3 *phb = opaque;
    bool changed;

    /* Special case configuration data */
    if ((off & 0xfffc) == PHB_CONFIG_DATA) {
        pnv_phb3_config_write(phb, off & 0x3, size, val);
        return;
    }

    /* Other registers are 64-bit only */
    if (size != 8 || off & 0x7) {
        phb3_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d",
                   off, size);
        return;
    }

    /* Handle masking & filtering */
    switch (off) {
    case PHB_M64_UPPER_BITS:
        val &= 0xfffc000000000000ull;
        break;
    case PHB_Q_DMA_R:
        /*
         * This is enough logic to make SW happy but we aren't actually
         * quiescing the DMAs
         */
        if (val & PHB_Q_DMA_R_AUTORESET) {
            val = 0;
        } else {
            val &= PHB_Q_DMA_R_QUIESCE_DMA;
        }
        break;
    /* LEM stuff */
    case PHB_LEM_FIR_AND_MASK:
        phb->regs[PHB_LEM_FIR_ACCUM >> 3] &= val;
        return;
    case PHB_LEM_FIR_OR_MASK:
        phb->regs[PHB_LEM_FIR_ACCUM >> 3] |= val;
        return;
    case PHB_LEM_ERROR_AND_MASK:
        phb->regs[PHB_LEM_ERROR_MASK >> 3] &= val;
        return;
    case PHB_LEM_ERROR_OR_MASK:
        phb->regs[PHB_LEM_ERROR_MASK >> 3] |= val;
        return;
    case PHB_LEM_WOF:
        val = 0;
        break;
    }

    /* Record whether it changed */
    changed = phb->regs[off >> 3] != val;

    /* Store in register cache first */
    phb->regs[off >> 3] = val;

    /* Handle side effects */
    switch (off) {
    case PHB_PHB3_CONFIG:
        if (changed) {
            pnv_phb3_update_all_msi_regions(phb);
        }
        /* fall through */
    case PHB_M32_BASE_ADDR:
    case PHB_M32_BASE_MASK:
    case PHB_M32_START_ADDR:
        if (changed) {
            pnv_phb3_check_m32(phb);
        }
        break;
    case PHB_M64_UPPER_BITS:
        if (changed) {
            pnv_phb3_check_all_m64s(phb);
        }
        break;
    case PHB_LSI_SOURCE_ID:
        if (changed) {
            pnv_phb3_lsi_src_id_write(phb, val);
        }
        break;

    /* IODA table accesses */
    case PHB_IODA_DATA0:
        pnv_phb3_ioda_write(phb, val);
        break;

    /* RTC invalidation */
    case PHB_RTC_INVALIDATE:
        pnv_phb3_rtc_invalidate(phb, val);
        break;

    /* FFI request */
    case PHB_FFI_REQUEST:
        pnv_phb3_msi_ffi(&phb->msis, val);
        break;

    /* Silent simple writes */
    case PHB_CONFIG_ADDRESS:
    case PHB_IODA_ADDR:
    case PHB_TCE_KILL:
    case PHB_TCE_SPEC_CTL:
    case PHB_PEST_BAR:
    case PHB_PELTV_BAR:
    case PHB_RTT_BAR:
    case PHB_RBA_BAR:
    case PHB_IVT_BAR:
    case PHB_FFI_LOCK:
    case PHB_LEM_FIR_ACCUM:
    case PHB_LEM_ERROR_MASK:
    case PHB_LEM_ACTION0:
    case PHB_LEM_ACTION1:
        break;

    /* Noise on anything else */
    default:
        qemu_log_mask(LOG_UNIMP, "phb3: reg_write 0x%"PRIx64"=%"PRIx64"\n",
                      off, val);
    }
}
/*
 * MMIO read handler for the PHB register space. Most registers read
 * back from the cache; a few have read side effects or synthesized
 * values (version, link training, FFI lock, DMA read sync).
 */
uint64_t pnv_phb3_reg_read(void *opaque, hwaddr off, unsigned size)
{
    PnvPHB3 *phb = opaque;
    PCIHostState *pci = PCI_HOST_BRIDGE(phb->phb_base);
    uint64_t val;

    if ((off & 0xfffc) == PHB_CONFIG_DATA) {
        return pnv_phb3_config_read(phb, off & 0x3, size);
    }

    /* Other registers are 64-bit only */
    if (size != 8 || off & 0x7) {
        phb3_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d",
                   off, size);
        return ~0ull;
    }

    /* Default read from cache */
    val = phb->regs[off >> 3];

    switch (off) {
    /* Simulate venice DD2.0 */
    case PHB_VERSION:
        return 0x000000a300000005ull;
    case PHB_PCIE_SYSTEM_CONFIG:
        return 0x441100fc30000000;

    /* IODA table accesses */
    case PHB_IODA_DATA0:
        return pnv_phb3_ioda_read(phb);

    /* Link training always appears trained */
    case PHB_PCIE_DLP_TRAIN_CTL:
        /* ... unless nothing is connected to the root port */
        if (!pci_find_device(pci->bus, 1, 0)) {
            return 0;
        }
        return PHB_PCIE_DLP_INBAND_PRESENCE | PHB_PCIE_DLP_TC_DL_LINKACT;

    /* FFI Lock */
    case PHB_FFI_LOCK:
        /* Set lock and return previous value */
        phb->regs[off >> 3] |= PHB_FFI_LOCK_STATE;
        return val;

    /* DMA read sync: make it look like it's complete */
    case PHB_DMARD_SYNC:
        return PHB_DMARD_SYNC_COMPLETE;

    /* Silent simple reads */
    case PHB_PHB3_CONFIG:
    case PHB_M32_BASE_ADDR:
    case PHB_M32_BASE_MASK:
    case PHB_M32_START_ADDR:
    case PHB_CONFIG_ADDRESS:
    case PHB_IODA_ADDR:
    case PHB_RTC_INVALIDATE:
    case PHB_TCE_KILL:
    case PHB_TCE_SPEC_CTL:
    case PHB_PEST_BAR:
    case PHB_PELTV_BAR:
    case PHB_RTT_BAR:
    case PHB_RBA_BAR:
    case PHB_IVT_BAR:
    case PHB_M64_UPPER_BITS:
    case PHB_LEM_FIR_ACCUM:
    case PHB_LEM_ERROR_MASK:
    case PHB_LEM_ACTION0:
    case PHB_LEM_ACTION1:
        break;

    /* Noise on anything else */
    default:
        qemu_log_mask(LOG_UNIMP, "phb3: reg_read 0x%"PRIx64"=%"PRIx64"\n",
                      off, val);
    }
    return val;
}
/* MMIO ops for the PHB register space (big endian, 1-8 byte accesses) */
static const MemoryRegionOps pnv_phb3_reg_ops = {
    .read = pnv_phb3_reg_read,
    .write = pnv_phb3_reg_write,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};
/* Map a device INTx pin to one of the 4 PHB LSI lines */
static int pnv_phb3_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /* Check that out properly ... */
    return irq_num & 3;
}
  608. static void pnv_phb3_set_irq(void *opaque, int irq_num, int level)
  609. {
  610. PnvPHB3 *phb = opaque;
  611. /* LSI only ... */
  612. if (irq_num > 3) {
  613. phb3_error(phb, "Unknown IRQ to set %d", irq_num);
  614. }
  615. qemu_set_irq(phb->qirqs[irq_num], level);
  616. }
  617. static bool pnv_phb3_resolve_pe(PnvPhb3DMASpace *ds)
  618. {
  619. uint64_t rtt, addr;
  620. uint16_t rte;
  621. int bus_num;
  622. /* Already resolved ? */
  623. if (ds->pe_num != PHB_INVALID_PE) {
  624. return true;
  625. }
  626. /* We need to lookup the RTT */
  627. rtt = ds->phb->regs[PHB_RTT_BAR >> 3];
  628. if (!(rtt & PHB_RTT_BAR_ENABLE)) {
  629. phb3_error(ds->phb, "DMA with RTT BAR disabled !");
  630. /* Set error bits ? fence ? ... */
  631. return false;
  632. }
  633. /* Read RTE */
  634. bus_num = pci_bus_num(ds->bus);
  635. addr = rtt & PHB_RTT_BASE_ADDRESS_MASK;
  636. addr += 2 * ((bus_num << 8) | ds->devfn);
  637. if (dma_memory_read(&address_space_memory, addr, &rte,
  638. sizeof(rte), MEMTXATTRS_UNSPECIFIED)) {
  639. phb3_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr);
  640. /* Set error bits ? fence ? ... */
  641. return false;
  642. }
  643. rte = be16_to_cpu(rte);
  644. /* Fail upon reading of invalid PE# */
  645. if (rte >= PNV_PHB3_NUM_PE) {
  646. phb3_error(ds->phb, "RTE for RID 0x%x invalid (%04x", ds->devfn, rte);
  647. /* Set error bits ? fence ? ... */
  648. return false;
  649. }
  650. ds->pe_num = rte;
  651. return true;
  652. }
  653. static void pnv_phb3_translate_tve(PnvPhb3DMASpace *ds, hwaddr addr,
  654. bool is_write, uint64_t tve,
  655. IOMMUTLBEntry *tlb)
  656. {
  657. uint64_t tta = GETFIELD(IODA2_TVT_TABLE_ADDR, tve);
  658. int32_t lev = GETFIELD(IODA2_TVT_NUM_LEVELS, tve);
  659. uint32_t tts = GETFIELD(IODA2_TVT_TCE_TABLE_SIZE, tve);
  660. uint32_t tps = GETFIELD(IODA2_TVT_IO_PSIZE, tve);
  661. PnvPHB3 *phb = ds->phb;
  662. /* Invalid levels */
  663. if (lev > 4) {
  664. phb3_error(phb, "Invalid #levels in TVE %d", lev);
  665. return;
  666. }
  667. /* IO Page Size of 0 means untranslated, else use TCEs */
  668. if (tps == 0) {
  669. /*
  670. * We only support non-translate in top window.
  671. *
  672. * TODO: Venice/Murano support it on bottom window above 4G and
  673. * Naples suports it on everything
  674. */
  675. if (!(tve & PPC_BIT(51))) {
  676. phb3_error(phb, "xlate for invalid non-translate TVE");
  677. return;
  678. }
  679. /* TODO: Handle boundaries */
  680. /* Use 4k pages like q35 ... for now */
  681. tlb->iova = addr & 0xfffffffffffff000ull;
  682. tlb->translated_addr = addr & 0x0003fffffffff000ull;
  683. tlb->addr_mask = 0xfffull;
  684. tlb->perm = IOMMU_RW;
  685. } else {
  686. uint32_t tce_shift, tbl_shift, sh;
  687. uint64_t base, taddr, tce, tce_mask;
  688. /* TVE disabled ? */
  689. if (tts == 0) {
  690. phb3_error(phb, "xlate for invalid translated TVE");
  691. return;
  692. }
  693. /* Address bits per bottom level TCE entry */
  694. tce_shift = tps + 11;
  695. /* Address bits per table level */
  696. tbl_shift = tts + 8;
  697. /* Top level table base address */
  698. base = tta << 12;
  699. /* Total shift to first level */
  700. sh = tbl_shift * lev + tce_shift;
  701. /* TODO: Multi-level untested */
  702. do {
  703. lev--;
  704. /* Grab the TCE address */
  705. taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3);
  706. if (dma_memory_read(&address_space_memory, taddr, &tce,
  707. sizeof(tce), MEMTXATTRS_UNSPECIFIED)) {
  708. phb3_error(phb, "Failed to read TCE at 0x%"PRIx64, taddr);
  709. return;
  710. }
  711. tce = be64_to_cpu(tce);
  712. /* Check permission for indirect TCE */
  713. if ((lev >= 0) && !(tce & 3)) {
  714. phb3_error(phb, "Invalid indirect TCE at 0x%"PRIx64, taddr);
  715. phb3_error(phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
  716. is_write ? 'W' : 'R', tve);
  717. phb3_error(phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
  718. tta, lev, tts, tps);
  719. return;
  720. }
  721. sh -= tbl_shift;
  722. base = tce & ~0xfffull;
  723. } while (lev >= 0);
  724. /* We exit the loop with TCE being the final TCE */
  725. if ((is_write & !(tce & 2)) || ((!is_write) && !(tce & 1))) {
  726. phb3_error(phb, "TCE access fault at 0x%"PRIx64, taddr);
  727. phb3_error(phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr,
  728. is_write ? 'W' : 'R', tve);
  729. phb3_error(phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d",
  730. tta, lev, tts, tps);
  731. return;
  732. }
  733. tce_mask = ~((1ull << tce_shift) - 1);
  734. tlb->iova = addr & tce_mask;
  735. tlb->translated_addr = tce & tce_mask;
  736. tlb->addr_mask = ~tce_mask;
  737. tlb->perm = tce & 3;
  738. }
  739. }
  740. static IOMMUTLBEntry pnv_phb3_translate_iommu(IOMMUMemoryRegion *iommu,
  741. hwaddr addr,
  742. IOMMUAccessFlags flag,
  743. int iommu_idx)
  744. {
  745. PnvPhb3DMASpace *ds = container_of(iommu, PnvPhb3DMASpace, dma_mr);
  746. int tve_sel;
  747. uint64_t tve, cfg;
  748. IOMMUTLBEntry ret = {
  749. .target_as = &address_space_memory,
  750. .iova = addr,
  751. .translated_addr = 0,
  752. .addr_mask = ~(hwaddr)0,
  753. .perm = IOMMU_NONE,
  754. };
  755. PnvPHB3 *phb = ds->phb;
  756. /* Resolve PE# */
  757. if (!pnv_phb3_resolve_pe(ds)) {
  758. phb3_error(phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x",
  759. ds->bus, pci_bus_num(ds->bus), ds->devfn);
  760. return ret;
  761. }
  762. /* Check top bits */
  763. switch (addr >> 60) {
  764. case 00:
  765. /* DMA or 32-bit MSI ? */
  766. cfg = ds->phb->regs[PHB_PHB3_CONFIG >> 3];
  767. if ((cfg & PHB_PHB3C_32BIT_MSI_EN) &&
  768. ((addr & 0xffffffffffff0000ull) == 0xffff0000ull)) {
  769. phb3_error(phb, "xlate on 32-bit MSI region");
  770. return ret;
  771. }
  772. /* Choose TVE XXX Use PHB3 Control Register */
  773. tve_sel = (addr >> 59) & 1;
  774. tve = ds->phb->ioda_TVT[ds->pe_num * 2 + tve_sel];
  775. pnv_phb3_translate_tve(ds, addr, flag & IOMMU_WO, tve, &ret);
  776. break;
  777. case 01:
  778. phb3_error(phb, "xlate on 64-bit MSI region");
  779. break;
  780. default:
  781. phb3_error(phb, "xlate on unsupported address 0x%"PRIx64, addr);
  782. }
  783. return ret;
  784. }
/* QOM type for the per-device IOMMU memory region of this PHB */
#define TYPE_PNV_PHB3_IOMMU_MEMORY_REGION "pnv-phb3-iommu-memory-region"
DECLARE_INSTANCE_CHECKER(IOMMUMemoryRegion, PNV_PHB3_IOMMU_MEMORY_REGION,
                         TYPE_PNV_PHB3_IOMMU_MEMORY_REGION)

static void pnv_phb3_iommu_memory_region_class_init(ObjectClass *klass,
                                                    void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    /* Only the translate hook is needed; everything else is default */
    imrc->translate = pnv_phb3_translate_iommu;
}

static const TypeInfo pnv_phb3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_PNV_PHB3_IOMMU_MEMORY_REGION,
    .class_init = pnv_phb3_iommu_memory_region_class_init,
};
/*
 * MSI/MSIX memory region implementation.
 * The handler handles both MSI and MSIX.
 */
static void pnv_phb3_msi_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    PnvPhb3DMASpace *ds = opaque;

    /* Resolve PE# so the MSI can be validated against its IVT entries */
    if (!pnv_phb3_resolve_pe(ds)) {
        phb3_error(ds->phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x",
                   ds->bus, pci_bus_num(ds->bus), ds->devfn);
        return;
    }

    pnv_phb3_msi_send(&ds->phb->msis, addr, data, ds->pe_num);
}
/* There is no .read as the read result is undefined by PCI spec */
static uint64_t pnv_phb3_msi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvPhb3DMASpace *ds = opaque;

    phb3_error(ds->phb, "invalid read @ 0x%" HWADDR_PRIx, addr);
    return -1;
}
/* MMIO ops for the 32-bit and 64-bit MSI windows (little endian) */
static const MemoryRegionOps pnv_phb3_msi_ops = {
    .read = pnv_phb3_msi_read,
    .write = pnv_phb3_msi_write,
    .endianness = DEVICE_LITTLE_ENDIAN
};
/*
 * pci_setup_iommu() callback: return (creating on first use) the DMA
 * address space for a given bus/devfn. Each new space gets its IOMMU
 * region, MSI windows and an invalid (unresolved) PE number.
 */
static AddressSpace *pnv_phb3_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    PnvPHB3 *phb = opaque;
    PnvPhb3DMASpace *ds;

    /* Look for an existing space for this bus/devfn */
    QLIST_FOREACH(ds, &phb->dma_spaces, list) {
        if (ds->bus == bus && ds->devfn == devfn) {
            break;
        }
    }

    if (ds == NULL) {
        ds = g_new0(PnvPhb3DMASpace, 1);
        ds->bus = bus;
        ds->devfn = devfn;
        ds->pe_num = PHB_INVALID_PE;  /* resolved lazily from the RTT */
        ds->phb = phb;
        memory_region_init_iommu(&ds->dma_mr, sizeof(ds->dma_mr),
                                 TYPE_PNV_PHB3_IOMMU_MEMORY_REGION,
                                 OBJECT(phb), "phb3_iommu", UINT64_MAX);
        address_space_init(&ds->dma_as, MEMORY_REGION(&ds->dma_mr),
                           "phb3_iommu");
        memory_region_init_io(&ds->msi32_mr, OBJECT(phb), &pnv_phb3_msi_ops,
                              ds, "msi32", 0x10000);
        memory_region_init_io(&ds->msi64_mr, OBJECT(phb), &pnv_phb3_msi_ops,
                              ds, "msi64", 0x100000);
        pnv_phb3_update_msi_regions(ds);

        QLIST_INSERT_HEAD(&phb->dma_spaces, ds, list);
    }
    return &ds->dma_as;
}
/* QOM instance init: create the child interrupt sources and PBCQ */
static void pnv_phb3_instance_init(Object *obj)
{
    PnvPHB3 *phb = PNV_PHB3(obj);

    QLIST_INIT(&phb->dma_spaces);

    /* LSI sources */
    object_initialize_child(obj, "lsi", &phb->lsis, TYPE_ICS);

    /* Default init ... will be fixed by HW inits */
    phb->lsis.offset = 0;

    /* MSI sources */
    object_initialize_child(obj, "msi", &phb->msis, TYPE_PHB3_MSI);

    /* Power Bus Common Queue */
    object_initialize_child(obj, "pbcq", &phb->pbcq, TYPE_PNV_PBCQ);
}
  869. void pnv_phb3_bus_init(DeviceState *dev, PnvPHB3 *phb)
  870. {
  871. PCIHostState *pci = PCI_HOST_BRIDGE(dev);
  872. /*
  873. * PHB3 doesn't support IO space. However, qemu gets very upset if
  874. * we don't have an IO region to anchor IO BARs onto so we just
  875. * initialize one which we never hook up to anything
  876. */
  877. memory_region_init(&phb->pci_io, OBJECT(phb), "pci-io", 0x10000);
  878. memory_region_init(&phb->pci_mmio, OBJECT(phb), "pci-mmio",
  879. PCI_MMIO_TOTAL_SIZE);
  880. pci->bus = pci_register_root_bus(dev,
  881. dev->id ? dev->id : NULL,
  882. pnv_phb3_set_irq, pnv_phb3_map_irq, phb,
  883. &phb->pci_mmio, &phb->pci_io,
  884. 0, 4, TYPE_PNV_PHB3_ROOT_BUS);
  885. object_property_set_int(OBJECT(pci->bus), "phb-id", phb->phb_id,
  886. &error_abort);
  887. object_property_set_int(OBJECT(pci->bus), "chip-id", phb->chip_id,
  888. &error_abort);
  889. pci_setup_iommu(pci->bus, pnv_phb3_dma_iommu, phb);
  890. }
  891. static void pnv_phb3_realize(DeviceState *dev, Error **errp)
  892. {
  893. PnvPHB3 *phb = PNV_PHB3(dev);
  894. PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
  895. int i;
  896. if (phb->phb_id >= PNV_CHIP_GET_CLASS(phb->chip)->num_phbs) {
  897. error_setg(errp, "invalid PHB index: %d", phb->phb_id);
  898. return;
  899. }
  900. /* LSI sources */
  901. object_property_set_link(OBJECT(&phb->lsis), "xics", OBJECT(pnv),
  902. &error_abort);
  903. object_property_set_int(OBJECT(&phb->lsis), "nr-irqs", PNV_PHB3_NUM_LSI,
  904. &error_abort);
  905. if (!qdev_realize(DEVICE(&phb->lsis), NULL, errp)) {
  906. return;
  907. }
  908. for (i = 0; i < phb->lsis.nr_irqs; i++) {
  909. ics_set_irq_type(&phb->lsis, i, true);
  910. }
  911. phb->qirqs = qemu_allocate_irqs(ics_set_irq, &phb->lsis, phb->lsis.nr_irqs);
  912. /* MSI sources */
  913. object_property_set_link(OBJECT(&phb->msis), "phb", OBJECT(phb),
  914. &error_abort);
  915. object_property_set_link(OBJECT(&phb->msis), "xics", OBJECT(pnv),
  916. &error_abort);
  917. object_property_set_int(OBJECT(&phb->msis), "nr-irqs", PHB3_MAX_MSI,
  918. &error_abort);
  919. if (!qdev_realize(DEVICE(&phb->msis), NULL, errp)) {
  920. return;
  921. }
  922. /* Power Bus Common Queue */
  923. object_property_set_link(OBJECT(&phb->pbcq), "phb", OBJECT(phb),
  924. &error_abort);
  925. if (!qdev_realize(DEVICE(&phb->pbcq), NULL, errp)) {
  926. return;
  927. }
  928. /* Controller Registers */
  929. memory_region_init_io(&phb->mr_regs, OBJECT(phb), &pnv_phb3_reg_ops, phb,
  930. "phb3-regs", 0x1000);
  931. }
  932. void pnv_phb3_update_regions(PnvPHB3 *phb)
  933. {
  934. PnvPBCQState *pbcq = &phb->pbcq;
  935. /* Unmap first always */
  936. if (memory_region_is_mapped(&phb->mr_regs)) {
  937. memory_region_del_subregion(&pbcq->phbbar, &phb->mr_regs);
  938. }
  939. /* Map registers if enabled */
  940. if (memory_region_is_mapped(&pbcq->phbbar)) {
  941. /* TODO: We should use the PHB BAR 2 register but we don't ... */
  942. memory_region_add_subregion(&pbcq->phbbar, 0, &phb->mr_regs);
  943. }
  944. /* Check/update m32 */
  945. if (memory_region_is_mapped(&phb->mr_m32)) {
  946. pnv_phb3_check_m32(phb);
  947. }
  948. pnv_phb3_check_all_m64s(phb);
  949. }
/* qdev properties, wired up by the owning chip model */
static Property pnv_phb3_properties[] = {
    /* PHB index on the chip (checked against num_phbs at realize) */
    DEFINE_PROP_UINT32("index", PnvPHB3, phb_id, 0),
    /* Identifier of the chip this PHB belongs to */
    DEFINE_PROP_UINT32("chip-id", PnvPHB3, chip_id, 0),
    /* Back-link to the owning chip object */
    DEFINE_PROP_LINK("chip", PnvPHB3, chip, TYPE_PNV_CHIP, PnvChip *),
    /* Link to the generic PHB wrapper device */
    DEFINE_PROP_LINK("phb-base", PnvPHB3, phb_base, TYPE_PNV_PHB, PnvPHB *),
    DEFINE_PROP_END_OF_LIST(),
};
  957. static void pnv_phb3_class_init(ObjectClass *klass, void *data)
  958. {
  959. DeviceClass *dc = DEVICE_CLASS(klass);
  960. dc->realize = pnv_phb3_realize;
  961. device_class_set_props(dc, pnv_phb3_properties);
  962. dc->user_creatable = false;
  963. }
/* QOM type registration record for the PHB3 device */
static const TypeInfo pnv_phb3_type_info = {
    .name = TYPE_PNV_PHB3,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(PnvPHB3),
    .class_init = pnv_phb3_class_init,
    .instance_init = pnv_phb3_instance_init,
};
  971. static void pnv_phb3_root_bus_get_prop(Object *obj, Visitor *v,
  972. const char *name,
  973. void *opaque, Error **errp)
  974. {
  975. PnvPHB3RootBus *bus = PNV_PHB3_ROOT_BUS(obj);
  976. uint64_t value = 0;
  977. if (strcmp(name, "phb-id") == 0) {
  978. value = bus->phb_id;
  979. } else {
  980. value = bus->chip_id;
  981. }
  982. visit_type_size(v, name, &value, errp);
  983. }
  984. static void pnv_phb3_root_bus_set_prop(Object *obj, Visitor *v,
  985. const char *name,
  986. void *opaque, Error **errp)
  987. {
  988. PnvPHB3RootBus *bus = PNV_PHB3_ROOT_BUS(obj);
  989. uint64_t value;
  990. if (!visit_type_size(v, name, &value, errp)) {
  991. return;
  992. }
  993. if (strcmp(name, "phb-id") == 0) {
  994. bus->phb_id = value;
  995. } else {
  996. bus->chip_id = value;
  997. }
  998. }
  999. static void pnv_phb3_root_bus_class_init(ObjectClass *klass, void *data)
  1000. {
  1001. BusClass *k = BUS_CLASS(klass);
  1002. object_class_property_add(klass, "phb-id", "int",
  1003. pnv_phb3_root_bus_get_prop,
  1004. pnv_phb3_root_bus_set_prop,
  1005. NULL, NULL);
  1006. object_class_property_add(klass, "chip-id", "int",
  1007. pnv_phb3_root_bus_get_prop,
  1008. pnv_phb3_root_bus_set_prop,
  1009. NULL, NULL);
  1010. /*
  1011. * PHB3 has only a single root complex. Enforce the limit on the
  1012. * parent bus
  1013. */
  1014. k->max_dev = 1;
  1015. }
/* QOM type registration record for the PHB3 root bus */
static const TypeInfo pnv_phb3_root_bus_info = {
    .name = TYPE_PNV_PHB3_ROOT_BUS,
    .parent = TYPE_PCIE_BUS,
    .instance_size = sizeof(PnvPHB3RootBus),
    .class_init = pnv_phb3_root_bus_class_init,
};
  1022. static void pnv_phb3_register_types(void)
  1023. {
  1024. type_register_static(&pnv_phb3_root_bus_info);
  1025. type_register_static(&pnv_phb3_type_info);
  1026. type_register_static(&pnv_phb3_iommu_memory_region_info);
  1027. }
  1028. type_init(pnv_phb3_register_types)