s390-pci-bus.c 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573
  1. /*
  2. * s390 PCI BUS
  3. *
  4. * Copyright 2014 IBM Corp.
  5. * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
  6. * Hong Bo Li <lihbbj@cn.ibm.com>
  7. * Yi Min Zhao <zyimin@cn.ibm.com>
  8. *
  9. * This work is licensed under the terms of the GNU GPL, version 2 or (at
  10. * your option) any later version. See the COPYING file in the top-level
  11. * directory.
  12. */
  13. #include "qemu/osdep.h"
  14. #include "qapi/error.h"
  15. #include "qapi/visitor.h"
  16. #include "hw/s390x/s390-pci-bus.h"
  17. #include "hw/s390x/s390-pci-inst.h"
  18. #include "hw/s390x/s390-pci-kvm.h"
  19. #include "hw/s390x/s390-pci-vfio.h"
  20. #include "hw/pci/pci_bus.h"
  21. #include "hw/qdev-properties.h"
  22. #include "hw/pci/pci_bridge.h"
  23. #include "hw/pci/msi.h"
  24. #include "qemu/error-report.h"
  25. #include "qemu/module.h"
  26. #include "system/reset.h"
  27. #include "system/runstate.h"
  28. #include "trace.h"
  29. S390pciState *s390_get_phb(void)
  30. {
  31. static S390pciState *phb;
  32. if (!phb) {
  33. phb = S390_PCI_HOST_BRIDGE(
  34. object_resolve_path(TYPE_S390_PCI_HOST_BRIDGE, NULL));
  35. assert(phb != NULL);
  36. }
  37. return phb;
  38. }
/*
 * Dequeue the oldest pending SEI (store event information) container and
 * fill in the CHSC NT2 response block for it.
 *
 * Returns 0 when an event was copied into @res, 1 when no event was pending.
 */
int pci_chsc_sei_nt2_get_event(void *res)
{
    ChscSeiNt2Res *nt2_res = (ChscSeiNt2Res *)res;
    PciCcdfAvail *accdf;
    PciCcdfErr *eccdf;
    int rc = 1;
    SeiContainer *sei_cont;
    S390pciState *s = s390_get_phb();

    sei_cont = QTAILQ_FIRST(&s->pending_sei);
    if (sei_cont) {
        QTAILQ_REMOVE(&s->pending_sei, sei_cont, link);
        nt2_res->nt = 2;
        nt2_res->cc = sei_cont->cc;
        nt2_res->length = cpu_to_be16(sizeof(ChscSeiNt2Res));
        switch (sei_cont->cc) {
        case 1: /* error event */
            eccdf = (PciCcdfErr *)nt2_res->ccdf;
            /* All response fields are stored big-endian for the guest. */
            eccdf->fid = cpu_to_be32(sei_cont->fid);
            eccdf->fh = cpu_to_be32(sei_cont->fh);
            eccdf->e = cpu_to_be32(sei_cont->e);
            eccdf->faddr = cpu_to_be64(sei_cont->faddr);
            eccdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        case 2: /* availability event */
            accdf = (PciCcdfAvail *)nt2_res->ccdf;
            accdf->fid = cpu_to_be32(sei_cont->fid);
            accdf->fh = cpu_to_be32(sei_cont->fh);
            accdf->pec = cpu_to_be16(sei_cont->pec);
            break;
        default:
            /* Only cc 1 and 2 are ever queued (see s390_pci_generate_event). */
            abort();
        }
        g_free(sei_cont);
        rc = 0;
    }

    return rc;
}
  76. int pci_chsc_sei_nt2_have_event(void)
  77. {
  78. S390pciState *s = s390_get_phb();
  79. return !QTAILQ_EMPTY(&s->pending_sei);
  80. }
  81. S390PCIBusDevice *s390_pci_find_next_avail_dev(S390pciState *s,
  82. S390PCIBusDevice *pbdev)
  83. {
  84. S390PCIBusDevice *ret = pbdev ? QTAILQ_NEXT(pbdev, link) :
  85. QTAILQ_FIRST(&s->zpci_devs);
  86. while (ret && ret->state == ZPCI_FS_RESERVED) {
  87. ret = QTAILQ_NEXT(ret, link);
  88. }
  89. return ret;
  90. }
  91. S390PCIBusDevice *s390_pci_find_dev_by_fid(S390pciState *s, uint32_t fid)
  92. {
  93. S390PCIBusDevice *pbdev;
  94. QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
  95. if (pbdev->fid == fid) {
  96. return pbdev;
  97. }
  98. }
  99. return NULL;
  100. }
/*
 * Handle an SCLP "configure PCI adapter" request: move the function
 * identified by the adapter id from standby to the disabled state.
 */
void s390_pci_sclp_configure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        trace_s390_pci_sclp_nodev("configure", be32_to_cpu(psccb->aid));
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        /* Configuring only reaches "disabled"; the guest enables via CLP. */
        pbdev->state = ZPCI_FS_DISABLED;
        rc = SCLP_RC_NORMAL_COMPLETION;
        break;
    default:
        /* Already configured - nothing to do. */
        rc = SCLP_RC_NO_ACTION_REQUIRED;
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}
/*
 * Shutdown notifier: reset the attached PCI function. Registered for ISM
 * devices (see the ZPCI_PFT_ISM handling in s390_pci_perform_unplug).
 */
static void s390_pci_shutdown_notifier(Notifier *n, void *opaque)
{
    S390PCIBusDevice *pbdev = container_of(n, S390PCIBusDevice,
                                           shutdown_notifier);

    pci_device_reset(pbdev->pdev);
}
/*
 * Tear down both halves of a zPCI device: the attached PCI device (if any)
 * and then the zPCI proxy device itself.
 */
static void s390_pci_perform_unplug(S390PCIBusDevice *pbdev)
{
    HotplugHandler *hotplug_ctrl;

    /* ISM devices registered a shutdown notifier; drop it before unplug. */
    if (pbdev->pft == ZPCI_PFT_ISM) {
        notifier_remove(&pbdev->shutdown_notifier);
    }

    /* Unplug the PCI device */
    if (pbdev->pdev) {
        DeviceState *pdev = DEVICE(pbdev->pdev);

        hotplug_ctrl = qdev_get_hotplug_handler(pdev);
        hotplug_handler_unplug(hotplug_ctrl, pdev, &error_abort);
        object_unparent(OBJECT(pdev));
    }

    /* Unplug the zPCI device */
    hotplug_ctrl = qdev_get_hotplug_handler(DEVICE(pbdev));
    hotplug_handler_unplug(hotplug_ctrl, DEVICE(pbdev), &error_abort);
    object_unparent(OBJECT(pbdev));
}
/*
 * Handle an SCLP "deconfigure PCI adapter" request: release interrupt and
 * IOMMU resources, return the function to standby, and complete a pending
 * hot-unplug if one was requested.
 */
void s390_pci_sclp_deconfigure(SCCB *sccb)
{
    IoaCfgSccb *psccb = (IoaCfgSccb *)sccb;
    S390PCIBusDevice *pbdev = s390_pci_find_dev_by_fid(s390_get_phb(),
                                                       be32_to_cpu(psccb->aid));
    uint16_t rc;

    if (!pbdev) {
        trace_s390_pci_sclp_nodev("deconfigure", be32_to_cpu(psccb->aid));
        rc = SCLP_RC_ADAPTER_ID_NOT_RECOGNIZED;
        goto out;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        rc = SCLP_RC_ADAPTER_IN_RESERVED_STATE;
        break;
    case ZPCI_FS_STANDBY:
        rc = SCLP_RC_NO_ACTION_REQUIRED;
        break;
    default:
        if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
            /* Interpreted devices were using interrupt forwarding */
            s390_pci_kvm_aif_disable(pbdev);
        } else if (pbdev->summary_ind) {
            /* Emulated adapter-interrupt registration */
            pci_dereg_irqs(pbdev);
        }
        if (pbdev->iommu->enabled) {
            pci_dereg_ioat(pbdev->iommu);
        }
        pbdev->state = ZPCI_FS_STANDBY;
        rc = SCLP_RC_NORMAL_COMPLETION;

        /* Deconfigure completes a guest-acknowledged unplug, if pending. */
        if (pbdev->unplug_requested) {
            s390_pci_perform_unplug(pbdev);
        }
    }
out:
    psccb->header.response_code = cpu_to_be16(rc);
}
  187. static S390PCIBusDevice *s390_pci_find_dev_by_uid(S390pciState *s, uint16_t uid)
  188. {
  189. S390PCIBusDevice *pbdev;
  190. QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
  191. if (pbdev->uid == uid) {
  192. return pbdev;
  193. }
  194. }
  195. return NULL;
  196. }
  197. S390PCIBusDevice *s390_pci_find_dev_by_target(S390pciState *s,
  198. const char *target)
  199. {
  200. S390PCIBusDevice *pbdev;
  201. if (!target) {
  202. return NULL;
  203. }
  204. QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
  205. if (!strcmp(pbdev->target, target)) {
  206. return pbdev;
  207. }
  208. }
  209. return NULL;
  210. }
  211. static S390PCIBusDevice *s390_pci_find_dev_by_pci(S390pciState *s,
  212. PCIDevice *pci_dev)
  213. {
  214. S390PCIBusDevice *pbdev;
  215. if (!pci_dev) {
  216. return NULL;
  217. }
  218. QTAILQ_FOREACH(pbdev, &s->zpci_devs, link) {
  219. if (pbdev->pdev == pci_dev) {
  220. return pbdev;
  221. }
  222. }
  223. return NULL;
  224. }
  225. S390PCIBusDevice *s390_pci_find_dev_by_idx(S390pciState *s, uint32_t idx)
  226. {
  227. return g_hash_table_lookup(s->zpci_table, &idx);
  228. }
  229. S390PCIBusDevice *s390_pci_find_dev_by_fh(S390pciState *s, uint32_t fh)
  230. {
  231. uint32_t idx = FH_MASK_INDEX & fh;
  232. S390PCIBusDevice *pbdev = s390_pci_find_dev_by_idx(s, idx);
  233. if (pbdev && pbdev->fh == fh) {
  234. return pbdev;
  235. }
  236. return NULL;
  237. }
  238. static void s390_pci_generate_event(uint8_t cc, uint16_t pec, uint32_t fh,
  239. uint32_t fid, uint64_t faddr, uint32_t e)
  240. {
  241. SeiContainer *sei_cont;
  242. S390pciState *s = s390_get_phb();
  243. sei_cont = g_new0(SeiContainer, 1);
  244. sei_cont->fh = fh;
  245. sei_cont->fid = fid;
  246. sei_cont->cc = cc;
  247. sei_cont->pec = pec;
  248. sei_cont->faddr = faddr;
  249. sei_cont->e = e;
  250. QTAILQ_INSERT_TAIL(&s->pending_sei, sei_cont, link);
  251. css_generate_css_crws(0);
  252. }
/* Queue an availability (hotplug) event: content code 2. */
static void s390_pci_generate_plug_event(uint16_t pec, uint32_t fh,
                                         uint32_t fid)
{
    s390_pci_generate_event(2, pec, fh, fid, 0, 0);
}

/* Queue an error event: content code 1. */
void s390_pci_generate_error_event(uint16_t pec, uint32_t fh, uint32_t fid,
                                   uint64_t faddr, uint32_t e)
{
    s390_pci_generate_event(1, pec, fh, fid, faddr, e);
}
/*
 * Legacy INTx is not used on zPCI (interrupts are delivered as adapter
 * interrupts), so the root-bus IRQ callbacks are stubs.
 */
static void s390_pci_set_irq(void *opaque, int irq, int level)
{
    /* nothing to do */
}

static int s390_pci_map_irq(PCIDevice *pci_dev, int irq_num)
{
    /* nothing to do */
    return 0;
}
/* Strip the designation-type flag from an IOTA, leaving the table origin. */
static uint64_t s390_pci_get_table_origin(uint64_t iota)
{
    return iota & ~ZPCI_IOTA_RTTO_FLAG;
}

/* Region-table index of an IOVA. */
static unsigned int calc_rtx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
}

/* Segment-table index of an IOVA. */
static unsigned int calc_sx(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
}

/* Page-table index of an IOVA. */
static unsigned int calc_px(dma_addr_t ptr)
{
    return ((unsigned long) ptr >> TARGET_PAGE_BITS) & ZPCI_PT_MASK;
}

/* Segment-table origin from a region-table entry; 0 if the type is wrong. */
static uint64_t get_rt_sto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
            ? (entry & ZPCI_RTE_ADDR_MASK)
            : 0;
}

/* Page-table origin from a segment-table entry; 0 if the type is wrong. */
static uint64_t get_st_pto(uint64_t entry)
{
    return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
            ? (entry & ZPCI_STE_ADDR_MASK)
            : 0;
}

/* Validity check for region/segment-table entries. */
static bool rt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
}

/* Validity check for page-table entries. */
static bool pt_entry_isvalid(uint64_t entry)
{
    return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
}

/* True if the entry is marked write-protected. */
static bool entry_isprotected(uint64_t entry)
{
    return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
}
  312. /* ett is expected table type, -1 page table, 0 segment table, 1 region table */
  313. static uint64_t get_table_index(uint64_t iova, int8_t ett)
  314. {
  315. switch (ett) {
  316. case ZPCI_ETT_PT:
  317. return calc_px(iova);
  318. case ZPCI_ETT_ST:
  319. return calc_sx(iova);
  320. case ZPCI_ETT_RT:
  321. return calc_rtx(iova);
  322. }
  323. return -1;
  324. }
  325. static bool entry_isvalid(uint64_t entry, int8_t ett)
  326. {
  327. switch (ett) {
  328. case ZPCI_ETT_PT:
  329. return pt_entry_isvalid(entry);
  330. case ZPCI_ETT_ST:
  331. case ZPCI_ETT_RT:
  332. return rt_entry_isvalid(entry);
  333. }
  334. return false;
  335. }
  336. /* Return true if address translation is done */
  337. static bool translate_iscomplete(uint64_t entry, int8_t ett)
  338. {
  339. switch (ett) {
  340. case 0:
  341. return (entry & ZPCI_TABLE_FC) ? true : false;
  342. case 1:
  343. return false;
  344. }
  345. return true;
  346. }
  347. static uint64_t get_frame_size(int8_t ett)
  348. {
  349. switch (ett) {
  350. case ZPCI_ETT_PT:
  351. return 1ULL << 12;
  352. case ZPCI_ETT_ST:
  353. return 1ULL << 20;
  354. case ZPCI_ETT_RT:
  355. return 1ULL << 31;
  356. }
  357. return 0;
  358. }
  359. static uint64_t get_next_table_origin(uint64_t entry, int8_t ett)
  360. {
  361. switch (ett) {
  362. case ZPCI_ETT_PT:
  363. return entry & ZPCI_PTE_ADDR_MASK;
  364. case ZPCI_ETT_ST:
  365. return get_st_pto(entry);
  366. case ZPCI_ETT_RT:
  367. return get_rt_sto(entry);
  368. }
  369. return 0;
  370. }
/**
 * table_translate: do translation within one table and return the following
 * table origin
 *
 * @entry: the entry being translated, the result is stored in this.
 * @to: the address of table origin.
 * @ett: expected table type, 1 region table, 0 segment table and -1 page table.
 * @error: error code
 *
 * Returns the origin of the next (lower) table, or 0 when the walk ends:
 * either the translation completed or an error/invalid entry was hit.
 */
static uint64_t table_translate(S390IOTLBEntry *entry, uint64_t to, int8_t ett,
                                uint16_t *error)
{
    uint64_t tx, te, nto = 0;
    uint16_t err = 0;

    tx = get_table_index(entry->iova, ett);
    /* Fetch the table entry from guest memory. */
    te = address_space_ldq(&address_space_memory, to + tx * sizeof(uint64_t),
                           MEMTXATTRS_UNSPECIFIED, NULL);
    if (!te) {
        err = ERR_EVENT_INVALTE;
        goto out;
    }

    if (!entry_isvalid(te, ett)) {
        /* Invalid entry: strip all access, but no error event. */
        entry->perm &= IOMMU_NONE;
        goto out;
    }

    /* Region-table entries must span the full length with no offset. */
    if (ett == ZPCI_ETT_RT && ((te & ZPCI_TABLE_LEN_RTX) != ZPCI_TABLE_LEN_RTX
                               || te & ZPCI_TABLE_OFFSET_MASK)) {
        err = ERR_EVENT_INVALTL;
        goto out;
    }

    nto = get_next_table_origin(te, ett);
    if (!nto) {
        err = ERR_EVENT_TT;
        goto out;
    }

    /* Narrow the accumulated permissions by this level's protection bit. */
    if (entry_isprotected(te)) {
        entry->perm &= IOMMU_RO;
    } else {
        entry->perm &= IOMMU_RW;
    }

    if (translate_iscomplete(te, ett)) {
        switch (ett) {
        case ZPCI_ETT_PT:
            entry->translated_addr = te & ZPCI_PTE_ADDR_MASK;
            break;
        case ZPCI_ETT_ST:
            /* Frame-mapped segment: keep the offset within the segment. */
            entry->translated_addr = (te & ZPCI_SFAA_MASK) |
                                     (entry->iova & ~ZPCI_SFAA_MASK);
            break;
        }
        /* Translation done - stop the walk. */
        nto = 0;
    }
out:
    if (err) {
        entry->perm = IOMMU_NONE;
        *error = err;
    }
    entry->len = get_frame_size(ett);
    return nto;
}
/*
 * Walk the guest's I/O translation tables for @addr, starting from the
 * region table designated by @g_iota, filling in @entry.
 *
 * Returns 0 on success or an ERR_EVENT_* code on translation failure.
 */
uint16_t s390_guest_io_table_walk(uint64_t g_iota, hwaddr addr,
                                  S390IOTLBEntry *entry)
{
    uint64_t to = s390_pci_get_table_origin(g_iota);
    int8_t ett = 1;    /* start at the region table */
    uint16_t error = 0;

    entry->iova = addr & TARGET_PAGE_MASK;
    entry->translated_addr = 0;
    entry->perm = IOMMU_RW;
    if (entry_isprotected(g_iota)) {
        entry->perm &= IOMMU_RO;
    }

    /* Descend one table level per iteration until the walk terminates. */
    while (to) {
        to = table_translate(entry, to, ett--, &error);
    }

    return error;
}
/*
 * IOMMU translate callback: look up @addr in the per-device IOTLB and
 * report an error event to the guest (moving the function to the error
 * state) on access violations.
 */
static IOMMUTLBEntry s390_translate_iommu(IOMMUMemoryRegion *mr, hwaddr addr,
                                          IOMMUAccessFlags flag, int iommu_idx)
{
    S390PCIIOMMU *iommu = container_of(mr, S390PCIIOMMU, iommu_mr);
    S390IOTLBEntry *entry;
    uint64_t iova = addr & TARGET_PAGE_MASK;
    uint16_t error = 0;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    /* DMA is only possible while the function is enabled or blocked. */
    switch (iommu->pbdev->state) {
    case ZPCI_FS_ENABLED:
    case ZPCI_FS_BLOCKED:
        if (!iommu->enabled) {
            return ret;
        }
        break;
    default:
        return ret;
    }

    trace_s390_pci_iommu_xlate(addr);

    /* The access must fall within the PCI base/limit aperture. */
    if (addr < iommu->pba || addr > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

    entry = g_hash_table_lookup(iommu->iotlb, &iova);
    if (entry) {
        ret.iova = entry->iova;
        ret.translated_addr = entry->translated_addr;
        ret.addr_mask = entry->len - 1;
        ret.perm = entry->perm;
    } else {
        /* Not cached: report a page-sized mapping with no permissions. */
        ret.iova = iova;
        ret.addr_mask = ~TARGET_PAGE_MASK;
        ret.perm = IOMMU_NONE;
    }

    if (flag != IOMMU_NONE && !(flag & ret.perm)) {
        error = ERR_EVENT_TPROTE;
    }
err:
    if (error) {
        iommu->pbdev->state = ZPCI_FS_ERROR;
        s390_pci_generate_error_event(error, iommu->pbdev->fh,
                                      iommu->pbdev->fid, addr, 0);
    }
    return ret;
}
/* Replay is intentionally a no-op for zPCI; see the comment below. */
static void s390_pci_iommu_replay(IOMMUMemoryRegion *iommu,
                                  IOMMUNotifier *notifier)
{
    /* It's impossible to plug a pci device on s390x that already has iommu
     * mappings which need to be replayed, that is due to the "one iommu per
     * zpci device" construct. But when we support migration of vfio-pci
     * devices in future, we need to revisit this.
     */
    return;
}
/*
 * Look up (or lazily create) the IOMMU state for a bus/slot pair. There is
 * one S390PCIIOMMUTable per PCI bus and one S390PCIIOMMU per slot.
 */
static S390PCIIOMMU *s390_pci_get_iommu(S390pciState *s, PCIBus *bus,
                                        int devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu;

    if (!table) {
        table = g_new0(S390PCIIOMMUTable, 1);
        table->key = key;
        g_hash_table_insert(s->iommu_table, &table->key, table);
    }

    iommu = table->iommu[PCI_SLOT(devfn)];
    if (!iommu) {
        iommu = S390_PCI_IOMMU(object_new(TYPE_S390_PCI_IOMMU));
        char *mr_name = g_strdup_printf("iommu-root-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        char *as_name = g_strdup_printf("iommu-pci-%02x:%02x.%01x",
                                        pci_bus_num(bus),
                                        PCI_SLOT(devfn),
                                        PCI_FUNC(devfn));
        /*
         * The root region spans the whole 64-bit space; the IOMMU subregion
         * is only mapped into it when the guest enables translation
         * (s390_pci_iommu_enable).
         */
        memory_region_init(&iommu->mr, OBJECT(iommu), mr_name, UINT64_MAX);
        address_space_init(&iommu->as, &iommu->mr, as_name);
        iommu->iotlb = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                             NULL, g_free);
        table->iommu[PCI_SLOT(devfn)] = iommu;
        g_free(mr_name);
        g_free(as_name);
    }

    return iommu;
}
/* PCI DMA address-space hook: each slot gets its own IOMMU address space. */
static AddressSpace *s390_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    S390pciState *s = opaque;
    S390PCIIOMMU *iommu = s390_pci_get_iommu(s, bus, devfn);

    return &iommu->as;
}

static const PCIIOMMUOps s390_iommu_ops = {
    .get_address_space = s390_pci_dma_iommu,
};
/*
 * Atomically OR @to_be_set into the guest indicator byte at @ind_loc.
 *
 * Returns the previous value of the byte. If the indicator cannot be
 * mapped, an AIRERR error event is generated and -1 (0xff, since the
 * return type is uint8_t) is returned.
 */
static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set)
{
    uint8_t expected, actual;
    hwaddr len = 1;
    /* avoid multiple fetches */
    uint8_t volatile *ind_addr;

    ind_addr = cpu_physical_memory_map(ind_loc, &len, true);
    if (!ind_addr) {
        s390_pci_generate_error_event(ERR_EVENT_AIRERR, 0, 0, 0, 0);
        return -1;
    }
    actual = *ind_addr;
    /* Retry the compare-and-swap until no concurrent writer intervenes. */
    do {
        expected = actual;
        actual = qatomic_cmpxchg(ind_addr, expected, expected | to_be_set);
    } while (actual != expected);
    cpu_physical_memory_unmap((void *)ind_addr, len, 1, len);

    return actual;
}
/*
 * MSI-X doorbell write: convert the MSI data into an adapter interrupt by
 * setting the per-vector indicator bit and, when the summary bit was not
 * already set, injecting an adapter interrupt on the device's ISC.
 */
static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data,
                                unsigned int size)
{
    S390PCIBusDevice *pbdev = opaque;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;
    uint64_t ind_bit;
    uint32_t sum_bit;

    assert(pbdev);

    trace_s390_pci_msi_ctrl_write(data, pbdev->idx, vec);

    /* Interrupts are only delivered while the function is enabled. */
    if (pbdev->state != ZPCI_FS_ENABLED) {
        return;
    }

    ind_bit = pbdev->routes.adapter.ind_offset;
    sum_bit = pbdev->routes.adapter.summary_offset;

    /* Indicator bits are numbered MSB-first, hence 0x80 >> n. */
    set_ind_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8,
                   0x80 >> ((ind_bit + vec) % 8));
    /* Only raise the interrupt when the summary bit was previously clear. */
    if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8,
                        0x80 >> (sum_bit % 8))) {
        css_adapter_interrupt(CSS_IO_ADAPTER_PCI, pbdev->isc);
    }
}
/* Reads from the MSI-X doorbell region are meaningless; return all-ones. */
static uint64_t s390_msi_ctrl_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0xffffffff;
}

static const MemoryRegionOps s390_msi_ctrl_ops = {
    .write = s390_msi_ctrl_write,
    .read = s390_msi_ctrl_read,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/* Create the IOMMU region and map it into the device's root region. */
void s390_pci_iommu_enable(S390PCIIOMMU *iommu)
{
    /*
     * The iommu region is initialized against a 0-mapped address space,
     * so the smallest IOMMU region we can define runs from 0 to the end
     * of the PCI address space.
     */
    char *name = g_strdup_printf("iommu-s390-%04x", iommu->pbdev->uid);

    memory_region_init_iommu(&iommu->iommu_mr, sizeof(iommu->iommu_mr),
                             TYPE_S390_IOMMU_MEMORY_REGION, OBJECT(&iommu->mr),
                             name, iommu->pal + 1);
    iommu->enabled = true;
    /* From here on DMA accesses go through s390_translate_iommu(). */
    memory_region_add_subregion(&iommu->mr, 0, MEMORY_REGION(&iommu->iommu_mr));
    g_free(name);
}
/* Unmap the IOMMU region and drop all cached translations. */
void s390_pci_iommu_disable(S390PCIIOMMU *iommu)
{
    iommu->enabled = false;
    g_hash_table_remove_all(iommu->iotlb);
    memory_region_del_subregion(&iommu->mr, MEMORY_REGION(&iommu->iommu_mr));
    object_unparent(OBJECT(&iommu->iommu_mr));
}
/* Destroy the per-slot IOMMU state created by s390_pci_get_iommu(). */
static void s390_pci_iommu_free(S390pciState *s, PCIBus *bus, int32_t devfn)
{
    uint64_t key = (uintptr_t)bus;
    S390PCIIOMMUTable *table = g_hash_table_lookup(s->iommu_table, &key);
    S390PCIIOMMU *iommu = table ? table->iommu[PCI_SLOT(devfn)] : NULL;

    if (!table || !iommu) {
        return;
    }

    table->iommu[PCI_SLOT(devfn)] = NULL;
    g_hash_table_destroy(iommu->iotlb);
    /*
     * An attached PCI device may have memory listeners, eg. VFIO PCI.
     * The associated subregion will already have been unmapped in
     * s390_pci_iommu_disable in response to the guest deconfigure request.
     * Remove the listeners now before destroying the address space.
     */
    address_space_remove_listeners(&iommu->as);
    address_space_destroy(&iommu->as);
    object_unparent(OBJECT(&iommu->mr));
    object_unparent(OBJECT(iommu));
    object_unref(OBJECT(iommu));
}
  643. S390PCIGroup *s390_group_create(int id, int host_id)
  644. {
  645. S390PCIGroup *group;
  646. S390pciState *s = s390_get_phb();
  647. group = g_new0(S390PCIGroup, 1);
  648. group->id = id;
  649. group->host_id = host_id;
  650. QTAILQ_INSERT_TAIL(&s->zpci_groups, group, link);
  651. return group;
  652. }
  653. S390PCIGroup *s390_group_find(int id)
  654. {
  655. S390PCIGroup *group;
  656. S390pciState *s = s390_get_phb();
  657. QTAILQ_FOREACH(group, &s->zpci_groups, link) {
  658. if (group->id == id) {
  659. return group;
  660. }
  661. }
  662. return NULL;
  663. }
  664. S390PCIGroup *s390_group_find_host_sim(int host_id)
  665. {
  666. S390PCIGroup *group;
  667. S390pciState *s = s390_get_phb();
  668. QTAILQ_FOREACH(group, &s->zpci_groups, link) {
  669. if (group->id >= ZPCI_SIM_GRP_START && group->host_id == host_id) {
  670. return group;
  671. }
  672. }
  673. return NULL;
  674. }
/* Create the default function group and fill in its CLP query response. */
static void s390_pci_init_default_group(void)
{
    S390PCIGroup *group;
    ClpRspQueryPciGrp *resgrp;

    group = s390_group_create(ZPCI_DEFAULT_FN_GRP, ZPCI_DEFAULT_FN_GRP);
    resgrp = &group->zpci_group;
    resgrp->fr = 1;
    resgrp->dasm = 0;
    resgrp->msia = ZPCI_MSI_ADDR;
    resgrp->mui = DEFAULT_MUI;
    resgrp->i = 128;
    resgrp->maxstbl = 128;
    resgrp->version = 0;
    resgrp->dtsm = ZPCI_DTSM;
}
/* Initialize the device's CLP function-query info with emulated defaults. */
static void set_pbdev_info(S390PCIBusDevice *pbdev)
{
    pbdev->zpci_fn.sdma = ZPCI_SDMA_ADDR;
    pbdev->zpci_fn.edma = ZPCI_EDMA_ADDR;
    pbdev->zpci_fn.pchid = 0;
    pbdev->zpci_fn.pfgid = ZPCI_DEFAULT_FN_GRP;
    pbdev->zpci_fn.fid = pbdev->fid;
    pbdev->zpci_fn.uid = pbdev->uid;
    pbdev->pci_group = s390_group_find(ZPCI_DEFAULT_FN_GRP);
}
/*
 * Realize the zPCI host bridge: create the PCI root bus and the parallel
 * zPCI proxy bus, initialize the device/group bookkeeping, and register
 * the PCI I/O adapter with the channel subsystem.
 */
static void s390_pcihost_realize(DeviceState *dev, Error **errp)
{
    PCIBus *b;
    BusState *bus;
    PCIHostState *phb = PCI_HOST_BRIDGE(dev);
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);

    trace_s390_pcihost("realize");

    b = pci_register_root_bus(dev, NULL, s390_pci_set_irq, s390_pci_map_irq,
                              NULL, get_system_memory(), get_system_io(), 0,
                              64, TYPE_PCI_BUS);
    pci_setup_iommu(b, &s390_iommu_ops, s);

    bus = BUS(b);
    qbus_set_hotplug_handler(bus, OBJECT(dev));
    phb->bus = b;

    /* Separate bus carrying the zPCI proxy devices. */
    s->bus = S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS, dev, NULL));
    qbus_set_hotplug_handler(BUS(s->bus), OBJECT(dev));

    s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal,
                                           NULL, g_free);
    s->zpci_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, NULL);
    s->bus_no = 0;
    s->next_sim_grp = ZPCI_SIM_GRP_START;
    QTAILQ_INIT(&s->pending_sei);
    QTAILQ_INIT(&s->zpci_devs);
    QTAILQ_INIT(&s->zpci_dma_limit);
    QTAILQ_INIT(&s->zpci_groups);
    s390_pci_init_default_group();
    css_register_io_adapters(CSS_IO_ADAPTER_PCI, true, false,
                             S390_ADAPTER_SUPPRESSIBLE, errp);
}
  729. static void s390_pcihost_unrealize(DeviceState *dev)
  730. {
  731. S390PCIGroup *group;
  732. S390pciState *s = S390_PCI_HOST_BRIDGE(dev);
  733. while (!QTAILQ_EMPTY(&s->zpci_groups)) {
  734. group = QTAILQ_FIRST(&s->zpci_groups);
  735. QTAILQ_REMOVE(&s->zpci_groups, group, link);
  736. }
  737. }
/*
 * Read the MSI-X capability of the attached PCI device and map the MSI
 * doorbell region at the group's MSI address inside the device's IOMMU
 * root region.
 *
 * Returns 0 on success, -1 when the device has no MSI-X capability.
 */
static int s390_pci_msix_init(S390PCIBusDevice *pbdev)
{
    char *name;
    uint8_t pos;
    uint16_t ctrl;
    uint32_t table, pba;

    pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX);
    if (!pos) {
        return -1;
    }

    ctrl = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_FLAGS,
                                       pci_config_size(pbdev->pdev),
                                       sizeof(ctrl));
    table = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_TABLE,
                                        pci_config_size(pbdev->pdev),
                                        sizeof(table));
    pba = pci_host_config_read_common(pbdev->pdev, pos + PCI_MSIX_PBA,
                                      pci_config_size(pbdev->pdev),
                                      sizeof(pba));

    /* Split the BAR indicator (BIR) and offset fields. */
    pbdev->msix.table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
    pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;

    name = g_strdup_printf("msix-s390-%04x", pbdev->uid);
    memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev),
                          &s390_msi_ctrl_ops, pbdev, name, TARGET_PAGE_SIZE);
    memory_region_add_subregion(&pbdev->iommu->mr,
                                pbdev->pci_group->zpci_group.msia,
                                &pbdev->msix_notify_mr);
    g_free(name);

    return 0;
}
/* Tear down the MSI-X doorbell region; no-op if MSI-X was never set up. */
static void s390_pci_msix_free(S390PCIBusDevice *pbdev)
{
    if (pbdev->msix.entries == 0) {
        return;
    }

    memory_region_del_subregion(&pbdev->iommu->mr, &pbdev->msix_notify_mr);
    object_unparent(OBJECT(&pbdev->msix_notify_mr));
}
/*
 * Create and realize a zPCI proxy device for @target on the zPCI bus.
 * Returns NULL and sets @errp on failure; the half-created device is
 * unparented on each error path.
 */
static S390PCIBusDevice *s390_pci_device_new(S390pciState *s,
                                             const char *target, Error **errp)
{
    Error *local_err = NULL;
    DeviceState *dev;

    dev = qdev_try_new(TYPE_S390_PCI_DEVICE);
    if (!dev) {
        error_setg(errp, "zPCI device could not be created");
        return NULL;
    }

    if (!object_property_set_str(OBJECT(dev), "target", target, &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }
    if (!qdev_realize_and_unref(dev, BUS(s->bus), &local_err)) {
        object_unparent(OBJECT(dev));
        error_propagate_prepend(errp, local_err,
                                "zPCI device could not be created: ");
        return NULL;
    }

    return S390_PCI_DEVICE(dev);
}
  800. static bool s390_pci_alloc_idx(S390pciState *s, S390PCIBusDevice *pbdev)
  801. {
  802. uint32_t idx;
  803. idx = s->next_idx;
  804. while (s390_pci_find_dev_by_idx(s, idx)) {
  805. idx = (idx + 1) & FH_MASK_INDEX;
  806. if (idx == s->next_idx) {
  807. return false;
  808. }
  809. }
  810. pbdev->idx = idx;
  811. return true;
  812. }
  813. static void s390_pcihost_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
  814. Error **errp)
  815. {
  816. S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
  817. if (!s390_has_feat(S390_FEAT_ZPCI)) {
  818. warn_report("Plugging a PCI/zPCI device without the 'zpci' CPU "
  819. "feature enabled; the guest will not be able to see/use "
  820. "this device");
  821. }
  822. if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
  823. S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);
  824. if (!s390_pci_alloc_idx(s, pbdev)) {
  825. error_setg(errp, "no slot for plugging zpci device");
  826. return;
  827. }
  828. }
  829. }
  830. static void s390_pci_update_subordinate(PCIDevice *dev, uint32_t nr)
  831. {
  832. uint32_t old_nr;
  833. pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
  834. while (!pci_bus_is_root(pci_get_bus(dev))) {
  835. dev = pci_get_bus(dev)->parent_dev;
  836. old_nr = pci_default_read_config(dev, PCI_SUBORDINATE_BUS, 1);
  837. if (old_nr < nr) {
  838. pci_default_write_config(dev, PCI_SUBORDINATE_BUS, nr, 1);
  839. }
  840. }
  841. }
/*
 * Prepare an interpreted passthrough device for plugging.
 *
 * Adopts the host's function handle (minus the enable bit) as the
 * guest-visible handle and, if the index embedded in that handle differs
 * from the one allocated at pre-plug time, re-keys the device into the
 * idx hash table under the host-provided index.
 *
 * Returns 0 on success, -EPERM if the host handle cannot be obtained, or
 * -EINVAL if the host-provided index is already taken by another device.
 */
static int s390_pci_interp_plug(S390pciState *s, S390PCIBusDevice *pbdev)
{
    uint32_t idx, fh;

    if (!s390_pci_get_host_fh(pbdev, &fh)) {
        return -EPERM;
    }

    /*
     * The host device is already in an enabled state, but we always present
     * the initial device state to the guest as disabled (ZPCI_FS_DISABLED).
     * Therefore, mask off the enable bit from the passthrough handle until
     * the guest issues a CLP SET PCI FN later to enable the device.
     */
    pbdev->fh = fh & ~FH_MASK_ENABLE;

    /* Next, see if the idx is already in-use */
    idx = pbdev->fh & FH_MASK_INDEX;
    if (pbdev->idx != idx) {
        if (s390_pci_find_dev_by_idx(s, idx)) {
            return -EINVAL;
        }
        /*
         * Update the idx entry with the passed through idx
         * If the relinquished idx is lower than next_idx, use it
         * to replace next_idx
         */
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (idx < s->next_idx) {
            s->next_idx = idx;
        }
        /* pbdev->idx must be updated first: its address is the hash key. */
        pbdev->idx = idx;
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    }

    return 0;
}
/*
 * Hotplug "plug" callback for the zPCI host bridge.
 *
 * Handles three device flavors:
 *  - PCI bridges: wire up IRQ mapping, IOMMU ops and hotplug handling for
 *    the secondary bus; assign bus numbers when hotplugged.
 *  - PCI devices: find or create the backing zpci device, configure
 *    interpretation vs. interception (vfio-pci vs. emulated), and set up
 *    MSI-X; emit a plug event when hotplugged.
 *  - zpci devices: commit the idx allocated at pre-plug time and register
 *    the device in the device list and idx hash table.
 */
static void s390_pcihost_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
                              Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    PCIDevice *pdev = NULL;
    S390PCIBusDevice *pbdev = NULL;
    int rc;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        PCIBridge *pb = PCI_BRIDGE(dev);

        pdev = PCI_DEVICE(dev);
        pci_bridge_map_irq(pb, dev->id, s390_pci_map_irq);
        pci_setup_iommu(&pb->sec_bus, &s390_iommu_ops, s);

        qbus_set_hotplug_handler(BUS(&pb->sec_bus), OBJECT(s));

        if (dev->hotplugged) {
            /* Assign primary/secondary bus numbers for the new bridge and
             * widen the subordinate range up to the root. */
            pci_default_write_config(pdev, PCI_PRIMARY_BUS,
                                     pci_dev_bus_num(pdev), 1);
            s->bus_no += 1;
            pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);

            s390_pci_update_subordinate(pdev, s->bus_no);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        pdev = PCI_DEVICE(dev);

        /*
         * Multifunction is not supported due to the lack of CLP. However,
         * do not check for multifunction capability for SR-IOV devices because
         * SR-IOV devices automatically add the multifunction capability whether
         * the user intends to use the functions other than the PF.
         */
        if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION &&
            !pdev->exp.sriov_cap) {
            error_setg(errp, "multifunction not supported in s390");
            return;
        }

        if (!dev->id) {
            /* In the case the PCI device does not define an id */
            /* we generate one based on the PCI address */
            dev->id = g_strdup_printf("auto_%02x:%02x.%01x",
                                      pci_dev_bus_num(pdev),
                                      PCI_SLOT(pdev->devfn),
                                      PCI_FUNC(pdev->devfn));
        }

        /* The device id doubles as the zpci "target" lookup key. */
        pbdev = s390_pci_find_dev_by_target(s, dev->id);
        if (!pbdev) {
            /*
             * VFs are automatically created by PF, and creating zpci for them
             * will result in unexpected usage of fids. Currently QEMU does not
             * support multifunction for s390x so we don't need zpci for VFs
             * anyway.
             */
            if (pci_is_vf(pdev)) {
                return;
            }

            pbdev = s390_pci_device_new(s, dev->id, errp);
            if (!pbdev) {
                return;
            }
        }

        /* Bind the PCI device and its IOMMU to the zpci device. */
        pbdev->pdev = pdev;
        pbdev->iommu = s390_pci_get_iommu(s, pci_get_bus(pdev), pdev->devfn);
        pbdev->iommu->pbdev = pbdev;
        pbdev->state = ZPCI_FS_DISABLED;
        set_pbdev_info(pbdev);

        if (object_dynamic_cast(OBJECT(dev), "vfio-pci")) {
            /*
             * By default, interpretation is always requested; if the available
             * facilities indicate it is not available, fallback to the
             * interception model.
             */
            if (pbdev->interp) {
                if (s390_pci_kvm_interp_allowed()) {
                    rc = s390_pci_interp_plug(s, pbdev);
                    if (rc) {
                        error_setg(errp, "Plug failed for zPCI device in "
                                   "interpretation mode: %d", rc);
                        return;
                    }
                } else {
                    trace_s390_pcihost("zPCI interpretation missing");
                    pbdev->interp = false;
                    pbdev->forwarding_assist = false;
                }
            }
            pbdev->iommu->dma_limit = s390_pci_start_dma_count(s, pbdev);
            /* Fill in CLP information passed via the vfio region */
            s390_pci_get_clp_info(pbdev);
            if (!pbdev->interp) {
                /* Do vfio passthrough but intercept for I/O */
                pbdev->fh |= FH_SHM_VFIO;
                pbdev->forwarding_assist = false;
            }
            /* Register shutdown notifier and reset callback for ISM devices */
            if (pbdev->pft == ZPCI_PFT_ISM) {
                pbdev->shutdown_notifier.notify = s390_pci_shutdown_notifier;
                qemu_register_shutdown_notifier(&pbdev->shutdown_notifier);
            }
        } else {
            pbdev->fh |= FH_SHM_EMUL;
            /* Always intercept emulated devices */
            pbdev->interp = false;
            pbdev->forwarding_assist = false;
        }

        /* MSI-X setup failure is only fatal for intercepted devices. */
        if (s390_pci_msix_init(pbdev) && !pbdev->interp) {
            error_setg(errp, "MSI-X support is mandatory "
                       "in the S390 architecture");
            return;
        }

        if (dev->hotplugged) {
            s390_pci_generate_plug_event(HP_EVENT_TO_CONFIGURED,
                                         pbdev->fh, pbdev->fid);
        }
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /* the allocated idx is actually getting used */
        s->next_idx = (pbdev->idx + 1) & FH_MASK_INDEX;
        pbdev->fh = pbdev->idx;
        QTAILQ_INSERT_TAIL(&s->zpci_devs, pbdev, link);
        g_hash_table_insert(s->zpci_table, &pbdev->idx, pbdev);
    } else {
        g_assert_not_reached();
    }
}
/*
 * Hotplug "unplug" callback for the zPCI host bridge.
 *
 * For a PCI device: emit the standby-to-reserved event, unrealize the
 * device, then free its MSI-X region and IOMMU and detach it from the
 * zpci device (which stays around in ZPCI_FS_RESERVED state).
 * For a zpci device: remove it from the device list / idx table, release
 * any DMA accounting, and unrealize it.
 */
static void s390_pcihost_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
                                Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev = NULL;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        PCIBus *bus;
        int32_t devfn;

        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        if (!pbdev) {
            /* Only VFs are plugged without a backing zpci device. */
            g_assert(pci_is_vf(pci_dev));
            return;
        }

        s390_pci_generate_plug_event(HP_EVENT_STANDBY_TO_RESERVED,
                                     pbdev->fh, pbdev->fid);
        /* Cache bus/devfn: they are needed after the device is unrealized. */
        bus = pci_get_bus(pci_dev);
        devfn = pci_dev->devfn;
        qdev_unrealize(dev);

        s390_pci_msix_free(pbdev);
        s390_pci_iommu_free(s, bus, devfn);
        pbdev->pdev = NULL;
        pbdev->state = ZPCI_FS_RESERVED;
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);
        pbdev->fid = 0;
        QTAILQ_REMOVE(&s->zpci_devs, pbdev, link);
        g_hash_table_remove(s->zpci_table, &pbdev->idx);
        if (pbdev->iommu->dma_limit) {
            s390_pci_end_dma_count(s, pbdev->iommu->dma_limit);
        }
        qdev_unrealize(dev);
    }
}
/*
 * Hotplug "unplug_request" callback.
 *
 * PCI device and zpci device form a pair; an unplug request against either
 * side is bounced to the other exactly once (tracked via
 * pci_unplug_request_processed) so both get a chance to veto. Bridges
 * cannot be hot-unplugged at all. For the zpci side, a device already in
 * STANDBY/RESERVED is unplugged immediately; otherwise the guest is asked
 * to deconfigure it via a plug event.
 */
static void s390_pcihost_unplug_request(HotplugHandler *hotplug_dev,
                                        DeviceState *dev,
                                        Error **errp)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(hotplug_dev);
    S390PCIBusDevice *pbdev;

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) {
        error_setg(errp, "PCI bridge hot unplug currently not supported");
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        /*
         * Redirect the unplug request to the zPCI device and remember that
         * we've checked the PCI device already (to prevent endless recursion).
         */
        pbdev = s390_pci_find_dev_by_pci(s, PCI_DEVICE(dev));
        if (!pbdev) {
            /* Only VFs are plugged without a backing zpci device. */
            g_assert(pci_is_vf(PCI_DEVICE(dev)));
            return;
        }
        pbdev->pci_unplug_request_processed = true;
        qdev_unplug(DEVICE(pbdev), errp);
    } else if (object_dynamic_cast(OBJECT(dev), TYPE_S390_PCI_DEVICE)) {
        pbdev = S390_PCI_DEVICE(dev);

        /*
         * If unplug was initially requested for the zPCI device, we
         * first have to redirect to the PCI device, which will in return
         * redirect back to us after performing its checks (if the request
         * is not blocked, e.g. because it's a PCI bridge).
         */
        if (pbdev->pdev && !pbdev->pci_unplug_request_processed) {
            qdev_unplug(DEVICE(pbdev->pdev), errp);
            return;
        }
        pbdev->pci_unplug_request_processed = false;

        switch (pbdev->state) {
        case ZPCI_FS_STANDBY:
        case ZPCI_FS_RESERVED:
            s390_pci_perform_unplug(pbdev);
            break;
        default:
            /*
             * Allow to send multiple requests, e.g. if the guest crashed
             * before releasing the device, we would not be able to send
             * another request to the same VM (e.g. fresh OS).
             */
            pbdev->unplug_requested = true;
            s390_pci_generate_plug_event(HP_EVENT_DECONFIGURE_REQUEST,
                                         pbdev->fh, pbdev->fid);
        }
    } else {
        g_assert_not_reached();
    }
}
/*
 * Per-device callback used to (re)assign bus numbers to PCI bridges.
 *
 * Skips non-bridge devices. For each bridge: bumps the global bus counter,
 * writes primary/secondary numbers, then recurses into the secondary bus
 * so all child bridges get numbered; afterwards the subordinate register
 * is rewritten with the (now highest) counter value.
 */
static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev,
                                      void *opaque)
{
    S390pciState *s = opaque;
    PCIBus *sec_bus = NULL;

    if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
         PCI_HEADER_TYPE_BRIDGE)) {
        return;
    }

    (s->bus_no)++;
    pci_default_write_config(pdev, PCI_PRIMARY_BUS, pci_dev_bus_num(pdev), 1);
    pci_default_write_config(pdev, PCI_SECONDARY_BUS, s->bus_no, 1);
    /* Provisional subordinate; fixed up after the recursion below. */
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);

    sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
    if (!sec_bus) {
        return;
    }

    /* Assign numbers to all child bridges. The last is the highest number. */
    pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s);
    pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1);
}
  1103. void s390_pci_ism_reset(void)
  1104. {
  1105. S390pciState *s = s390_get_phb();
  1106. S390PCIBusDevice *pbdev, *next;
  1107. /* Trigger reset event for each passthrough ISM device currently in-use */
  1108. QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) {
  1109. if (pbdev->interp && pbdev->pft == ZPCI_PFT_ISM &&
  1110. pbdev->fh & FH_MASK_ENABLE) {
  1111. s390_pci_kvm_aif_disable(pbdev);
  1112. pci_device_reset(pbdev->pdev);
  1113. }
  1114. }
  1115. }
/*
 * System-reset handler for the zPCI host bridge.
 *
 * Completes any unplug the guest never acknowledged (tearing down
 * interrupt forwarding / registered IRQs / IOAT first), then re-runs
 * bridge bus-number enumeration from scratch.
 */
static void s390_pcihost_reset(DeviceState *dev)
{
    S390pciState *s = S390_PCI_HOST_BRIDGE(dev);
    PCIBus *bus = s->parent_obj.bus;
    S390PCIBusDevice *pbdev, *next;

    /* Process all pending unplug requests */
    QTAILQ_FOREACH_SAFE(pbdev, &s->zpci_devs, link, next) {
        if (pbdev->unplug_requested) {
            if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
                /* Interpreted devices were using interrupt forwarding */
                s390_pci_kvm_aif_disable(pbdev);
            } else if (pbdev->summary_ind) {
                pci_dereg_irqs(pbdev);
            }
            if (pbdev->iommu->enabled) {
                pci_dereg_ioat(pbdev->iommu);
            }
            pbdev->state = ZPCI_FS_STANDBY;
            s390_pci_perform_unplug(pbdev);
        }
    }

    /*
     * When resetting a PCI bridge, the assigned numbers are set to 0. So
     * on every system reset, we also have to reassign numbers.
     */
    s->bus_no = 0;
    pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s);
}
  1144. static void s390_pcihost_class_init(ObjectClass *klass, void *data)
  1145. {
  1146. DeviceClass *dc = DEVICE_CLASS(klass);
  1147. HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
  1148. device_class_set_legacy_reset(dc, s390_pcihost_reset);
  1149. dc->realize = s390_pcihost_realize;
  1150. dc->unrealize = s390_pcihost_unrealize;
  1151. hc->pre_plug = s390_pcihost_pre_plug;
  1152. hc->plug = s390_pcihost_plug;
  1153. hc->unplug_request = s390_pcihost_unplug_request;
  1154. hc->unplug = s390_pcihost_unplug;
  1155. msi_nonbroken = true;
  1156. }
/* QOM type registration for the zPCI host bridge (a hotplug handler). */
static const TypeInfo s390_pcihost_info = {
    .name          = TYPE_S390_PCI_HOST_BRIDGE,
    .parent        = TYPE_PCI_HOST_BRIDGE,
    .instance_size = sizeof(S390pciState),
    .class_init    = s390_pcihost_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};
/* QOM type registration for the bus that zpci devices plug into. */
static const TypeInfo s390_pcibus_info = {
    .name = TYPE_S390_PCI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(S390PCIBus),
};
  1172. static uint16_t s390_pci_generate_uid(S390pciState *s)
  1173. {
  1174. uint16_t uid = 0;
  1175. do {
  1176. uid++;
  1177. if (!s390_pci_find_dev_by_uid(s, uid)) {
  1178. return uid;
  1179. }
  1180. } while (uid < ZPCI_MAX_UID);
  1181. return UID_UNDEFINED;
  1182. }
  1183. static uint32_t s390_pci_generate_fid(S390pciState *s, Error **errp)
  1184. {
  1185. uint32_t fid = 0;
  1186. do {
  1187. if (!s390_pci_find_dev_by_fid(s, fid)) {
  1188. return fid;
  1189. }
  1190. } while (fid++ != ZPCI_MAX_FID);
  1191. error_setg(errp, "no free fid could be found");
  1192. return 0;
  1193. }
/*
 * Realize a zpci device: validate the target, uid and fid properties.
 *
 * "target" is mandatory and must be unique. uid/fid are auto-generated
 * when not supplied by the user (fid_defined distinguishes an explicit
 * fid=0 from "not set"); explicitly supplied values must be unused.
 * The device starts life in ZPCI_FS_RESERVED state.
 */
static void s390_pci_device_realize(DeviceState *dev, Error **errp)
{
    S390PCIBusDevice *zpci = S390_PCI_DEVICE(dev);
    S390pciState *s = s390_get_phb();

    if (!zpci->target) {
        error_setg(errp, "target must be defined");
        return;
    }

    if (s390_pci_find_dev_by_target(s, zpci->target)) {
        error_setg(errp, "target %s already has an associated zpci device",
                   zpci->target);
        return;
    }

    if (zpci->uid == UID_UNDEFINED) {
        zpci->uid = s390_pci_generate_uid(s);
        if (!zpci->uid) {
            error_setg(errp, "no free uid could be found");
            return;
        }
    } else if (s390_pci_find_dev_by_uid(s, zpci->uid)) {
        error_setg(errp, "uid %u already in use", zpci->uid);
        return;
    }

    if (!zpci->fid_defined) {
        Error *local_error = NULL;

        zpci->fid = s390_pci_generate_fid(s, &local_error);
        if (local_error) {
            error_propagate(errp, local_error);
            return;
        }
    } else if (s390_pci_find_dev_by_fid(s, zpci->fid)) {
        error_setg(errp, "fid %u already in use", zpci->fid);
        return;
    }

    zpci->state = ZPCI_FS_RESERVED;
    zpci->fmb.format = ZPCI_FMB_FORMAT;
}
/*
 * Device-level reset for a zpci device.
 *
 * RESERVED devices are untouched; STANDBY devices keep their state; any
 * other state is forced back to DISABLED with the handle's enable bit
 * cleared. Interrupt forwarding / registered IRQs and IOAT are torn down,
 * and the FMB timer is freed.
 *
 * Note: the enable-bit test below runs after the switch may have cleared
 * it, so the aif-disable path applies only to STANDBY interpreted devices
 * that still have an enabled handle.
 */
static void s390_pci_device_reset(DeviceState *dev)
{
    S390PCIBusDevice *pbdev = S390_PCI_DEVICE(dev);

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
        return;
    case ZPCI_FS_STANDBY:
        break;
    default:
        pbdev->fh &= ~FH_MASK_ENABLE;
        pbdev->state = ZPCI_FS_DISABLED;
        break;
    }

    if (pbdev->interp && (pbdev->fh & FH_MASK_ENABLE)) {
        /* Interpreted devices were using interrupt forwarding */
        s390_pci_kvm_aif_disable(pbdev);
    } else if (pbdev->summary_ind) {
        pci_dereg_irqs(pbdev);
    }
    if (pbdev->iommu->enabled) {
        pci_dereg_ioat(pbdev->iommu);
    }

    fmb_timer_free(pbdev);
}
  1255. static void s390_pci_get_fid(Object *obj, Visitor *v, const char *name,
  1256. void *opaque, Error **errp)
  1257. {
  1258. const Property *prop = opaque;
  1259. uint32_t *ptr = object_field_prop_ptr(obj, prop);
  1260. visit_type_uint32(v, name, ptr, errp);
  1261. }
  1262. static void s390_pci_set_fid(Object *obj, Visitor *v, const char *name,
  1263. void *opaque, Error **errp)
  1264. {
  1265. S390PCIBusDevice *zpci = S390_PCI_DEVICE(obj);
  1266. const Property *prop = opaque;
  1267. uint32_t *ptr = object_field_prop_ptr(obj, prop);
  1268. if (!visit_type_uint32(v, name, ptr, errp)) {
  1269. return;
  1270. }
  1271. zpci->fid_defined = true;
  1272. }
/*
 * Custom property type for "fid": behaves like a plain uint32 property,
 * but the setter also records (fid_defined) that the user gave an
 * explicit value, so realize can tell "fid=0" apart from "fid unset".
 */
static const PropertyInfo s390_pci_fid_propinfo = {
    .name = "zpci_fid",
    .get = s390_pci_get_fid,
    .set = s390_pci_set_fid,
};

#define DEFINE_PROP_S390_PCI_FID(_n, _s, _f) \
    DEFINE_PROP(_n, _s, _f, s390_pci_fid_propinfo, uint32_t)
/* User-configurable properties of a zpci device. */
static const Property s390_pci_device_properties[] = {
    DEFINE_PROP_UINT16("uid", S390PCIBusDevice, uid, UID_UNDEFINED),
    DEFINE_PROP_S390_PCI_FID("fid", S390PCIBusDevice, fid),
    DEFINE_PROP_STRING("target", S390PCIBusDevice, target),
    DEFINE_PROP_BOOL("interpret", S390PCIBusDevice, interp, true),
    DEFINE_PROP_BOOL("forwarding-assist", S390PCIBusDevice, forwarding_assist,
                     true),
};
/* zpci devices are currently not migratable. */
static const VMStateDescription s390_pci_device_vmstate = {
    .name = TYPE_S390_PCI_DEVICE,
    /*
     * TODO: add state handling here, so migration works at least with
     * emulated pci devices on s390x
     */
    .unmigratable = 1,
};
  1296. static void s390_pci_device_class_init(ObjectClass *klass, void *data)
  1297. {
  1298. DeviceClass *dc = DEVICE_CLASS(klass);
  1299. dc->desc = "zpci device";
  1300. set_bit(DEVICE_CATEGORY_MISC, dc->categories);
  1301. device_class_set_legacy_reset(dc, s390_pci_device_reset);
  1302. dc->bus_type = TYPE_S390_PCI_BUS;
  1303. dc->realize = s390_pci_device_realize;
  1304. device_class_set_props(dc, s390_pci_device_properties);
  1305. dc->vmsd = &s390_pci_device_vmstate;
  1306. }
/* QOM type registration for the zpci device. */
static const TypeInfo s390_pci_device_info = {
    .name = TYPE_S390_PCI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(S390PCIBusDevice),
    .class_init = s390_pci_device_class_init,
};
/* QOM type registration for the per-device zPCI IOMMU object. */
static const TypeInfo s390_pci_iommu_info = {
    .name = TYPE_S390_PCI_IOMMU,
    .parent = TYPE_OBJECT,
    .instance_size = sizeof(S390PCIIOMMU),
};
  1318. static void s390_iommu_memory_region_class_init(ObjectClass *klass, void *data)
  1319. {
  1320. IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
  1321. imrc->translate = s390_translate_iommu;
  1322. imrc->replay = s390_pci_iommu_replay;
  1323. }
/* QOM type registration for the zPCI IOMMU memory region. */
static const TypeInfo s390_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_S390_IOMMU_MEMORY_REGION,
    .class_init = s390_iommu_memory_region_class_init,
};
  1329. static void s390_pci_register_types(void)
  1330. {
  1331. type_register_static(&s390_pcihost_info);
  1332. type_register_static(&s390_pcibus_info);
  1333. type_register_static(&s390_pci_device_info);
  1334. type_register_static(&s390_pci_iommu_info);
  1335. type_register_static(&s390_iommu_memory_region_info);
  1336. }
  1337. type_init(s390_pci_register_types)