spapr_xive.c
  1. /*
  2. * QEMU PowerPC sPAPR XIVE interrupt controller model
  3. *
  4. * Copyright (c) 2017-2024, IBM Corporation.
  5. *
  6. * SPDX-License-Identifier: GPL-2.0-or-later
  7. */
  8. #include "qemu/osdep.h"
  9. #include "qemu/log.h"
  10. #include "qemu/module.h"
  11. #include "qapi/error.h"
  12. #include "qemu/error-report.h"
  13. #include "target/ppc/cpu.h"
  14. #include "system/cpus.h"
  15. #include "system/reset.h"
  16. #include "migration/vmstate.h"
  17. #include "hw/ppc/fdt.h"
  18. #include "hw/ppc/spapr.h"
  19. #include "hw/ppc/spapr_cpu_core.h"
  20. #include "hw/ppc/spapr_xive.h"
  21. #include "hw/ppc/xive.h"
  22. #include "hw/ppc/xive_regs.h"
  23. #include "hw/qdev-properties.h"
  24. #include "trace.h"
  25. /*
  26. * XIVE Virtualization Controller BAR and Thread Management BAR that we
  27. * use for the ESB pages and the TIMA pages
  28. */
  29. #define SPAPR_XIVE_VC_BASE 0x0006010000000000ull
  30. #define SPAPR_XIVE_TM_BASE 0x0006030203180000ull
  31. /*
  32. * The allocation of VP blocks is a complex operation in OPAL and the
  33. * VP identifiers have a relation with the number of HW chips, the
  34. * size of the VP blocks, VP grouping, etc. The QEMU sPAPR XIVE
  35. * controller model does not have the same constraints and can use a
  36. * simple mapping scheme of the CPU vcpu_id.
  37. *
  38. * These identifiers are never returned to the OS.
  39. */
  40. #define SPAPR_XIVE_NVT_BASE 0x400
  41. /*
  42. * sPAPR NVT and END indexing helpers
  43. */
  44. static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx)
  45. {
  46. return nvt_idx - SPAPR_XIVE_NVT_BASE;
  47. }
  48. static void spapr_xive_cpu_to_nvt(PowerPCCPU *cpu,
  49. uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
  50. {
  51. assert(cpu);
  52. if (out_nvt_blk) {
  53. *out_nvt_blk = SPAPR_XIVE_BLOCK_ID;
  54. }
  55. if (out_nvt_idx) {
  56. *out_nvt_idx = SPAPR_XIVE_NVT_BASE + cpu->vcpu_id;
  57. }
  58. }
  59. static int spapr_xive_target_to_nvt(uint32_t target,
  60. uint8_t *out_nvt_blk, uint32_t *out_nvt_idx)
  61. {
  62. PowerPCCPU *cpu = spapr_find_cpu(target);
  63. if (!cpu) {
  64. return -1;
  65. }
  66. spapr_xive_cpu_to_nvt(cpu, out_nvt_blk, out_nvt_idx);
  67. return 0;
  68. }
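/*
 * Illustrative example (not from the original source): with this scheme a
 * vCPU with vcpu_id 3 maps to NVT 0x403 in block SPAPR_XIVE_BLOCK_ID:
 *
 *   spapr_xive_cpu_to_nvt(cpu, &blk, &idx);
 *       => blk = SPAPR_XIVE_BLOCK_ID, idx = SPAPR_XIVE_NVT_BASE + 3 = 0x403
 *   spapr_xive_nvt_to_target(blk, 0x403);
 *       => 0x403 - SPAPR_XIVE_NVT_BASE = 3, the vcpu_id again
 */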
  69. /*
  70. * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8
  71. * priorities per CPU
  72. */
  73. int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx,
  74. uint32_t *out_server, uint8_t *out_prio)
  75. {
  76. assert(end_blk == SPAPR_XIVE_BLOCK_ID);
  77. if (out_server) {
  78. *out_server = end_idx >> 3;
  79. }
  80. if (out_prio) {
  81. *out_prio = end_idx & 0x7;
  82. }
  83. return 0;
  84. }
  85. static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
  86. uint8_t *out_end_blk, uint32_t *out_end_idx)
  87. {
  88. assert(cpu);
  89. if (out_end_blk) {
  90. *out_end_blk = SPAPR_XIVE_BLOCK_ID;
  91. }
  92. if (out_end_idx) {
  93. *out_end_idx = (cpu->vcpu_id << 3) + prio;
  94. }
  95. }
  96. static int spapr_xive_target_to_end(uint32_t target, uint8_t prio,
  97. uint8_t *out_end_blk, uint32_t *out_end_idx)
  98. {
  99. PowerPCCPU *cpu = spapr_find_cpu(target);
  100. if (!cpu) {
  101. return -1;
  102. }
  103. spapr_xive_cpu_to_end(cpu, prio, out_end_blk, out_end_idx);
  104. return 0;
  105. }
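/*
 * Worked example (added for illustration): END indices pack the vCPU id and
 * the priority as (vcpu_id << 3) + prio. For vcpu_id 2 and priority 5:
 *
 *   spapr_xive_cpu_to_end(cpu, 5, &blk, &idx);
 *       => blk = SPAPR_XIVE_BLOCK_ID, idx = (2 << 3) + 5 = 0x15
 *   spapr_xive_end_to_target(blk, 0x15, &server, &prio);
 *       => server = 0x15 >> 3 = 2, prio = 0x15 & 0x7 = 5
 */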
  106. /*
  107. * On sPAPR machines, use a simplified output for the XIVE END
  108. * structure dumping only the information related to the OS EQ.
  109. */
  110. static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end,
  111. GString *buf)
  112. {
  113. uint64_t qaddr_base = xive_end_qaddr(end);
  114. uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1);
  115. uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1);
  116. uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);
  117. uint32_t qentries = 1 << (qsize + 10);
  118. uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6);
  119. uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
  120. g_string_append_printf(buf, "%3d/%d % 6d/%5d @%"PRIx64" ^%d",
  121. spapr_xive_nvt_to_target(0, nvt),
  122. priority, qindex, qentries, qaddr_base, qgen);
  123. xive_end_queue_pic_print_info(end, 6, buf);
  124. }
  125. /*
  126. * kvm_irqchip_in_kernel() will cause the compiler to turn this
  127. * into a nop if CONFIG_KVM isn't defined.
  128. */
  129. #define spapr_xive_in_kernel(xive) \
  130. (kvm_irqchip_in_kernel() && (xive)->fd != -1)
  131. static void spapr_xive_pic_print_info(SpaprXive *xive, GString *buf)
  132. {
  133. XiveSource *xsrc = &xive->source;
  134. int i;
  135. if (spapr_xive_in_kernel(xive)) {
  136. Error *local_err = NULL;
  137. kvmppc_xive_synchronize_state(xive, &local_err);
  138. if (local_err) {
  139. error_report_err(local_err);
  140. return;
  141. }
  142. }
  143. g_string_append_printf(buf, " LISN PQ EISN CPU/PRIO EQ\n");
  144. for (i = 0; i < xive->nr_irqs; i++) {
  145. uint8_t pq = xive_source_esb_get(xsrc, i);
  146. XiveEAS *eas = &xive->eat[i];
  147. if (!xive_eas_is_valid(eas)) {
  148. continue;
  149. }
  150. g_string_append_printf(buf, " %08x %s %c%c%c %s %08x ", i,
  151. xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI",
  152. pq & XIVE_ESB_VAL_P ? 'P' : '-',
  153. pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
  154. xive_source_is_asserted(xsrc, i) ? 'A' : ' ',
  155. xive_eas_is_masked(eas) ? "M" : " ",
  156. (int) xive_get_field64(EAS_END_DATA, eas->w));
  157. if (!xive_eas_is_masked(eas)) {
  158. uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
  159. XiveEND *end;
  160. assert(end_idx < xive->nr_ends);
  161. end = &xive->endt[end_idx];
  162. if (xive_end_is_valid(end)) {
  163. spapr_xive_end_pic_print_info(xive, end, buf);
  164. }
  165. }
  166. g_string_append_c(buf, '\n');
  167. }
  168. }
  169. void spapr_xive_mmio_set_enabled(SpaprXive *xive, bool enable)
  170. {
  171. memory_region_set_enabled(&xive->source.esb_mmio, enable);
  172. memory_region_set_enabled(&xive->tm_mmio, enable);
  173. /* Disable the END ESBs until a guest OS makes use of them */
  174. memory_region_set_enabled(&xive->end_source.esb_mmio, false);
  175. }
  176. static void spapr_xive_tm_write(void *opaque, hwaddr offset,
  177. uint64_t value, unsigned size)
  178. {
  179. XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;
  180. xive_tctx_tm_write(XIVE_PRESENTER(opaque), tctx, offset, value, size);
  181. }
  182. static uint64_t spapr_xive_tm_read(void *opaque, hwaddr offset, unsigned size)
  183. {
  184. XiveTCTX *tctx = spapr_cpu_state(POWERPC_CPU(current_cpu))->tctx;
  185. return xive_tctx_tm_read(XIVE_PRESENTER(opaque), tctx, offset, size);
  186. }
  187. const MemoryRegionOps spapr_xive_tm_ops = {
  188. .read = spapr_xive_tm_read,
  189. .write = spapr_xive_tm_write,
  190. .endianness = DEVICE_BIG_ENDIAN,
  191. .valid = {
  192. .min_access_size = 1,
  193. .max_access_size = 8,
  194. },
  195. .impl = {
  196. .min_access_size = 1,
  197. .max_access_size = 8,
  198. },
  199. };
  200. static void spapr_xive_end_reset(XiveEND *end)
  201. {
  202. memset(end, 0, sizeof(*end));
  203. /* switch off the escalation and notification ESBs */
  204. end->w1 = cpu_to_be32(END_W1_ESe_Q | END_W1_ESn_Q);
  205. }
  206. static void spapr_xive_reset(void *dev)
  207. {
  208. SpaprXive *xive = SPAPR_XIVE(dev);
  209. int i;
  210. /*
  211. * The XiveSource has its own reset handler, which masks off all
  212. * IRQs (!P|Q)
  213. */
  214. /* Mask all valid EASs in the IRQ number space. */
  215. for (i = 0; i < xive->nr_irqs; i++) {
  216. XiveEAS *eas = &xive->eat[i];
  217. if (xive_eas_is_valid(eas)) {
  218. eas->w = cpu_to_be64(EAS_VALID | EAS_MASKED);
  219. } else {
  220. eas->w = 0;
  221. }
  222. }
  223. /* Clear all ENDs */
  224. for (i = 0; i < xive->nr_ends; i++) {
  225. spapr_xive_end_reset(&xive->endt[i]);
  226. }
  227. }
  228. static void spapr_xive_instance_init(Object *obj)
  229. {
  230. SpaprXive *xive = SPAPR_XIVE(obj);
  231. object_initialize_child(obj, "source", &xive->source, TYPE_XIVE_SOURCE);
  232. object_initialize_child(obj, "end_source", &xive->end_source,
  233. TYPE_XIVE_END_SOURCE);
  234. /* Not connected to the KVM XIVE device */
  235. xive->fd = -1;
  236. }
  237. static void spapr_xive_realize(DeviceState *dev, Error **errp)
  238. {
  239. SpaprXive *xive = SPAPR_XIVE(dev);
  240. SpaprXiveClass *sxc = SPAPR_XIVE_GET_CLASS(xive);
  241. XiveSource *xsrc = &xive->source;
  242. XiveENDSource *end_xsrc = &xive->end_source;
  243. Error *local_err = NULL;
  244. /* Set by spapr_irq_init() */
  245. g_assert(xive->nr_irqs);
  246. g_assert(xive->nr_ends);
  247. sxc->parent_realize(dev, &local_err);
  248. if (local_err) {
  249. error_propagate(errp, local_err);
  250. return;
  251. }
  252. /*
  253. * Initialize the internal sources, for IPIs and virtual devices.
  254. */
  255. object_property_set_int(OBJECT(xsrc), "nr-irqs", xive->nr_irqs,
  256. &error_fatal);
  257. object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive), &error_abort);
  258. if (!qdev_realize(DEVICE(xsrc), NULL, errp)) {
  259. return;
  260. }
  261. /*
  262. * Initialize the END ESB source
  263. */
  264. object_property_set_int(OBJECT(end_xsrc), "nr-ends", xive->nr_irqs,
  265. &error_fatal);
  266. object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
  267. &error_abort);
  268. if (!qdev_realize(DEVICE(end_xsrc), NULL, errp)) {
  269. return;
  270. }
  271. /* Set the mapping address of the END ESB pages after the source ESBs */
  272. xive->end_base = xive->vc_base + xive_source_esb_len(xsrc);
  273. /*
  274. * Allocate the routing tables
  275. */
  276. xive->eat = g_new0(XiveEAS, xive->nr_irqs);
  277. xive->endt = g_new0(XiveEND, xive->nr_ends);
  278. xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64,
  279. xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT));
  280. qemu_register_reset(spapr_xive_reset, dev);
  281. /* TIMA initialization */
  282. memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &spapr_xive_tm_ops,
  283. xive, "xive.tima", 4ull << TM_SHIFT);
  284. /*
  285. * Map all regions. These will be enabled or disabled at reset and
  286. * can also be overridden by KVM memory regions if active
  287. */
  288. memory_region_add_subregion(get_system_memory(), xive->vc_base,
  289. &xsrc->esb_mmio);
  290. memory_region_add_subregion(get_system_memory(), xive->end_base,
  291. &end_xsrc->esb_mmio);
  292. memory_region_add_subregion(get_system_memory(), xive->tm_base,
  293. &xive->tm_mmio);
  294. }
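/*
 * Resulting guest physical layout, for illustration only (the actual
 * addresses follow the "vc-base" and "tm-base" properties and the ESB
 * page size of the source):
 *
 *   xive->vc_base    source ESB pages, xive_source_esb_len(xsrc) bytes
 *   xive->end_base   END ESB pages, mapped right after the source ESBs
 *   xive->tm_base    TIMA, 4 pages of (1 << TM_SHIFT) bytes each
 */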
  295. static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk,
  296. uint32_t eas_idx, XiveEAS *eas)
  297. {
  298. SpaprXive *xive = SPAPR_XIVE(xrtr);
  299. if (eas_idx >= xive->nr_irqs) {
  300. return -1;
  301. }
  302. *eas = xive->eat[eas_idx];
  303. return 0;
  304. }
  305. static int spapr_xive_get_end(XiveRouter *xrtr,
  306. uint8_t end_blk, uint32_t end_idx, XiveEND *end)
  307. {
  308. SpaprXive *xive = SPAPR_XIVE(xrtr);
  309. if (end_idx >= xive->nr_ends) {
  310. return -1;
  311. }
  312. memcpy(end, &xive->endt[end_idx], sizeof(XiveEND));
  313. return 0;
  314. }
  315. static int spapr_xive_write_end(XiveRouter *xrtr, uint8_t end_blk,
  316. uint32_t end_idx, XiveEND *end,
  317. uint8_t word_number)
  318. {
  319. SpaprXive *xive = SPAPR_XIVE(xrtr);
  320. if (end_idx >= xive->nr_ends) {
  321. return -1;
  322. }
  323. memcpy(&xive->endt[end_idx], end, sizeof(XiveEND));
  324. return 0;
  325. }
  326. static int spapr_xive_get_nvt(XiveRouter *xrtr,
  327. uint8_t nvt_blk, uint32_t nvt_idx, XiveNVT *nvt)
  328. {
  329. uint32_t vcpu_id = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
  330. PowerPCCPU *cpu = spapr_find_cpu(vcpu_id);
  331. if (!cpu) {
  332. /* TODO: should we assert() if we can't find an NVT? */
  333. return -1;
  334. }
  335. /*
  336. * sPAPR does not maintain a NVT table. Return that the NVT is
  337. * valid if we have found a matching CPU
  338. */
  339. nvt->w0 = cpu_to_be32(NVT_W0_VALID);
  340. return 0;
  341. }
  342. static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
  343. uint32_t nvt_idx, XiveNVT *nvt,
  344. uint8_t word_number)
  345. {
  346. /*
  347. * We don't need to write back to the NVTs because the sPAPR
  348. * machine should never hit a non-scheduled NVT. It should never
  349. * get called.
  350. */
  351. g_assert_not_reached();
  352. }
  353. static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
  354. uint8_t nvt_blk, uint32_t nvt_idx,
  355. bool crowd, bool cam_ignore,
  356. uint8_t priority,
  357. uint32_t logic_serv, XiveTCTXMatch *match)
  358. {
  359. CPUState *cs;
  360. int count = 0;
  361. CPU_FOREACH(cs) {
  362. PowerPCCPU *cpu = POWERPC_CPU(cs);
  363. XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
  364. int ring;
  365. /*
  366. * Skip partially initialized vCPUs. This can happen when
  367. * vCPUs are hotplugged.
  368. */
  369. if (!tctx) {
  370. continue;
  371. }
  372. /*
  373. * Check the thread context CAM lines and record matches.
  374. */
  375. ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, nvt_idx,
  376. cam_ignore, logic_serv);
  377. /*
  378. * Save the matching thread interrupt context and follow on to
  379. * check for duplicates which are invalid.
  380. */
  381. if (ring != -1) {
  382. if (match->tctx) {
  383. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
  384. "context NVT %x/%x\n", nvt_blk, nvt_idx);
  385. return -1;
  386. }
  387. match->ring = ring;
  388. match->tctx = tctx;
  389. count++;
  390. }
  391. }
  392. return count;
  393. }
  394. static uint32_t spapr_xive_presenter_get_config(XivePresenter *xptr)
  395. {
  396. uint32_t cfg = 0;
  397. /*
  398. * Let's claim GEN1 TIMA format. If running with KVM on P10, the
  399. * correct answer is deep in the hardware and not accessible to
  400. * us. But it shouldn't matter as it only affects the presenter
  401. * as seen by a guest OS.
  402. */
  403. cfg |= XIVE_PRESENTER_GEN1_TIMA_OS;
  404. return cfg;
  405. }
  406. static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr)
  407. {
  408. return SPAPR_XIVE_BLOCK_ID;
  409. }
  410. static int spapr_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
  411. uint8_t *pq)
  412. {
  413. SpaprXive *xive = SPAPR_XIVE(xrtr);
  414. assert(SPAPR_XIVE_BLOCK_ID == blk);
  415. *pq = xive_source_esb_get(&xive->source, idx);
  416. return 0;
  417. }
  418. static int spapr_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
  419. uint8_t *pq)
  420. {
  421. SpaprXive *xive = SPAPR_XIVE(xrtr);
  422. assert(SPAPR_XIVE_BLOCK_ID == blk);
  423. *pq = xive_source_esb_set(&xive->source, idx, *pq);
  424. return 0;
  425. }
  426. static const VMStateDescription vmstate_spapr_xive_end = {
  427. .name = TYPE_SPAPR_XIVE "/end",
  428. .version_id = 1,
  429. .minimum_version_id = 1,
  430. .fields = (const VMStateField []) {
  431. VMSTATE_UINT32(w0, XiveEND),
  432. VMSTATE_UINT32(w1, XiveEND),
  433. VMSTATE_UINT32(w2, XiveEND),
  434. VMSTATE_UINT32(w3, XiveEND),
  435. VMSTATE_UINT32(w4, XiveEND),
  436. VMSTATE_UINT32(w5, XiveEND),
  437. VMSTATE_UINT32(w6, XiveEND),
  438. VMSTATE_UINT32(w7, XiveEND),
  439. VMSTATE_END_OF_LIST()
  440. },
  441. };
  442. static const VMStateDescription vmstate_spapr_xive_eas = {
  443. .name = TYPE_SPAPR_XIVE "/eas",
  444. .version_id = 1,
  445. .minimum_version_id = 1,
  446. .fields = (const VMStateField []) {
  447. VMSTATE_UINT64(w, XiveEAS),
  448. VMSTATE_END_OF_LIST()
  449. },
  450. };
  451. static int vmstate_spapr_xive_pre_save(void *opaque)
  452. {
  453. SpaprXive *xive = SPAPR_XIVE(opaque);
  454. if (spapr_xive_in_kernel(xive)) {
  455. return kvmppc_xive_pre_save(xive);
  456. }
  457. return 0;
  458. }
  459. /*
  460. * Called by the sPAPR IRQ backend 'post_load' method at the machine
  461. * level.
  462. */
  463. static int spapr_xive_post_load(SpaprInterruptController *intc, int version_id)
  464. {
  465. SpaprXive *xive = SPAPR_XIVE(intc);
  466. if (spapr_xive_in_kernel(xive)) {
  467. return kvmppc_xive_post_load(xive, version_id);
  468. }
  469. return 0;
  470. }
  471. static const VMStateDescription vmstate_spapr_xive = {
  472. .name = TYPE_SPAPR_XIVE,
  473. .version_id = 1,
  474. .minimum_version_id = 1,
  475. .pre_save = vmstate_spapr_xive_pre_save,
  476. .post_load = NULL, /* handled at the machine level */
  477. .fields = (const VMStateField[]) {
  478. VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL),
  479. VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs,
  480. vmstate_spapr_xive_eas, XiveEAS),
  481. VMSTATE_STRUCT_VARRAY_POINTER_UINT32(endt, SpaprXive, nr_ends,
  482. vmstate_spapr_xive_end, XiveEND),
  483. VMSTATE_END_OF_LIST()
  484. },
  485. };
  486. static int spapr_xive_claim_irq(SpaprInterruptController *intc, int lisn,
  487. bool lsi, Error **errp)
  488. {
  489. SpaprXive *xive = SPAPR_XIVE(intc);
  490. XiveSource *xsrc = &xive->source;
  491. assert(lisn < xive->nr_irqs);
  492. trace_spapr_xive_claim_irq(lisn, lsi);
  493. if (xive_eas_is_valid(&xive->eat[lisn])) {
  494. error_setg(errp, "IRQ %d is not free", lisn);
  495. return -EBUSY;
  496. }
  497. /*
  498. * Set default values when allocating an IRQ number
  499. */
  500. xive->eat[lisn].w |= cpu_to_be64(EAS_VALID | EAS_MASKED);
  501. if (lsi) {
  502. xive_source_irq_set_lsi(xsrc, lisn);
  503. }
  504. if (spapr_xive_in_kernel(xive)) {
  505. return kvmppc_xive_source_reset_one(xsrc, lisn, errp);
  506. }
  507. return 0;
  508. }
  509. static void spapr_xive_free_irq(SpaprInterruptController *intc, int lisn)
  510. {
  511. SpaprXive *xive = SPAPR_XIVE(intc);
  512. assert(lisn < xive->nr_irqs);
  513. trace_spapr_xive_free_irq(lisn);
  514. xive->eat[lisn].w &= cpu_to_be64(~EAS_VALID);
  515. }
  516. static const Property spapr_xive_properties[] = {
  517. DEFINE_PROP_UINT32("nr-irqs", SpaprXive, nr_irqs, 0),
  518. DEFINE_PROP_UINT32("nr-ends", SpaprXive, nr_ends, 0),
  519. DEFINE_PROP_UINT64("vc-base", SpaprXive, vc_base, SPAPR_XIVE_VC_BASE),
  520. DEFINE_PROP_UINT64("tm-base", SpaprXive, tm_base, SPAPR_XIVE_TM_BASE),
  521. DEFINE_PROP_UINT8("hv-prio", SpaprXive, hv_prio, 7),
  522. };
  523. static int spapr_xive_cpu_intc_create(SpaprInterruptController *intc,
  524. PowerPCCPU *cpu, Error **errp)
  525. {
  526. SpaprXive *xive = SPAPR_XIVE(intc);
  527. Object *obj;
  528. SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
  529. obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(xive), errp);
  530. if (!obj) {
  531. return -1;
  532. }
  533. spapr_cpu->tctx = XIVE_TCTX(obj);
  534. return 0;
  535. }
  536. static void xive_tctx_set_os_cam(XiveTCTX *tctx, uint32_t os_cam)
  537. {
  538. uint32_t qw1w2 = cpu_to_be32(TM_QW1W2_VO | os_cam);
  539. memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
  540. }
  541. static void spapr_xive_cpu_intc_reset(SpaprInterruptController *intc,
  542. PowerPCCPU *cpu)
  543. {
  544. XiveTCTX *tctx = spapr_cpu_state(cpu)->tctx;
  545. uint8_t nvt_blk;
  546. uint32_t nvt_idx;
  547. xive_tctx_reset(tctx);
  548. /*
  549. * When a Virtual Processor is scheduled to run on a HW thread,
  550. * the hypervisor pushes its identifier in the OS CAM line.
  551. * Emulate the same behavior under QEMU.
  552. */
  553. spapr_xive_cpu_to_nvt(cpu, &nvt_blk, &nvt_idx);
  554. xive_tctx_set_os_cam(tctx, xive_nvt_cam_line(nvt_blk, nvt_idx));
  555. }
  556. static void spapr_xive_cpu_intc_destroy(SpaprInterruptController *intc,
  557. PowerPCCPU *cpu)
  558. {
  559. SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
  560. xive_tctx_destroy(spapr_cpu->tctx);
  561. spapr_cpu->tctx = NULL;
  562. }
  563. static void spapr_xive_set_irq(SpaprInterruptController *intc, int irq, int val)
  564. {
  565. SpaprXive *xive = SPAPR_XIVE(intc);
  566. trace_spapr_xive_set_irq(irq, val);
  567. if (spapr_xive_in_kernel(xive)) {
  568. kvmppc_xive_source_set_irq(&xive->source, irq, val);
  569. } else {
  570. xive_source_set_irq(&xive->source, irq, val);
  571. }
  572. }
  573. static void spapr_xive_print_info(SpaprInterruptController *intc, GString *buf)
  574. {
  575. SpaprXive *xive = SPAPR_XIVE(intc);
  576. CPUState *cs;
  577. CPU_FOREACH(cs) {
  578. PowerPCCPU *cpu = POWERPC_CPU(cs);
  579. xive_tctx_pic_print_info(spapr_cpu_state(cpu)->tctx, buf);
  580. }
  581. spapr_xive_pic_print_info(xive, buf);
  582. }
  583. static void spapr_xive_dt(SpaprInterruptController *intc, uint32_t nr_servers,
  584. void *fdt, uint32_t phandle)
  585. {
  586. SpaprXive *xive = SPAPR_XIVE(intc);
  587. int node;
  588. uint64_t timas[2 * 2];
  589. /* Interrupt number ranges for the IPIs */
  590. uint32_t lisn_ranges[] = {
  591. cpu_to_be32(SPAPR_IRQ_IPI),
  592. cpu_to_be32(SPAPR_IRQ_IPI + nr_servers),
  593. };
  594. /*
  595. * EQ size - the sizes of pages supported by the system: 4K, 64K,
  596. * 2M, 16M. We only advertise 64K for the moment.
  597. */
  598. uint32_t eq_sizes[] = {
  599. cpu_to_be32(16), /* 64K */
  600. };
  601. /*
  602. * QEMU/KVM only needs to define a single range to reserve the
  603. * escalation priority. A priority bitmask would have been more
  604. * appropriate.
  605. */
  606. uint32_t plat_res_int_priorities[] = {
  607. cpu_to_be32(xive->hv_prio), /* start */
  608. cpu_to_be32(0xff - xive->hv_prio), /* count */
  609. };
  610. /* Thread Interrupt Management Area : User (ring 3) and OS (ring 2) */
  611. timas[0] = cpu_to_be64(xive->tm_base +
  612. XIVE_TM_USER_PAGE * (1ull << TM_SHIFT));
  613. timas[1] = cpu_to_be64(1ull << TM_SHIFT);
  614. timas[2] = cpu_to_be64(xive->tm_base +
  615. XIVE_TM_OS_PAGE * (1ull << TM_SHIFT));
  616. timas[3] = cpu_to_be64(1ull << TM_SHIFT);
  617. _FDT(node = fdt_add_subnode(fdt, 0, xive->nodename));
  618. _FDT(fdt_setprop_string(fdt, node, "device_type", "power-ivpe"));
  619. _FDT(fdt_setprop(fdt, node, "reg", timas, sizeof(timas)));
  620. _FDT(fdt_setprop_string(fdt, node, "compatible", "ibm,power-ivpe"));
  621. _FDT(fdt_setprop(fdt, node, "ibm,xive-eq-sizes", eq_sizes,
  622. sizeof(eq_sizes)));
  623. _FDT(fdt_setprop(fdt, node, "ibm,xive-lisn-ranges", lisn_ranges,
  624. sizeof(lisn_ranges)));
  625. /* For Linux to link the LSIs to the interrupt controller. */
  626. _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
  627. _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));
  628. /* For SLOF */
  629. _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
  630. _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));
  631. /*
  632. * The "ibm,plat-res-int-priorities" property defines the priority
  633. * ranges reserved by the hypervisor
  634. */
  635. _FDT(fdt_setprop(fdt, 0, "ibm,plat-res-int-priorities",
  636. plat_res_int_priorities, sizeof(plat_res_int_priorities)));
  637. }
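/*
 * The node built above looks roughly like this (illustrative sketch; the
 * unit address and property values depend on the machine configuration):
 *
 *   interrupt-controller@<tm_base + user TIMA page> {
 *       device_type = "power-ivpe";
 *       compatible = "ibm,power-ivpe";
 *       reg = <user TIMA address/size> <OS TIMA address/size>;
 *       ibm,xive-eq-sizes = <16>;
 *       ibm,xive-lisn-ranges = <SPAPR_IRQ_IPI  SPAPR_IRQ_IPI + nr_servers>;
 *       interrupt-controller;
 *       #interrupt-cells = <2>;
 *       linux,phandle = <phandle>;
 *       phandle = <phandle>;
 *   };
 *
 * "ibm,plat-res-int-priorities" is added to the root node, not to this one.
 */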
  638. static int spapr_xive_activate(SpaprInterruptController *intc,
  639. uint32_t nr_servers, Error **errp)
  640. {
  641. SpaprXive *xive = SPAPR_XIVE(intc);
  642. if (kvm_enabled()) {
  643. int rc = spapr_irq_init_kvm(kvmppc_xive_connect, intc, nr_servers,
  644. errp);
  645. if (rc < 0) {
  646. return rc;
  647. }
  648. }
  649. /* Activate the XIVE MMIOs */
  650. spapr_xive_mmio_set_enabled(xive, true);
  651. return 0;
  652. }
  653. static void spapr_xive_deactivate(SpaprInterruptController *intc)
  654. {
  655. SpaprXive *xive = SPAPR_XIVE(intc);
  656. spapr_xive_mmio_set_enabled(xive, false);
  657. if (spapr_xive_in_kernel(xive)) {
  658. kvmppc_xive_disconnect(intc);
  659. }
  660. }
  661. static bool spapr_xive_in_kernel_xptr(const XivePresenter *xptr)
  662. {
  663. return spapr_xive_in_kernel(SPAPR_XIVE(xptr));
  664. }
  665. static void spapr_xive_class_init(ObjectClass *klass, void *data)
  666. {
  667. DeviceClass *dc = DEVICE_CLASS(klass);
  668. XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
  669. SpaprInterruptControllerClass *sicc = SPAPR_INTC_CLASS(klass);
  670. XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
  671. SpaprXiveClass *sxc = SPAPR_XIVE_CLASS(klass);
  672. dc->desc = "sPAPR XIVE Interrupt Controller";
  673. device_class_set_props(dc, spapr_xive_properties);
  674. device_class_set_parent_realize(dc, spapr_xive_realize,
  675. &sxc->parent_realize);
  676. dc->vmsd = &vmstate_spapr_xive;
  677. xrc->get_eas = spapr_xive_get_eas;
  678. xrc->get_pq = spapr_xive_get_pq;
  679. xrc->set_pq = spapr_xive_set_pq;
  680. xrc->get_end = spapr_xive_get_end;
  681. xrc->write_end = spapr_xive_write_end;
  682. xrc->get_nvt = spapr_xive_get_nvt;
  683. xrc->write_nvt = spapr_xive_write_nvt;
  684. xrc->get_block_id = spapr_xive_get_block_id;
  685. sicc->activate = spapr_xive_activate;
  686. sicc->deactivate = spapr_xive_deactivate;
  687. sicc->cpu_intc_create = spapr_xive_cpu_intc_create;
  688. sicc->cpu_intc_reset = spapr_xive_cpu_intc_reset;
  689. sicc->cpu_intc_destroy = spapr_xive_cpu_intc_destroy;
  690. sicc->claim_irq = spapr_xive_claim_irq;
  691. sicc->free_irq = spapr_xive_free_irq;
  692. sicc->set_irq = spapr_xive_set_irq;
  693. sicc->print_info = spapr_xive_print_info;
  694. sicc->dt = spapr_xive_dt;
  695. sicc->post_load = spapr_xive_post_load;
  696. xpc->match_nvt = spapr_xive_match_nvt;
  697. xpc->get_config = spapr_xive_presenter_get_config;
  698. xpc->in_kernel = spapr_xive_in_kernel_xptr;
  699. }
  700. static const TypeInfo spapr_xive_info = {
  701. .name = TYPE_SPAPR_XIVE,
  702. .parent = TYPE_XIVE_ROUTER,
  703. .instance_init = spapr_xive_instance_init,
  704. .instance_size = sizeof(SpaprXive),
  705. .class_init = spapr_xive_class_init,
  706. .class_size = sizeof(SpaprXiveClass),
  707. .interfaces = (InterfaceInfo[]) {
  708. { TYPE_SPAPR_INTC },
  709. { }
  710. },
  711. };
  712. static void spapr_xive_register_types(void)
  713. {
  714. type_register_static(&spapr_xive_info);
  715. }
  716. type_init(spapr_xive_register_types)
  717. /*
  718. * XIVE hcalls
  719. *
  720. * The terminology used by the XIVE hcalls is the following :
  721. *
  722. * TARGET  vCPU number
  723. * EQ      Event Queue assigned by OS to receive event data
  724. * ESB     page for source interrupt management
  725. * LISN    Logical Interrupt Source Number identifying a source in the
  726. *         machine
  727. * EISN    Effective Interrupt Source Number used by guest OS to
  728. *         identify source in the guest
  729. *
  730. * The EAS, END, NVT structures are not exposed.
  731. */
  732. /*
  733. * On POWER9, the KVM XIVE device uses priority 7 for the escalation
  734. * interrupts. So we only allow the guest to use priorities [0..6].
  735. */
  736. static bool spapr_xive_priority_is_reserved(SpaprXive *xive, uint8_t priority)
  737. {
  738. return priority >= xive->hv_prio;
  739. }
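/*
 * Example (for illustration): with the default "hv-prio" of 7, guest
 * priorities 0..6 are accepted and anything >= 7 is rejected as reserved,
 * which matches the { start = hv_prio, count = 0xff - hv_prio } range
 * advertised in "ibm,plat-res-int-priorities" above.
 */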
  740. /*
  741. * The H_INT_GET_SOURCE_INFO hcall() is used to obtain the logical
  742. * real address of the MMIO page through which the Event State Buffer
  743. * entry associated with the value of the "lisn" parameter is managed.
  744. *
  745. * Parameters:
  746. * Input
  747. * - R4: "flags"
  748. * Bits 0-63 reserved
  749. * - R5: "lisn" is per "interrupts", "interrupt-map", or
  750. * "ibm,xive-lisn-ranges" properties, or as returned by the
  751. * ibm,query-interrupt-source-number RTAS call, or as returned
  752. * by the H_ALLOCATE_VAS_WINDOW hcall
  753. *
  754. * Output
  755. * - R4: "flags"
  756. * Bits 0-59: Reserved
  757. * Bit 60: H_INT_ESB must be used for Event State Buffer
  758. * management
  759. * Bit 61: 1 == LSI 0 == MSI
  760. * Bit 62: the full function page supports trigger
  761. * Bit 63: Store EOI Supported
  762. * - R5: Logical Real address of full function Event State Buffer
  763. * management page, -1 if H_INT_ESB hcall flag is set to 1.
  764. * - R6: Logical Real Address of trigger only Event State Buffer
  765. * management page or -1.
  766. * - R7: Power of 2 page size for the ESB management pages returned in
  767. * R5 and R6.
  768. */
  769. #define SPAPR_XIVE_SRC_H_INT_ESB PPC_BIT(60) /* ESB manage with H_INT_ESB */
  770. #define SPAPR_XIVE_SRC_LSI PPC_BIT(61) /* Virtual LSI type */
  771. #define SPAPR_XIVE_SRC_TRIGGER PPC_BIT(62) /* Trigger and management
  772. on same page */
  773. #define SPAPR_XIVE_SRC_STORE_EOI PPC_BIT(63) /* Store EOI support */
  774. static target_ulong h_int_get_source_info(PowerPCCPU *cpu,
  775. SpaprMachineState *spapr,
  776. target_ulong opcode,
  777. target_ulong *args)
  778. {
  779. SpaprXive *xive = spapr->xive;
  780. XiveSource *xsrc = &xive->source;
  781. target_ulong flags = args[0];
  782. target_ulong lisn = args[1];
  783. trace_spapr_xive_get_source_info(flags, lisn);
  784. if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
  785. return H_FUNCTION;
  786. }
  787. if (flags) {
  788. return H_PARAMETER;
  789. }
  790. if (lisn >= xive->nr_irqs) {
  791. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
  792. lisn);
  793. return H_P2;
  794. }
  795. if (!xive_eas_is_valid(&xive->eat[lisn])) {
  796. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
  797. lisn);
  798. return H_P2;
  799. }
  800. /*
  801. * All sources are emulated under the main XIVE object and share
  802. * the same characteristics.
  803. */
  804. args[0] = 0;
  805. if (!xive_source_esb_has_2page(xsrc)) {
  806. args[0] |= SPAPR_XIVE_SRC_TRIGGER;
  807. }
  808. if (xsrc->esb_flags & XIVE_SRC_STORE_EOI) {
  809. args[0] |= SPAPR_XIVE_SRC_STORE_EOI;
  810. }
  811. /*
  812. * Force the use of the H_INT_ESB hcall in case of an LSI
  813. * interrupt. This is necessary under KVM to re-trigger the
  814. * interrupt if the level is still asserted
  815. */
  816. if (xive_source_irq_is_lsi(xsrc, lisn)) {
  817. args[0] |= SPAPR_XIVE_SRC_H_INT_ESB | SPAPR_XIVE_SRC_LSI;
  818. }
  819. if (!(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
  820. args[1] = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn);
  821. } else {
  822. args[1] = -1;
  823. }
  824. if (xive_source_esb_has_2page(xsrc) &&
  825. !(args[0] & SPAPR_XIVE_SRC_H_INT_ESB)) {
  826. args[2] = xive->vc_base + xive_source_esb_page(xsrc, lisn);
  827. } else {
  828. args[2] = -1;
  829. }
  830. if (xive_source_esb_has_2page(xsrc)) {
  831. args[3] = xsrc->esb_shift - 1;
  832. } else {
  833. args[3] = xsrc->esb_shift;
  834. }
  835. return H_SUCCESS;
  836. }
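/*
 * Example of the returned geometry (illustrative, assuming the usual
 * two-page ESBs with 64K pages, i.e. esb_shift = 17): for an MSI, R5 and
 * R6 point to the management and trigger pages of the LISN and R7 reports
 * esb_shift - 1 = 16, i.e. a 64K page size for each of the two pages. For
 * an LSI, R5 and R6 are -1 and the guest must use the H_INT_ESB hcall
 * instead.
 */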
  837. /*
  838. * The H_INT_SET_SOURCE_CONFIG hcall() is used to assign a Logical
  839. * Interrupt Source to a target. The Logical Interrupt Source is
  840. * designated with the "lisn" parameter and the target is designated
  841. * with the "target" and "priority" parameters. Upon return from the
  842. * hcall(), no additional interrupts will be directed to the old EQ.
  843. *
  844. * Parameters:
  845. * Input:
  846. * - R4: "flags"
  847. * Bits 0-61: Reserved
  848. * Bit 62: set the "eisn" in the EAS
  849. * Bit 63: masks the interrupt source in the hardware interrupt
  850. * control structure. An interrupt masked by this mechanism will
  851. * be dropped, but its source state bits will still be
  852. * set. There is no race-free way of unmasking and restoring the
  853. * source. Thus this should only be used for interrupts that are
  854. * also masked at the source, and only in cases where the
  855. * interrupt is not meant to be used for a long time, for example
  856. * because no valid target exists for it
  857. * - R5: "lisn" is per "interrupts", "interrupt-map", or
  858. * "ibm,xive-lisn-ranges" properties, or as returned by the
  859. * ibm,query-interrupt-source-number RTAS call, or as returned by
  860. * the H_ALLOCATE_VAS_WINDOW hcall
  861. * - R6: "target" is per "ibm,ppc-interrupt-server#s" or
  862. * "ibm,ppc-interrupt-gserver#s"
  863. * - R7: "priority" is a valid priority not in
  864. * "ibm,plat-res-int-priorities"
  865. * - R8: "eisn" is the guest EISN associated with the "lisn"
  866. *
  867. * Output:
  868. * - None
  869. */
  870. #define SPAPR_XIVE_SRC_SET_EISN PPC_BIT(62)
  871. #define SPAPR_XIVE_SRC_MASK PPC_BIT(63)
  872. static target_ulong h_int_set_source_config(PowerPCCPU *cpu,
  873. SpaprMachineState *spapr,
  874. target_ulong opcode,
  875. target_ulong *args)
  876. {
  877. SpaprXive *xive = spapr->xive;
  878. XiveEAS eas, new_eas;
  879. target_ulong flags = args[0];
  880. target_ulong lisn = args[1];
  881. target_ulong target = args[2];
  882. target_ulong priority = args[3];
  883. target_ulong eisn = args[4];
  884. uint8_t end_blk;
  885. uint32_t end_idx;
  886. trace_spapr_xive_set_source_config(flags, lisn, target, priority, eisn);
  887. if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
  888. return H_FUNCTION;
  889. }
  890. if (flags & ~(SPAPR_XIVE_SRC_SET_EISN | SPAPR_XIVE_SRC_MASK)) {
  891. return H_PARAMETER;
  892. }
  893. if (lisn >= xive->nr_irqs) {
  894. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
  895. lisn);
  896. return H_P2;
  897. }
  898. eas = xive->eat[lisn];
  899. if (!xive_eas_is_valid(&eas)) {
  900. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
  901. lisn);
  902. return H_P2;
  903. }
  904. /* priority 0xff is used to reset the EAS */
  905. if (priority == 0xff) {
  906. new_eas.w = cpu_to_be64(EAS_VALID | EAS_MASKED);
  907. goto out;
  908. }
  909. if (flags & SPAPR_XIVE_SRC_MASK) {
  910. new_eas.w = eas.w | cpu_to_be64(EAS_MASKED);
  911. } else {
  912. new_eas.w = eas.w & cpu_to_be64(~EAS_MASKED);
  913. }
  914. if (spapr_xive_priority_is_reserved(xive, priority)) {
  915. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
  916. " is reserved\n", priority);
  917. return H_P4;
  918. }
  919. /*
  920. * Validate that "target" is part of the list of threads allocated
  921. * to the partition. For that, find the END corresponding to the
  922. * target.
  923. */
  924. if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
  925. return H_P3;
  926. }
  927. new_eas.w = xive_set_field64(EAS_END_BLOCK, new_eas.w, end_blk);
  928. new_eas.w = xive_set_field64(EAS_END_INDEX, new_eas.w, end_idx);
  929. if (flags & SPAPR_XIVE_SRC_SET_EISN) {
  930. new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn);
  931. }
  932. if (spapr_xive_in_kernel(xive)) {
  933. Error *local_err = NULL;
  934. kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err);
  935. if (local_err) {
  936. error_report_err(local_err);
  937. return H_HARDWARE;
  938. }
  939. }
  940. out:
  941. xive->eat[lisn] = new_eas;
  942. return H_SUCCESS;
  943. }
  944. /*
  945. * The H_INT_GET_SOURCE_CONFIG hcall() is used to determine which
  946. * target/priority pair is assigned to the specified Logical Interrupt
  947. * Source.
  948. *
  949. * Parameters:
  950. * Input:
  951. * - R4: "flags"
  952. * Bits 0-63 Reserved
  953. * - R5: "lisn" is per "interrupts", "interrupt-map", or
  954. * "ibm,xive-lisn-ranges" properties, or as returned by the
  955. * ibm,query-interrupt-source-number RTAS call, or as
  956. * returned by the H_ALLOCATE_VAS_WINDOW hcall
  957. *
  958. * Output:
  959. * - R4: Target to which the specified Logical Interrupt Source is
  960. * assigned
  961. * - R5: Priority to which the specified Logical Interrupt Source is
  962. * assigned
  963. * - R6: EISN for the specified Logical Interrupt Source (this will be
  964. * equivalent to the LISN if not changed by H_INT_SET_SOURCE_CONFIG)
  965. */
  966. static target_ulong h_int_get_source_config(PowerPCCPU *cpu,
  967. SpaprMachineState *spapr,
  968. target_ulong opcode,
  969. target_ulong *args)
  970. {
  971. SpaprXive *xive = spapr->xive;
  972. target_ulong flags = args[0];
  973. target_ulong lisn = args[1];
  974. XiveEAS eas;
  975. XiveEND *end;
  976. uint8_t nvt_blk;
  977. uint32_t end_idx, nvt_idx;
  978. trace_spapr_xive_get_source_config(flags, lisn);
  979. if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
  980. return H_FUNCTION;
  981. }
  982. if (flags) {
  983. return H_PARAMETER;
  984. }
  985. if (lisn >= xive->nr_irqs) {
  986. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
  987. lisn);
  988. return H_P2;
  989. }
  990. eas = xive->eat[lisn];
  991. if (!xive_eas_is_valid(&eas)) {
  992. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
  993. lisn);
  994. return H_P2;
  995. }
  996. /* EAS_END_BLOCK is unused on sPAPR */
  997. end_idx = xive_get_field64(EAS_END_INDEX, eas.w);
  998. assert(end_idx < xive->nr_ends);
  999. end = &xive->endt[end_idx];
  1000. nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
  1001. nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);
  1002. args[0] = spapr_xive_nvt_to_target(nvt_blk, nvt_idx);
  1003. if (xive_eas_is_masked(&eas)) {
  1004. args[1] = 0xff;
  1005. } else {
  1006. args[1] = xive_get_field32(END_W7_F0_PRIORITY, end->w7);
  1007. }
  1008. args[2] = xive_get_field64(EAS_END_DATA, eas.w);
  1009. return H_SUCCESS;
  1010. }
  1011. /*
  1012. * The H_INT_GET_QUEUE_INFO hcall() is used to get the logical real
  1013. * address of the notification management page associated with the
  1014. * specified target and priority.
  1015. *
  1016. * Parameters:
  1017. * Input:
  1018. * - R4: "flags"
  1019. * Bits 0-63 Reserved
  1020. * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
  1021. * "ibm,ppc-interrupt-gserver#s"
  1022. * - R6: "priority" is a valid priority not in
  1023. * "ibm,plat-res-int-priorities"
  1024. *
  1025. * Output:
  1026. * - R4: Logical real address of notification page
  1027. * - R5: Power of 2 page size of the notification page
  1028. */
  1029. static target_ulong h_int_get_queue_info(PowerPCCPU *cpu,
  1030. SpaprMachineState *spapr,
  1031. target_ulong opcode,
  1032. target_ulong *args)
  1033. {
  1034. SpaprXive *xive = spapr->xive;
  1035. XiveENDSource *end_xsrc = &xive->end_source;
  1036. target_ulong flags = args[0];
  1037. target_ulong target = args[1];
  1038. target_ulong priority = args[2];
  1039. XiveEND *end;
  1040. uint8_t end_blk;
  1041. uint32_t end_idx;
  1042. trace_spapr_xive_get_queue_info(flags, target, priority);
  1043. if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
  1044. return H_FUNCTION;
  1045. }
  1046. if (flags) {
  1047. return H_PARAMETER;
  1048. }
  1049. /*
  1050. * H_STATE should be returned if a H_INT_RESET is in progress.
  1051. * This is not needed when running the emulation under QEMU
  1052. */
  1053. if (spapr_xive_priority_is_reserved(xive, priority)) {
  1054. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
  1055. " is reserved\n", priority);
  1056. return H_P3;
  1057. }
  1058. /*
  1059. * Validate that "target" is part of the list of threads allocated
  1060. * to the partition. For that, find the END corresponding to the
  1061. * target.
  1062. */
  1063. if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
  1064. return H_P2;
  1065. }
  1066. assert(end_idx < xive->nr_ends);
  1067. end = &xive->endt[end_idx];
  1068. args[0] = xive->end_base + (1ull << (end_xsrc->esb_shift + 1)) * end_idx;
  1069. if (xive_end_is_enqueue(end)) {
  1070. args[1] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
  1071. } else {
  1072. args[1] = 0;
  1073. }
  1074. return H_SUCCESS;
  1075. }
  1076. /*
  1077. * The H_INT_SET_QUEUE_CONFIG hcall() is used to set or reset an EQ for
  1078. * a given "target" and "priority". It is also used to set the
  1079. * notification config associated with the EQ. An EQ size of 0 is
  1080. * used to reset the EQ config for a given target and priority. If
  1081. * resetting the EQ config, the END associated with the given "target"
  1082. * and "priority" will be changed to disable queueing.
  1083. *
  1084. * Upon return from the hcall(), no additional interrupts will be
  1085. * directed to the old EQ (if one was set). The old EQ (if one was
  1086. * set) should be investigated for interrupts that occurred prior to
  1087. * or during the hcall().
  1088. *
  1089. * Parameters:
  1090. * Input:
  1091. * - R4: "flags"
  1092. * Bits 0-62: Reserved
  1093. * Bit 63: Unconditional Notify (n) per the XIVE spec
  1094. * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
  1095. * "ibm,ppc-interrupt-gserver#s"
  1096. * - R6: "priority" is a valid priority not in
  1097. * "ibm,plat-res-int-priorities"
  1098. * - R7: "eventQueue": The logical real address of the start of the EQ
  1099. * - R8: "eventQueueSize": The power of 2 EQ size per "ibm,xive-eq-sizes"
  1100. *
  1101. * Output:
  1102. * - None
  1103. */
  1104. #define SPAPR_XIVE_END_ALWAYS_NOTIFY PPC_BIT(63)
  1105. static target_ulong h_int_set_queue_config(PowerPCCPU *cpu,
  1106. SpaprMachineState *spapr,
  1107. target_ulong opcode,
  1108. target_ulong *args)
  1109. {
  1110. SpaprXive *xive = spapr->xive;
  1111. target_ulong flags = args[0];
  1112. target_ulong target = args[1];
  1113. target_ulong priority = args[2];
  1114. target_ulong qpage = args[3];
  1115. target_ulong qsize = args[4];
  1116. XiveEND end;
  1117. uint8_t end_blk, nvt_blk;
  1118. uint32_t end_idx, nvt_idx;
  1119. trace_spapr_xive_set_queue_config(flags, target, priority, qpage, qsize);
  1120. if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
  1121. return H_FUNCTION;
  1122. }
  1123. if (flags & ~SPAPR_XIVE_END_ALWAYS_NOTIFY) {
  1124. return H_PARAMETER;
  1125. }
  1126. /*
  1127. * H_STATE should be returned if a H_INT_RESET is in progress.
  1128. * This is not needed when running the emulation under QEMU
  1129. */
  1130. if (spapr_xive_priority_is_reserved(xive, priority)) {
  1131. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
  1132. " is reserved\n", priority);
  1133. return H_P3;
  1134. }
  1135. /*
  1136. * Validate that "target" is part of the list of threads allocated
  1137. * to the partition. For that, find the END corresponding to the
  1138. * target.
  1139. */
  1140. if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
  1141. return H_P2;
  1142. }
  1143. assert(end_idx < xive->nr_ends);
  1144. memcpy(&end, &xive->endt[end_idx], sizeof(XiveEND));
  1145. switch (qsize) {
  1146. case 12:
  1147. case 16:
  1148. case 21:
  1149. case 24:
  1150. if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) {
  1151. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx
  1152. " is not naturally aligned with %" HWADDR_PRIx "\n",
  1153. qpage, (hwaddr)1 << qsize);
  1154. return H_P4;
  1155. }
  1156. end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff);
  1157. end.w3 = cpu_to_be32(qpage & 0xffffffff);
  1158. end.w0 |= cpu_to_be32(END_W0_ENQUEUE);
  1159. end.w0 = xive_set_field32(END_W0_QSIZE, end.w0, qsize - 12);
  1160. break;
  1161. case 0:
  1162. /* reset queue and disable queueing */
  1163. spapr_xive_end_reset(&end);
  1164. goto out;
  1165. default:
  1166. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EQ size %"PRIx64"\n",
  1167. qsize);
  1168. return H_P5;
  1169. }
  1170. if (qsize) {
  1171. hwaddr plen = 1 << qsize;
  1172. void *eq;
  1173. /*
  1174. * Validate the guest EQ. We should also check that the queue
  1175. * has been zeroed by the OS.
  1176. */
  1177. eq = address_space_map(CPU(cpu)->as, qpage, &plen, true,
  1178. MEMTXATTRS_UNSPECIFIED);
  1179. if (plen != 1 << qsize) {
  1180. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to map EQ @0x%"
  1181. HWADDR_PRIx "\n", qpage);
  1182. return H_P4;
  1183. }
  1184. address_space_unmap(CPU(cpu)->as, eq, plen, true, plen);
  1185. }
  1186. /* "target" should have been validated above */
  1187. if (spapr_xive_target_to_nvt(target, &nvt_blk, &nvt_idx)) {
  1188. g_assert_not_reached();
  1189. }
  1190. /*
  1191. * Ensure the priority and target are correctly set (they will not
  1192. * be right after allocation)
  1193. */
  1194. end.w6 = xive_set_field32(END_W6_NVT_BLOCK, 0ul, nvt_blk) |
  1195. xive_set_field32(END_W6_NVT_INDEX, 0ul, nvt_idx);
  1196. end.w7 = xive_set_field32(END_W7_F0_PRIORITY, 0ul, priority);
  1197. if (flags & SPAPR_XIVE_END_ALWAYS_NOTIFY) {
  1198. end.w0 |= cpu_to_be32(END_W0_UCOND_NOTIFY);
  1199. } else {
  1200. end.w0 &= cpu_to_be32((uint32_t)~END_W0_UCOND_NOTIFY);
  1201. }
  1202. /*
  1203. * The generation bit for the END starts at 1 and the END page
  1204. * offset counter starts at 0.
  1205. */
  1206. end.w1 = cpu_to_be32(END_W1_GENERATION) |
  1207. xive_set_field32(END_W1_PAGE_OFF, 0ul, 0ul);
  1208. end.w0 |= cpu_to_be32(END_W0_VALID);
  1209. /*
  1210. * TODO: issue syncs required to ensure all in-flight interrupts
  1211. * are complete on the old END
  1212. */
  1213. out:
  1214. if (spapr_xive_in_kernel(xive)) {
  1215. Error *local_err = NULL;
  1216. kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err);
  1217. if (local_err) {
  1218. error_report_err(local_err);
  1219. return H_HARDWARE;
  1220. }
  1221. }
  1222. /* Update END */
  1223. memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND));
  1224. return H_SUCCESS;
  1225. }
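/*
 * Example (illustration only): a guest registering a 64K event queue passes
 * qsize = 16 and a 64K-aligned "qpage". The code above records qsize - 12 = 4
 * in END_W0_QSIZE, splits the queue address across END words 2 and 3, and
 * fills the NVT block/index and priority from "target", so a later
 * H_INT_GET_QUEUE_CONFIG returns the same size as END_W0_QSIZE + 12 = 16.
 */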
  1226. /*
  1227. * The H_INT_GET_QUEUE_CONFIG hcall() is used to get an EQ for a given
  1228. * target and priority.
  1229. *
  1230. * Parameters:
  1231. * Input:
  1232. * - R4: "flags"
  1233. * Bits 0-62: Reserved
  1234. * Bit 63: Debug: Return debug data
  1235. * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
  1236. * "ibm,ppc-interrupt-gserver#s"
  1237. * - R6: "priority" is a valid priority not in
  1238. * "ibm,plat-res-int-priorities"
  1239. *
  1240. * Output:
  1241. * - R4: "flags":
  1242. * Bits 0-61: Reserved
  1243. * Bit 62: The value of Event Queue Generation Number (g) per
  1244. * the XIVE spec if "Debug" = 1
  1245. * Bit 63: The value of Unconditional Notify (n) per the XIVE spec
  1246. * - R5: The logical real address of the start of the EQ
  1247. * - R6: The power of 2 EQ size per "ibm,xive-eq-sizes"
  1248. * - R7: The value of Event Queue Offset Counter per XIVE spec
  1249. * if "Debug" = 1, else 0
  1250. *
  1251. */
  1252. #define SPAPR_XIVE_END_DEBUG PPC_BIT(63)
  1253. static target_ulong h_int_get_queue_config(PowerPCCPU *cpu,
  1254. SpaprMachineState *spapr,
  1255. target_ulong opcode,
  1256. target_ulong *args)
  1257. {
  1258. SpaprXive *xive = spapr->xive;
  1259. target_ulong flags = args[0];
  1260. target_ulong target = args[1];
  1261. target_ulong priority = args[2];
  1262. XiveEND *end;
  1263. uint8_t end_blk;
  1264. uint32_t end_idx;
  1265. trace_spapr_xive_get_queue_config(flags, target, priority);
  1266. if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
  1267. return H_FUNCTION;
  1268. }
  1269. if (flags & ~SPAPR_XIVE_END_DEBUG) {
  1270. return H_PARAMETER;
  1271. }
  1272. /*
  1273. * H_STATE should be returned if a H_INT_RESET is in progress.
  1274. * This is not needed when running the emulation under QEMU
  1275. */
  1276. if (spapr_xive_priority_is_reserved(xive, priority)) {
  1277. qemu_log_mask(LOG_GUEST_ERROR, "XIVE: priority " TARGET_FMT_ld
  1278. " is reserved\n", priority);
  1279. return H_P3;
  1280. }
  1281. /*
  1282. * Validate that "target" is part of the list of threads allocated
  1283. * to the partition. For that, find the END corresponding to the
  1284. * target.
  1285. */
  1286. if (spapr_xive_target_to_end(target, priority, &end_blk, &end_idx)) {
  1287. return H_P2;
  1288. }
  1289. assert(end_idx < xive->nr_ends);
  1290. end = &xive->endt[end_idx];
  1291. args[0] = 0;
  1292. if (xive_end_is_notify(end)) {
  1293. args[0] |= SPAPR_XIVE_END_ALWAYS_NOTIFY;
  1294. }
  1295. if (xive_end_is_enqueue(end)) {
  1296. args[1] = xive_end_qaddr(end);
  1297. args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12;
  1298. } else {
  1299. args[1] = 0;
  1300. args[2] = 0;
  1301. }
  1302. if (spapr_xive_in_kernel(xive)) {
  1303. Error *local_err = NULL;
  1304. kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err);
  1305. if (local_err) {
  1306. error_report_err(local_err);
  1307. return H_HARDWARE;
  1308. }
  1309. }
  1310. /* TODO: do we need any locking on the END ? */
  1311. if (flags & SPAPR_XIVE_END_DEBUG) {
  1312. /* Load the event queue generation number into the return flags */
  1313. args[0] |= (uint64_t)xive_get_field32(END_W1_GENERATION, end->w1) << 62;
  1314. /* Load R7 with the event queue offset counter */
  1315. args[3] = xive_get_field32(END_W1_PAGE_OFF, end->w1);
  1316. } else {
  1317. args[3] = 0;
  1318. }
  1319. return H_SUCCESS;
  1320. }
  1321. /*
  1322. * The H_INT_SET_OS_REPORTING_LINE hcall() is used to set the
  1323. * reporting cache line pair for the calling thread. The reporting
  1324. * cache lines will contain the OS interrupt context when the OS
  1325. * issues a CI store byte to @TIMA+0xC10 to acknowledge the OS
  1326. * interrupt. The reporting cache lines can be reset by inputting -1
  1327. * in "reportingLine". Issuing the CI store byte without reporting
  1328. * cache lines registered will result in the data not being accessible
  1329. * to the OS.
  1330. *
  1331. * Parameters:
  1332. * Input:
  1333. * - R4: "flags"
  1334. * Bits 0-63: Reserved
  1335. * - R5: "reportingLine": The logical real address of the reporting cache
  1336. * line pair
  1337. *
  1338. * Output:
  1339. * - None
  1340. */
  1341. static target_ulong h_int_set_os_reporting_line(PowerPCCPU *cpu,
  1342. SpaprMachineState *spapr,
  1343. target_ulong opcode,
  1344. target_ulong *args)
  1345. {
  1346. target_ulong flags = args[0];
  1347. trace_spapr_xive_set_os_reporting_line(flags);
  1348. if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
  1349. return H_FUNCTION;
  1350. }
  1351. /*
  1352. * H_STATE should be returned if a H_INT_RESET is in progress.
  1353. * This is not needed when running the emulation under QEMU
  1354. */
  1355. /* TODO: H_INT_SET_OS_REPORTING_LINE */
  1356. return H_FUNCTION;
  1357. }

/*
 * The H_INT_GET_OS_REPORTING_LINE hcall() is used to get the logical
 * real address of the reporting cache line pair set for the input
 * "target". If no reporting cache line pair has been set, -1 is
 * returned.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "target" is per "ibm,ppc-interrupt-server#s" or
 *       "ibm,ppc-interrupt-gserver#s"
 * - R6: "reportingLine": The logical real address of the reporting
 *       cache line pair
 *
 * Output:
 * - R4: The logical real address of the reporting line if set, else -1
 */
static target_ulong h_int_get_os_reporting_line(PowerPCCPU *cpu,
                                                SpaprMachineState *spapr,
                                                target_ulong opcode,
                                                target_ulong *args)
{
    target_ulong flags = args[0];

    trace_spapr_xive_get_os_reporting_line(flags);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /* TODO: H_INT_GET_OS_REPORTING_LINE */
    return H_FUNCTION;
}
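
/*
 * Illustrative sketch only (hypothetical guest-side code, not part of
 * QEMU): querying the reporting line for a thread and checking for the
 * "not set" value, assuming a Linux-style plpar_hcall() wrapper.
 *
 *   plpar_hcall(H_INT_GET_OS_REPORTING_LINE, retbuf, 0, target);
 *   if (retbuf[0] == (unsigned long)-1) {
 *       // no reporting cache line pair has been registered
 *   }
 *
 * As with the "set" variant above, QEMU currently returns H_FUNCTION here.
 */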

/*
 * The H_INT_ESB hcall() is used to issue a load or store to the ESB
 * page for the input "lisn". This hcall is only supported for LISNs
 * that have the ESB hcall flag set to 1 when returned from hcall()
 * H_INT_GET_SOURCE_INFO.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-62: Reserved
 *         Bit 63: "Store": when 1, perform a store, otherwise a load
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 * - R6: "esbOffset" is the offset into the ESB page for the load or
 *       store operation
 * - R7: "storeData" is the data to write for a store operation
 *
 * Output:
 * - R4: The value of the load if load operation, else -1
 */
#define SPAPR_XIVE_ESB_STORE PPC_BIT(63)

static target_ulong h_int_esb(PowerPCCPU *cpu,
                              SpaprMachineState *spapr,
                              target_ulong opcode,
                              target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags  = args[0];
    target_ulong lisn   = args[1];
    target_ulong offset = args[2];
    target_ulong data   = args[3];
    hwaddr mmio_addr;
    XiveSource *xsrc = &xive->source;

    trace_spapr_xive_esb(flags, lisn, offset, data);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags & ~SPAPR_XIVE_ESB_STORE) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    if (offset > (1ull << xsrc->esb_shift)) {
        return H_P3;
    }

    if (spapr_xive_in_kernel(xive)) {
        args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data,
                                     flags & SPAPR_XIVE_ESB_STORE);
    } else {
        /*
         * Emulated mode: perform the MMIO access to the ESB management
         * page of "lisn" on behalf of the guest.
         */
        mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset;

        if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8,
                          (flags & SPAPR_XIVE_ESB_STORE),
                          MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%"
                          HWADDR_PRIx "\n", mmio_addr);
            return H_HARDWARE;
        }
        args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data;
    }
    return H_SUCCESS;
}
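
/*
 * Illustrative sketch only (hypothetical guest-side code, not part of
 * QEMU): an ESB load and store issued through H_INT_ESB rather than by
 * mapping the ESB MMIO pages, assuming a Linux-style plpar_hcall()
 * wrapper. "XIVE_ESB_GET" stands for the load offset a guest would use
 * to read the ESB state; the exact constant is an assumption here.
 *
 *   // load: flags bit 63 ("Store") clear, storeData unused
 *   long rc = plpar_hcall(H_INT_ESB, retbuf, 0, lisn, XIVE_ESB_GET, 0);
 *   if (rc == H_SUCCESS) {
 *       unsigned long esb_state = retbuf[0];
 *   }
 *
 *   // store: flags bit 63 set, the returned R4 is -1
 *   plpar_hcall(H_INT_ESB, retbuf, 1 /* Store */, lisn, offset, data);
 */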

/*
 * The H_INT_SYNC hcall() is used to issue hardware syncs that will
 * ensure any in-flight events for the input LISN are in the event
 * queue.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 * - R5: "lisn" is per "interrupts", "interrupt-map", or
 *       "ibm,xive-lisn-ranges" properties, or as returned by the
 *       ibm,query-interrupt-source-number RTAS call, or as
 *       returned by the H_ALLOCATE_VAS_WINDOW hcall
 *
 * Output:
 * - None
 */
static target_ulong h_int_sync(PowerPCCPU *cpu,
                               SpaprMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    XiveEAS eas;
    target_ulong flags = args[0];
    target_ulong lisn = args[1];

    trace_spapr_xive_sync(flags, lisn);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    if (lisn >= xive->nr_irqs) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    eas = xive->eat[lisn];
    if (!xive_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN " TARGET_FMT_lx "\n",
                      lisn);
        return H_P2;
    }

    /*
     * H_STATE should be returned if an H_INT_RESET is in progress.
     * This is not needed when running the emulation under QEMU
     */

    /*
     * This is not real hardware. Nothing to be done here unless
     * running under KVM.
     */
    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_sync_source(xive, lisn, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}
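
/*
 * Illustrative sketch only (hypothetical guest-side code, not part of
 * QEMU): a guest would typically issue H_INT_SYNC after reconfiguring
 * or masking a source, so that any in-flight events for that LISN are
 * guaranteed to have reached the event queue before it proceeds.
 *
 *   plpar_hcall(H_INT_SYNC, retbuf, 0 /* flags: reserved */, lisn);
 */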

/*
 * The H_INT_RESET hcall() is used to reset all of the partition's
 * interrupt exploitation structures to their initial state. This
 * means losing all interrupt state previously set via
 * H_INT_SET_SOURCE_CONFIG and H_INT_SET_QUEUE_CONFIG.
 *
 * Parameters:
 * Input:
 * - R4: "flags"
 *         Bits 0-63: Reserved
 *
 * Output:
 * - None
 */
static target_ulong h_int_reset(PowerPCCPU *cpu,
                                SpaprMachineState *spapr,
                                target_ulong opcode,
                                target_ulong *args)
{
    SpaprXive *xive = spapr->xive;
    target_ulong flags = args[0];

    trace_spapr_xive_reset(flags);

    if (!spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) {
        return H_FUNCTION;
    }

    if (flags) {
        return H_PARAMETER;
    }

    device_cold_reset(DEVICE(xive));

    if (spapr_xive_in_kernel(xive)) {
        Error *local_err = NULL;

        kvmppc_xive_reset(xive, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return H_HARDWARE;
        }
    }
    return H_SUCCESS;
}
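
/*
 * Illustrative sketch only (hypothetical guest-side code, not part of
 * QEMU): tearing down the whole XIVE exploitation setup, for instance
 * before a kexec or when switching interrupt modes, could be done with
 * a single H_INT_RESET.
 *
 *   plpar_hcall(H_INT_RESET, retbuf, 0 /* flags: reserved */);
 */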

void spapr_xive_hcall_init(SpaprMachineState *spapr)
{
    spapr_register_hypercall(H_INT_GET_SOURCE_INFO, h_int_get_source_info);
    spapr_register_hypercall(H_INT_SET_SOURCE_CONFIG, h_int_set_source_config);
    spapr_register_hypercall(H_INT_GET_SOURCE_CONFIG, h_int_get_source_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_INFO, h_int_get_queue_info);
    spapr_register_hypercall(H_INT_SET_QUEUE_CONFIG, h_int_set_queue_config);
    spapr_register_hypercall(H_INT_GET_QUEUE_CONFIG, h_int_get_queue_config);
    spapr_register_hypercall(H_INT_SET_OS_REPORTING_LINE,
                             h_int_set_os_reporting_line);
    spapr_register_hypercall(H_INT_GET_OS_REPORTING_LINE,
                             h_int_get_os_reporting_line);
    spapr_register_hypercall(H_INT_ESB, h_int_esb);
    spapr_register_hypercall(H_INT_SYNC, h_int_sync);
    spapr_register_hypercall(H_INT_RESET, h_int_reset);
}