/*
 * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/dma.h"
#include "hw/qdev-properties.h"
#include "monitor/monitor.h"
#include "hw/ppc/xive.h"
#include "hw/ppc/xive2.h"
#include "hw/ppc/xive2_regs.h"

uint32_t xive2_router_get_config(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_config(xrtr);
}

void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, Monitor *mon)
{
    if (!xive2_eas_is_valid(eas)) {
        return;
    }

    monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
                   lisn, xive2_eas_is_masked(eas) ? "M" : " ",
                   (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width,
                                    Monitor *mon)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10);
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    monitor_printf(mon, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1);
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2);
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
                       be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    monitor_printf(mon, "]");
}

void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, Monitor *mon)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
    uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
    uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive2_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESn, end->w1);

    monitor_printf(mon,
                   " %08x %c%c %c%c%c%c%c%c%c%c%c%c prio:%d nvp:%02x/%04x",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive2_end_is_valid(end)    ? 'v' : '-',
                   xive2_end_is_enqueue(end)  ? 'q' : '-',
                   xive2_end_is_notify(end)   ? 'n' : '-',
                   xive2_end_is_backlog(end)  ? 'b' : '-',
                   xive2_end_is_escalate(end) ? 'e' : '-',
                   xive2_end_is_escalate_end(end) ? 'N' : '-',
                   xive2_end_is_uncond_escalation(end)   ? 'u' : '-',
                   xive2_end_is_silent_escalation(end)   ? 's' : '-',
                   xive2_end_is_firmware1(end)   ? 'f' : '-',
                   xive2_end_is_firmware2(end)   ? 'F' : '-',
                   priority, nvp_blk, nvp_idx);

    if (qaddr_base) {
        monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                       qaddr_base, qindex, qentries, qgen);
        xive2_end_queue_pic_print_info(end, 6, mon);
    }
    monitor_printf(mon, "\n");
}

void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
                                  Monitor *mon)
{
    Xive2Eas *eas = (Xive2Eas *) &end->w4;
    uint8_t pq;

    if (!xive2_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESe, end->w1);

    monitor_printf(mon, " %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                   end_idx,
                   pq & XIVE_ESB_VAL_P ? 'P' : '-',
                   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                   xive2_eas_is_valid(eas) ? 'v' : ' ',
                   xive2_eas_is_masked(eas) ? 'M' : ' ',
                   (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                   (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}

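/*
 * Worked example for xive2_end_enqueue() below: the Event Queue is a
 * power-of-2 ring of 4-byte entries in guest memory. Each entry packs
 * the current generation bit into the top bit and the 31-bit END data
 * into the rest:
 *
 *     qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff))
 *
 * With QSIZE = 0, the ring holds 1 << (0 + 10) = 1024 entries; when the
 * index wraps back to 0 the generation bit is flipped, which lets the
 * consumer distinguish freshly written entries from stale ones.
 */
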
static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata),
                         MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) {
        qgen ^= 1;
        end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen);

        /* TODO(PowerNV): reset GF bit on a cache watch operation */
        end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
}

/*
 * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
 *
 * TIMA Gen2 VP “save & restore” (S&R) indicated by H bit next to V bit
 *
 *   - if a context is enabled with the H bit set, the VP context
 *     information is retrieved from the NVP structure (“check out”)
 *     and stored back on a context pull (“check in”), the SW receives
 *     the same context pull information as on P9
 *
 *   - the H bit cannot be changed while the V bit is set, i.e. a
 *     context cannot be set up in the TIMA and then be “pushed” into
 *     the NVP by changing the H bit while the context is enabled
 */
static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    Xive2Nvp nvp;
    uint8_t *regs = &tctx->regs[TM_QW1_OS];

    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_hw(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_co(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checkout\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) &&
        xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: NVP %x/%x invalid checkout Thread %x\n",
                      nvp_blk, nvp_idx, pir);
        return;
    }

    nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]);
    nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]);
    nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

    nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0);
    /* NVP2_W1_CO_THRID_VALID only set once */
    nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
}

static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk,
                                uint32_t *nvp_idx, bool *vo, bool *ho)
{
    *nvp_blk = xive2_nvp_blk(cam);
    *nvp_idx = xive2_nvp_idx(cam);
    *vo = !!(cam & TM2_QW1W2_VO);
    *ho = !!(cam & TM2_QW1W2_HO);
}

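/*
 * The OS CAM word decoded above carries the NVP block/index of the
 * virtual processor plus two control bits: TM2_QW1W2_VO (the V bit,
 * context valid) and TM2_QW1W2_HO (the H bit, hardware save & restore,
 * see the TIMA Gen2 comment above). A pull reads and clears VO; a push
 * installs a new CAM word and, when VO is set, checks for interrupts
 * to resend.
 */
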
uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset, unsigned size)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw1w2_new;
    uint32_t cam = be32_to_cpu(qw1w2);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;
    bool do_save;

    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_save);

    if (!vo) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
                      nvp_blk, nvp_idx);
    }

    /* Invalidate CAM line */
    qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0);
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4);

    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
        xive2_tctx_save_os_ctx(xrtr, tctx, nvp_blk, nvp_idx);
    }

    xive_tctx_reset_os_signal(tctx);
    return qw1w2;
}

static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                         uint8_t nvp_blk, uint32_t nvp_idx,
                                         Xive2Nvp *nvp)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t cppr;

    if (!xive2_nvp_is_hw(nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return 0;
    }

    cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2);
    nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2);

    tctx->regs[TM_QW1_OS + TM_CPPR] = cppr;
    /* we don't model LSMFB */

    nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir);

    /*
     * Checkout privilege: 0:OS, 1:Pool, 2:Hard
     *
     * TODO: we only support OS push/pull
     */
    nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0);

    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1);

    /* return restored CPPR to generate a CPU exception if needed */
    return cppr;
}

static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx,
                                   bool do_restore)
{
    Xive2Nvp nvp;
    uint8_t ipb;

    /*
     * Grab the associated thread interrupt context registers in the
     * associated NVP
     */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    /* Automatically restore thread context registers */
    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE &&
        do_restore) {
        xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp);
    }

    ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
    if (ipb) {
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
    }

    /*
     * Always call xive_tctx_ipb_update(). Even if no escalation was
     * triggered, there could be a pending interrupt which was saved
     * when the context was pulled and that we need to take into
     * account by recalculating the PIPR (which is not saved/restored).
     * It will also raise the External interrupt signal if needed.
     */
    xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
}

/*
 * Updating the OS CAM line can trigger a resend of an interrupt
 */
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                          hwaddr offset, uint64_t value, unsigned size)
{
    uint32_t cam = value;
    uint32_t qw1w2 = cpu_to_be32(cam);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;
    bool do_restore;

    xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);

    /* First update the thread context */
    memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);

    /* Check the interrupt pending bits */
    if (vo) {
        xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx,
                               do_restore);
    }
}

/*
 * XIVE Router (aka. Virtualization Controller or IVRE)
 */

int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                         Xive2Eas *eas)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}

static
int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
}

static
int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                        uint8_t *pq)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
}

int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
                         Xive2End *end)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_end(xrtr, end_blk, end_idx, end);
}

int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
                           Xive2End *end, uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
}

int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
                         Xive2Nvp *nvp)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp);
}

int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
                           Xive2Nvp *nvp, uint8_t word_number)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
}

static int xive2_router_get_block_id(Xive2Router *xrtr)
{
    Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);

    return xrc->get_block_id(xrtr);
}

/*
 * Encode the HW CAM line with 7bit or 8bit thread id. The thread id
 * width and block id width are configurable at the IC level.
 *
 *    chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
 *    chipid << 24 | 0000 0000 0000 0001 threadid   (8Bit)
 */
static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t blk = xive2_router_get_block_id(xrtr);
    uint8_t tid_shift =
        xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
    uint8_t tid_mask = (1 << tid_shift) - 1;

    return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
}

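/*
 * For example, in 7-bit thread id mode a PIR value of 0x15 yields the
 * index 1 << 7 | 0x15 = 0x95 for the CAM line computed above; the
 * leading 1 bit sits just above the thread id and marks its width, as
 * shown in the layout diagram.
 */
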
/*
 * The thread context register words are in big-endian format.
 */
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                               uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam =   xive2_nvp_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
            cam == xive2_tctx_hw_cam_line(xptr, tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
            cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}

static void xive2_router_realize(DeviceState *dev, Error **errp)
{
    Xive2Router *xrtr = XIVE2_ROUTER(dev);

    assert(xrtr->xfb);
}

/*
 * Notification using the END ESe/ESn bit (Event State Buffer for
 * escalation and notification). Provides further coalescing in the
 * Router.
 */
static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
                                       uint32_t end_idx, Xive2End *end,
                                       uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive2_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}

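/*
 * Reminder of the 2-bit ESB state machine driven by xive_esb_trigger()
 * above (implemented in xive.c): PQ=00 (reset) moves to 10 (pending)
 * and notifies; PQ=10 or 11 moves to 11 (pending and queued) without
 * notifying again; PQ=01 (off) stays off. The Q bit thus coalesces
 * triggers that arrive while a notification is already in flight.
 */
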
/*
 * An END trigger can come from an event trigger (IPI or HW) or from
 * another chip. We don't model the PowerBus but the END trigger
 * message has the same parameters as the function below.
 */
static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
                                    uint32_t end_idx, uint32_t end_data)
{
    Xive2End end;
    uint8_t priority;
    uint8_t format;
    bool found;
    Xive2Nvp nvp;
    uint8_t nvp_blk;
    uint32_t nvp_idx;

    /* END cache lookup */
    if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    if (xive2_end_is_enqueue(&end)) {
        xive2_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive2_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVP notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7);

    /* The END is masked */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive2_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification
     */
    nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
    nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);

    /* NVP cache lookup */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
                      nvp_blk, nvp_idx);
        return;
    }

    found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
                          xive_get_field32(END2_W6_IGNORE, end.w7),
                          priority,
                          xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVP is dispatched on a HW thread :
     * - specific VP: update the NVP structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
    if (xive2_end_is_backlog(&end)) {
        uint8_t ipb;

        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }

        /*
         * Record the IPB in the associated NVP structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
        ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
            xive_priority_to_ipb(priority);
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive2_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive2_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of escalation notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger
     */
    xive2_router_end_notify(xrtr,
                            xive_get_field32(END2_W4_END_BLOCK, end.w4),
                            xive_get_field32(END2_W4_ESC_END_INDEX, end.w4),
                            xive_get_field32(END2_W5_ESC_END_DATA, end.w5));
}

void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    Xive2Eas eas;

    /* EAS cache lookup */
    if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    if (!pq_checked) {
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        notify = xive_esb_trigger(&pq);

        if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        if (!notify) {
            return;
        }
    }

    if (!xive2_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn);
        return;
    }

    if (xive2_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive2_router_end_notify(xrtr,
                            xive_get_field64(EAS2_END_BLOCK, eas.w),
                            xive_get_field64(EAS2_END_INDEX, eas.w),
                            xive_get_field64(EAS2_END_DATA, eas.w));
}

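/*
 * To summarize the routing path modeled above: a source raises a LISN,
 * the EAS table maps it to an END, xive2_router_end_notify() queues the
 * event data and asks the presenter to match the target NVP against the
 * thread contexts, falling back to the NVP backlog (IPB) or to the
 * escalation END when no matching thread is dispatched.
 */
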
static Property xive2_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive2_router_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    dc->desc = "XIVE2 Router Engine";
    device_class_set_props(dc, xive2_router_properties);
    /* Parent is SysBusDeviceClass. No need to call its realize hook */
    dc->realize = xive2_router_realize;
    xnc->notify = xive2_router_notify;
}

static const TypeInfo xive2_router_info = {
    .name          = TYPE_XIVE2_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .instance_size = sizeof(Xive2Router),
    .class_size    = sizeof(Xive2RouterClass),
    .class_init    = xive2_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};

static inline bool addr_is_even(hwaddr addr, uint32_t shift)
{
    return !((addr >> shift) & 1);
}

static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}

static void xive2_end_source_write(void *opaque, hwaddr addr,
                                   uint64_t value, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    bool notify = false;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case 0 ... 0x3FF:
        notify = xive_esb_trigger(&pq);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        /* TODO: can we check StoreEOI availability from the router ? */
        notify = xive_esb_eoi(&pq);
        break;

    case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
        if (end_esmask == END2_W1_ESe) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x can not EQ inject on ESe\n",
                          end_blk, end_idx);
            return;
        }
        notify = true;
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n",
                      offset);
        return;
    }

    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    /* TODO: Forward the source event notification for routing */
    if (notify) {
        ;
    }
}

static const MemoryRegionOps xive2_end_source_ops = {
    .read = xive2_end_source_read,
    .write = xive2_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

static void xive2_end_source_realize(DeviceState *dev, Error **errp)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev);

    assert(xsrc->xrtr);

    if (!xsrc->nr_ends) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    if (xsrc->esb_shift != XIVE_ESB_4K &&
        xsrc->esb_shift != XIVE_ESB_64K) {
        error_setg(errp, "Invalid ESB shift setting");
        return;
    }

    /*
     * Each END is assigned an even/odd pair of MMIO pages, the even page
     * manages the ESn field while the odd page manages the ESe field.
     */
    memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
                          &xive2_end_source_ops, xsrc, "xive.end",
                          (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
}

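/*
 * For instance, with the default "shift" property of XIVE_ESB_64K
 * (16), the region above spans 1 << (16 + 1) = 128KB per END: a 64KB
 * even page for ESn and a 64KB odd page for ESe.
 */
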
static Property xive2_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
                     Xive2Router *),
    DEFINE_PROP_END_OF_LIST(),
};

static void xive2_end_source_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "XIVE END Source";
    device_class_set_props(dc, xive2_end_source_properties);
    dc->realize = xive2_end_source_realize;
    dc->user_creatable = false;
}

static const TypeInfo xive2_end_source_info = {
    .name          = TYPE_XIVE2_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(Xive2EndSource),
    .class_init    = xive2_end_source_class_init,
};

static void xive2_register_types(void)
{
    type_register_static(&xive2_router_info);
    type_register_static(&xive2_end_source_info);
}

type_init(xive2_register_types)