xive2.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274
  1. /*
  2. * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
  3. *
  4. * Copyright (c) 2019-2022, IBM Corporation..
  5. *
  6. * This code is licensed under the GPL version 2 or later. See the
  7. * COPYING file in the top-level directory.
  8. */
  9. #include "qemu/osdep.h"
  10. #include "qemu/log.h"
  11. #include "qemu/module.h"
  12. #include "qapi/error.h"
  13. #include "target/ppc/cpu.h"
  14. #include "system/cpus.h"
  15. #include "system/dma.h"
  16. #include "hw/qdev-properties.h"
  17. #include "hw/ppc/xive.h"
  18. #include "hw/ppc/xive2.h"
  19. #include "hw/ppc/xive2_regs.h"
  20. uint32_t xive2_router_get_config(Xive2Router *xrtr)
  21. {
  22. Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
  23. return xrc->get_config(xrtr);
  24. }
  25. static int xive2_router_get_block_id(Xive2Router *xrtr)
  26. {
  27. Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
  28. return xrc->get_block_id(xrtr);
  29. }
  30. static uint64_t xive2_nvp_reporting_addr(Xive2Nvp *nvp)
  31. {
  32. uint64_t cache_addr;
  33. cache_addr = xive_get_field32(NVP2_W6_REPORTING_LINE, nvp->w6) << 24 |
  34. xive_get_field32(NVP2_W7_REPORTING_LINE, nvp->w7);
  35. cache_addr <<= 8; /* aligned on a cache line pair */
  36. return cache_addr;
  37. }
  38. static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority)
  39. {
  40. uint32_t val = 0;
  41. uint8_t *ptr, i;
  42. if (priority > 7) {
  43. return 0;
  44. }
  45. /*
  46. * The per-priority backlog counters are 24-bit and the structure
  47. * is stored in big endian
  48. */
  49. ptr = (uint8_t *)&nvgc->w2 + priority * 3;
  50. for (i = 0; i < 3; i++, ptr++) {
  51. val = (val << 8) + *ptr;
  52. }
  53. return val;
  54. }
/*
 * Dump one EAS entry to 'buf': its LISN, mask state and the END it
 * routes to. Invalid (unconfigured) entries are skipped.
 */
void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf)
{
    if (!xive2_eas_is_valid(eas)) {
        return;
    }

    g_string_append_printf(buf, " %08x %s end:%02x/%04x data:%08x\n",
                           lisn, xive2_eas_is_masked(eas) ? "M" : " ",
                           (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}
/*
 * Dump the last 'width' entries of an END event queue, read back from
 * guest memory, with '^' marking the most recent entry.
 */
void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width, GString *buf)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qentries = 1 << (qsize + 10); /* queue holds 2^(qsize+10) words */
    int i;

    /*
     * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
     */
    g_string_append_printf(buf, " [ ");
    qindex = (qindex - (width - 1)) & (qentries - 1); /* masked wrap-around */
    for (i = 0; i < width; i++) {
        uint64_t qaddr = qaddr_base + (qindex << 2); /* 4-byte queue words */
        uint32_t qdata = -1;

        if (dma_memory_read(&address_space_memory, qaddr, &qdata,
                            sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
                          HWADDR_PRIx "\n", qaddr);
            return;
        }
        g_string_append_printf(buf, "%s%08x ", i == width - 1 ? "^" : "",
                               be32_to_cpu(qdata));
        qindex = (qindex + 1) & (qentries - 1);
    }
    g_string_append_printf(buf, "]");
}
/*
 * Dump one END entry: its ESn PQ bits, state flags, priority, target
 * NVP, and (if an event queue is configured) the queue tail.
 */
void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qentries = 1 << (qsize + 10);

    uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
    uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
    uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
    uint8_t pq;

    if (!xive2_end_is_valid(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESn, end->w1);

    g_string_append_printf(buf,
                           " %08x %c%c %c%c%c%c%c%c%c%c%c%c%c %c%c "
                           "prio:%d nvp:%02x/%04x",
                           end_idx,
                           pq & XIVE_ESB_VAL_P ? 'P' : '-',
                           pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                           xive2_end_is_valid(end)    ? 'v' : '-',
                           xive2_end_is_enqueue(end)  ? 'q' : '-',
                           xive2_end_is_notify(end)   ? 'n' : '-',
                           xive2_end_is_backlog(end)  ? 'b' : '-',
                           xive2_end_is_precluded_escalation(end) ? 'p' : '-',
                           xive2_end_is_escalate(end) ? 'e' : '-',
                           xive2_end_is_escalate_end(end) ? 'N' : '-',
                           xive2_end_is_uncond_escalation(end)   ? 'u' : '-',
                           xive2_end_is_silent_escalation(end)   ? 's' : '-',
                           xive2_end_is_firmware1(end)   ? 'f' : '-',
                           xive2_end_is_firmware2(end)   ? 'F' : '-',
                           xive2_end_is_ignore(end) ? 'i' : '-',
                           xive2_end_is_crowd(end)  ? 'c' : '-',
                           priority, nvp_blk, nvp_idx);

    if (qaddr_base) {
        g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d",
                               qaddr_base, qindex, qentries, qgen);
        xive2_end_queue_pic_print_info(end, 6, buf);
    }
    g_string_append_c(buf, '\n');
}
/*
 * Dump the escalation EAS embedded in an END (END word 4/5), along
 * with the ESe PQ bits. Only meaningful when escalation is enabled.
 */
void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
                                  GString *buf)
{
    Xive2Eas *eas = (Xive2Eas *) &end->w4; /* w4/w5 hold an EAS layout */
    uint8_t pq;

    if (!xive2_end_is_escalate(end)) {
        return;
    }

    pq = xive_get_field32(END2_W1_ESe, end->w1);

    g_string_append_printf(buf, " %08x %c%c %c%c end:%02x/%04x data:%08x\n",
                           end_idx,
                           pq & XIVE_ESB_VAL_P ? 'P' : '-',
                           pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
                           xive2_eas_is_valid(eas) ? 'v' : ' ',
                           xive2_eas_is_masked(eas) ? 'M' : ' ',
                           (uint8_t)  xive_get_field64(EAS2_END_BLOCK, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
                           (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
}
/*
 * Dump one NVP entry: its END back-link, IPB, and - when the NVP is
 * HW controlled - the extra fields maintained by hardware.
 */
void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf)
{
    uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);
    uint64_t cache_line = xive2_nvp_reporting_addr(nvp);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    g_string_append_printf(buf, " %08x end:%02x/%04x IPB:%02x PGoFirst:%02x",
                           nvp_idx, eq_blk, eq_idx,
                           xive_get_field32(NVP2_W2_IPB, nvp->w2),
                           xive_get_field32(NVP2_W0_PGOFIRST, nvp->w0));
    if (cache_line) {
        g_string_append_printf(buf, " reporting CL:%016"PRIx64, cache_line);
    }

    /*
     * When the NVP is HW controlled, more fields are updated
     */
    if (xive2_nvp_is_hw(nvp)) {
        g_string_append_printf(buf, " CPPR:%02x",
                               xive_get_field32(NVP2_W2_CPPR, nvp->w2));
        if (xive2_nvp_is_co(nvp)) { /* checked out on a thread */
            g_string_append_printf(buf, " CO:%04x",
                                   xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
        }
    }
    g_string_append_c(buf, '\n');
}
/*
 * Dump one NVGC (group/crowd) entry: PGoNext and the per-priority
 * backlog counters.
 */
void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, GString *buf)
{
    uint8_t i;

    if (!xive2_nvgc_is_valid(nvgc)) {
        return;
    }

    g_string_append_printf(buf, " %08x PGoNext:%02x bklog: ", nvgc_idx,
                           xive_get_field32(NVGC2_W0_PGONEXT, nvgc->w0));
    for (i = 0; i <= XIVE_PRIORITY_MAX; i++) {
        g_string_append_printf(buf, "[%d]=0x%x ",
                               i, xive2_nvgc_get_backlog(nvgc, i));
    }
    g_string_append_printf(buf, "\n");
}
/*
 * Push one event data word into the END's event queue in guest memory
 * and advance the producer index, toggling the generation bit each
 * time the queue wraps around.
 */
static void xive2_end_enqueue(Xive2End *end, uint32_t data)
{
    uint64_t qaddr_base = xive2_end_qaddr(end);
    uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
    uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
    uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);

    uint64_t qaddr = qaddr_base + (qindex << 2);
    /* queue word = generation bit in the MSB + 31 bits of data, BE */
    uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
    uint32_t qentries = 1 << (qsize + 10);

    if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata),
                         MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
                      HWADDR_PRIx "\n", qaddr);
        return;
    }

    qindex = (qindex + 1) & (qentries - 1);
    if (qindex == 0) { /* wrapped: flip the generation */
        qgen ^= 1;
        end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen);

        /* TODO(PowerNV): reset GF bit on a cache watch operation */
        end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen);
    }
    end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
}
  220. /*
  221. * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
  222. *
  223. * TIMA Gen2 VP “save & restore” (S&R) indicated by H bit next to V bit
  224. *
  225. * - if a context is enabled with the H bit set, the VP context
  226. * information is retrieved from the NVP structure (“check out”)
  227. * and stored back on a context pull (“check in”), the SW receives
  228. * the same context pull information as on P9
  229. *
  230. * - the H bit cannot be changed while the V bit is set, i.e. a
  231. * context cannot be set up in the TIMA and then be “pushed” into
  232. * the NVP by changing the H bit while the context is enabled
  233. */
/*
 * Save ("check in") the thread interrupt context registers of 'ring'
 * back into the NVP on a context pull, then clear the checkout state.
 * Guards reject NVPs that are missing, invalid, not HW owned, not
 * checked out, or checked out to a different thread (PIR mismatch).
 */
static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                uint8_t nvp_blk, uint32_t nvp_idx,
                                uint8_t ring)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    Xive2Nvp nvp;
    uint8_t *regs = &tctx->regs[ring];

    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_hw(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_co(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checkout\n",
                      nvp_blk, nvp_idx);
        return;
    }

    /* if a thread id was recorded at checkout, it must match ours */
    if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) &&
        xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "XIVE: NVP %x/%x invalid checkout Thread %x\n",
                      nvp_blk, nvp_idx, pir);
        return;
    }

    /* save IPB/CPPR/LSMFB from the TIMA ring into NVP word 2 */
    nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]);
    nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]);
    nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

    /* mark the NVP as no longer checked out */
    nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0);
    /* NVP2_W1_CO_THRID_VALID only set once */
    nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
}
  278. static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk,
  279. uint32_t *nvp_idx, bool *valid, bool *hw)
  280. {
  281. *nvp_blk = xive2_nvp_blk(cam);
  282. *nvp_idx = xive2_nvp_idx(cam);
  283. *valid = !!(cam & TM2_W2_VALID);
  284. *hw = !!(cam & TM2_W2_HW);
  285. }
  286. /*
  287. * Encode the HW CAM line with 7bit or 8bit thread id. The thread id
  288. * width and block id width is configurable at the IC level.
  289. *
  290. * chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
  291. * chipid << 24 | 0000 0000 0000 0001 threadid (8Bit)
  292. */
  293. static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
  294. {
  295. Xive2Router *xrtr = XIVE2_ROUTER(xptr);
  296. CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
  297. uint32_t pir = env->spr_cb[SPR_PIR].default_value;
  298. uint8_t blk = xive2_router_get_block_id(xrtr);
  299. uint8_t tid_shift =
  300. xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
  301. uint8_t tid_mask = (1 << tid_shift) - 1;
  302. return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
  303. }
/*
 * Pull (disable) the thread context of 'ring' and all lower rings:
 * invalidate the CAM lines, optionally save the context back to the
 * NVP (S&R mode), and lower the external interrupt signals.
 * Returns the (big-endian) word2 of the pulled ring, as seen by SW.
 */
static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                                  hwaddr offset, unsigned size, uint8_t ring)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t target_ringw2 = xive_tctx_word2(&tctx->regs[ring]);
    uint32_t cam = be32_to_cpu(target_ringw2);
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    uint8_t cur_ring;
    bool valid;
    bool do_save; /* H bit: save & restore requested by SW */

    xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save);

    /* a pull of an invalid context is suspicious but not fatal */
    if (!valid) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
                      nvp_blk, nvp_idx);
    }

    /* Invalidate CAM line of requested ring and all lower rings */
    for (cur_ring = TM_QW0_USER; cur_ring <= ring;
         cur_ring += XIVE_TM_RING_SIZE) {
        uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]);
        uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0);
        memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4);
    }

    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
        xive2_tctx_save_ctx(xrtr, tctx, nvp_blk, nvp_idx, ring);
    }

    /*
     * Lower external interrupt line of requested ring and below except for
     * USER, which doesn't exist.
     */
    for (cur_ring = TM_QW1_OS; cur_ring <= ring;
         cur_ring += XIVE_TM_RING_SIZE) {
        xive_tctx_reset_signal(tctx, cur_ring);
    }
    return target_ringw2;
}
/* TIMA operation: pull the OS ring context. */
uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset, unsigned size)
{
    return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS);
}
#define REPORT_LINE_GEN1_SIZE 16

/*
 * Build the 16-byte Gen1 reporting line from the thread context
 * registers. The layout below is hand-picked per the XIVE
 * architecture (see comment in the body).
 */
static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data,
                                      uint8_t size)
{
    uint8_t *regs = tctx->regs;

    g_assert(size == REPORT_LINE_GEN1_SIZE);
    memset(data, 0, size);

    /*
     * See xive architecture for description of what is saved. It is
     * hand-picked information to fit in 16 bytes.
     */
    data[0x0] = regs[TM_QW3_HV_PHYS + TM_NSR];
    data[0x1] = regs[TM_QW3_HV_PHYS + TM_CPPR];
    data[0x2] = regs[TM_QW3_HV_PHYS + TM_IPB];
    data[0x3] = regs[TM_QW2_HV_POOL + TM_IPB];
    data[0x4] = regs[TM_QW1_OS + TM_ACK_CNT];
    data[0x5] = regs[TM_QW3_HV_PHYS + TM_LGS];
    data[0x6] = 0xFF;
    /* byte 7 packs the V bits of the PHYS/POOL/OS rings + low 2 bits */
    data[0x7] = regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x80;
    data[0x7] |= (regs[TM_QW2_HV_POOL + TM_WORD2] & 0x80) >> 1;
    data[0x7] |= (regs[TM_QW1_OS + TM_WORD2] & 0x80) >> 2;
    data[0x7] |= (regs[TM_QW3_HV_PHYS + TM_WORD2] & 0x3);

    data[0x8] = regs[TM_QW1_OS + TM_NSR];
    data[0x9] = regs[TM_QW1_OS + TM_CPPR];
    data[0xA] = regs[TM_QW1_OS + TM_IPB];
    data[0xB] = regs[TM_QW1_OS + TM_LGS];
    if (regs[TM_QW0_USER + TM_WORD2] & 0x80) {
        /*
         * Logical server extension, except VU bit replaced by EB bit
         * from NSR
         */
        data[0xC] = regs[TM_QW0_USER + TM_WORD2];
        data[0xC] &= ~0x80;
        data[0xC] |= regs[TM_QW0_USER + TM_NSR] & 0x80;
        data[0xD] = regs[TM_QW0_USER + TM_WORD2 + 1];
        data[0xE] = regs[TM_QW0_USER + TM_WORD2 + 2];
        data[0xF] = regs[TM_QW0_USER + TM_WORD2 + 3];
    }
}
/*
 * Pull a context and report it to the odd cache line of the NVP
 * reporting address: either the 16-byte Gen1 line or the full TIMA
 * register image, then perform the regular pull-to-registers.
 */
static void xive2_tm_pull_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                                 hwaddr offset, uint64_t value,
                                 unsigned size, uint8_t ring)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xptr);
    uint32_t hw_cam, nvp_idx, xive2_cfg, reserved;
    uint8_t nvp_blk;
    Xive2Nvp nvp;
    uint64_t phys_addr;
    MemTxResult result;

    /* the physical thread's CAM line identifies the NVP to report to */
    hw_cam = xive2_tctx_hw_cam_line(xptr, tctx);
    nvp_blk = xive2_nvp_blk(hw_cam);
    nvp_idx = xive2_nvp_idx(hw_cam);

    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    xive2_cfg = xive2_router_get_config(xrtr);

    phys_addr = xive2_nvp_reporting_addr(&nvp) + 0x80; /* odd line */
    if (xive2_cfg & XIVE2_GEN1_TIMA_OS) {
        uint8_t pull_ctxt[REPORT_LINE_GEN1_SIZE];

        /* Gen1 mode: report the condensed 16-byte line */
        xive2_tm_report_line_gen1(tctx, pull_ctxt, REPORT_LINE_GEN1_SIZE);
        result = dma_memory_write(&address_space_memory, phys_addr,
                                  pull_ctxt, REPORT_LINE_GEN1_SIZE,
                                  MEMTXATTRS_UNSPECIFIED);
        assert(result == MEMTX_OK);
    } else {
        /* Gen2 mode: dump the whole TIMA register image */
        result = dma_memory_write(&address_space_memory, phys_addr,
                                  &tctx->regs, sizeof(tctx->regs),
                                  MEMTXATTRS_UNSPECIFIED);
        assert(result == MEMTX_OK);
        /* overwrite bytes 12..15 with a reserved marker */
        reserved = 0xFFFFFFFF;
        result = dma_memory_write(&address_space_memory, phys_addr + 12,
                                  &reserved, sizeof(reserved),
                                  MEMTXATTRS_UNSPECIFIED);
        assert(result == MEMTX_OK);
    }

    /* the rest is similar to pull context to registers */
    xive2_tm_pull_ctx(xptr, tctx, offset, size, ring);
}
/* TIMA operation: pull the OS ring context with reporting. */
void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                             hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW1_OS);
}
/* TIMA operation: pull the HV PHYS ring context with reporting. */
void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                               hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS);
}
/*
 * Restore ("check out") the OS context from the NVP on a context
 * push: move the saved CPPR into the TIMA and record the checkout
 * (CO bit + thread id) in the NVP.
 * Returns the restored CPPR so the caller can re-raise an exception.
 */
static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
                                         uint8_t nvp_blk, uint32_t nvp_idx,
                                         Xive2Nvp *nvp)
{
    CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
    uint32_t pir = env->spr_cb[SPR_PIR].default_value;
    uint8_t cppr;

    if (!xive2_nvp_is_hw(nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
                      nvp_blk, nvp_idx);
        return 0;
    }

    /* consume the saved CPPR: move it from the NVP to the TIMA */
    cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2);
    nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0);
    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2);

    tctx->regs[TM_QW1_OS + TM_CPPR] = cppr;
    /* we don't model LSMFB */

    /* record the checkout: CO bit + this thread's PIR */
    nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1);
    nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir);

    /*
     * Checkout privilege: 0:OS, 1:Pool, 2:Hard
     *
     * TODO: we only support OS push/pull
     */
    nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0);

    xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1);

    /* return restored CPPR to generate a CPU exception if needed */
    return cppr;
}
/*
 * After an OS context push, check the NVP for interrupts that became
 * pending while the context was pulled: optionally restore the saved
 * context (S&R), consume the saved IPB, and recompute the PIPR.
 */
static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
                                   uint8_t nvp_blk, uint32_t nvp_idx,
                                   bool do_restore)
{
    Xive2Nvp nvp;
    uint8_t ipb;

    /*
     * Grab the associated thread interrupt context registers in the
     * associated NVP
     */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    /* Automatically restore thread context registers */
    if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE &&
        do_restore) {
        xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp);
    }

    /* consume the pending IPB bits accumulated in the NVP */
    ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
    if (ipb) {
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
    }
    /*
     * Always call xive_tctx_ipb_update(). Even if there were no
     * escalation triggered, there could be a pending interrupt which
     * was saved when the context was pulled and that we need to take
     * into account by recalculating the PIPR (which is not
     * saved/restored).
     * It will also raise the External interrupt signal if needed.
     */
    xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
}
  510. /*
  511. * Updating the OS CAM line can trigger a resend of interrupt
  512. */
/*
 * TIMA operation: push (enable) an OS context by writing its CAM word
 * (4 or 8 bytes). If the pushed context is valid, check the NVP for
 * pending interrupts that may need to be resent.
 */
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                          hwaddr offset, uint64_t value, unsigned size)
{
    uint32_t cam;
    uint32_t qw1w2;
    uint64_t qw1dw1;
    uint8_t nvp_blk;
    uint32_t nvp_idx;
    bool vo;
    bool do_restore;

    /* First update the thread context */
    switch (size) {
    case 4:
        cam = value;
        qw1w2 = cpu_to_be32(cam);
        memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
        break;
    case 8:
        cam = value >> 32; /* CAM word is the upper half of the dword */
        qw1dw1 = cpu_to_be64(value);
        memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1dw1, 8);
        break;
    default:
        g_assert_not_reached();
    }

    xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);

    /* Check the interrupt pending bits */
    if (vo) {
        xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx,
                               do_restore);
    }
}
  545. static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target)
  546. {
  547. uint8_t *regs = &tctx->regs[ring];
  548. regs[TM_T] = target;
  549. }
/* TIMA operation: set the target byte of the HV PHYS ring. */
void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
                            hwaddr offset, uint64_t value, unsigned size)
{
    xive2_tctx_set_target(tctx, TM_QW3_HV_PHYS, value & 0xff);
}
  555. /*
  556. * XIVE Router (aka. Virtualization Controller or IVRE)
  557. */
  558. int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
  559. Xive2Eas *eas)
  560. {
  561. Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
  562. return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
  563. }
  564. static
  565. int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
  566. uint8_t *pq)
  567. {
  568. Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
  569. return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
  570. }
  571. static
  572. int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
  573. uint8_t *pq)
  574. {
  575. Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
  576. return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
  577. }
  578. int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
  579. Xive2End *end)
  580. {
  581. Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
  582. return xrc->get_end(xrtr, end_blk, end_idx, end);
  583. }
  584. int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
  585. Xive2End *end, uint8_t word_number)
  586. {
  587. Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
  588. return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
  589. }
  590. int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
  591. Xive2Nvp *nvp)
  592. {
  593. Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
  594. return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp);
  595. }
  596. int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
  597. Xive2Nvp *nvp, uint8_t word_number)
  598. {
  599. Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
  600. return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
  601. }
  602. int xive2_router_get_nvgc(Xive2Router *xrtr, bool crowd,
  603. uint8_t nvgc_blk, uint32_t nvgc_idx,
  604. Xive2Nvgc *nvgc)
  605. {
  606. Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
  607. return xrc->get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
  608. }
  609. int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
  610. uint8_t nvgc_blk, uint32_t nvgc_idx,
  611. Xive2Nvgc *nvgc)
  612. {
  613. Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
  614. return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
  615. }
  616. /*
  617. * The thread context register words are in big-endian format.
  618. */
/*
 * Match a notification against one thread context: compare the
 * NVT CAM line against the enabled rings, highest privilege first.
 * Returns the matching ring offset, or -1 when there is no match.
 */
int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
                               uint8_t format,
                               uint8_t nvt_blk, uint32_t nvt_idx,
                               bool cam_ignore, uint32_t logic_serv)
{
    uint32_t cam =   xive2_nvp_cam_line(nvt_blk, nvt_idx);
    uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
    uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
    uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
    uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

    /*
     * TODO (PowerNV): ignore mode. The low order bits of the NVT
     * identifier are ignored in the "CAM" match.
     */

    if (format == 0) {
        if (cam_ignore == true) {
            /*
             * F=0 & i=1: Logical server notification (bits ignored at
             * the end of the NVT identifier)
             */
            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
                          nvt_blk, nvt_idx);
            return -1;
        }

        /* F=0 & i=0: Specific NVT notification */

        /* PHYS ring */
        if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
            cam == xive2_tctx_hw_cam_line(xptr, tctx)) {
            return TM_QW3_HV_PHYS;
        }

        /* HV POOL ring */
        if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
            cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) {
            return TM_QW2_HV_POOL;
        }

        /* OS ring */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) {
            return TM_QW1_OS;
        }
    } else {
        /* F=1 : User level Event-Based Branch (EBB) notification */

        /* USER ring: needs OS CAM match + USER ring valid + logic server */
        if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
            (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
            (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) &&
            (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) {
            return TM_QW0_USER;
        }
    }
    return -1;
}
  671. static void xive2_router_realize(DeviceState *dev, Error **errp)
  672. {
  673. Xive2Router *xrtr = XIVE2_ROUTER(dev);
  674. assert(xrtr->xfb);
  675. }
  676. /*
  677. * Notification using the END ESe/ESn bit (Event State Buffer for
  678. * escalation and notification). Provide further coalescing in the
  679. * Router.
  680. */
/*
 * Trigger the ESn/ESe Event State Buffer of an END and write it back
 * if the PQ bits changed. Returns true when the notification should
 * proceed, false when it is coalesced by the ESB.
 */
static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
                                       uint32_t end_idx, Xive2End *end,
                                       uint32_t end_esmask)
{
    uint8_t pq = xive_get_field32(end_esmask, end->w1);
    bool notify = xive_esb_trigger(&pq);

    if (pq != xive_get_field32(end_esmask, end->w1)) {
        end->w1 = xive_set_field32(end_esmask, end->w1, pq);
        xive2_router_write_end(xrtr, end_blk, end_idx, end, 1);
    }

    /* ESe/n[Q]=1 : end of notification */
    return notify;
}
  694. /*
  695. * An END trigger can come from an event trigger (IPI or HW) or from
  696. * another chip. We don't model the PowerBus but the END trigger
  697. * message has the same parameters as in the function below.
  698. */
static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
                                    uint32_t end_idx, uint32_t end_data)
{
    Xive2End end;
    uint8_t priority;
    uint8_t format;
    bool found;
    Xive2Nvp nvp;
    uint8_t nvp_blk;
    uint32_t nvp_idx;

    /* END cache lookup */
    if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    /* Queue the event data in the END's event queue, if one is configured */
    if (xive2_end_is_enqueue(&end)) {
        xive2_end_enqueue(&end, end_data);
        /* Enqueuing event data modifies the EQ toggle and index */
        xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
    }

    /*
     * When the END is silent, we skip the notification part.
     */
    if (xive2_end_is_silent_escalation(&end)) {
        goto do_escalation;
    }

    /*
     * The W7 format depends on the F bit in W6. It defines the type
     * of the notification :
     *
     *   F=0 : single or multiple NVP notification
     *   F=1 : User level Event-Based Branch (EBB) notification, no
     *         priority
     */
    format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6);
    priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7);

    /* The END is masked : priority 0xff means no notification */
    if (format == 0 && priority == 0xff) {
        return;
    }

    /*
     * Check the END ESn (Event State Buffer for notification) for
     * even further coalescing in the Router
     */
    if (!xive2_end_is_notify(&end)) {
        /* ESn[Q]=1 : end of notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESn)) {
            return;
        }
    }

    /*
     * Follows IVPE notification : resolve the target NVP from W6
     */
    nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
    nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);

    /* NVP cache lookup */
    if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
                      nvp_blk, nvp_idx);
        return;
    }

    if (!xive2_nvp_is_valid(&nvp)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
                      nvp_blk, nvp_idx);
        return;
    }

    /* Try to present the interrupt on a matching HW thread */
    found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
                          xive2_end_is_ignore(&end),
                          priority,
                          xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7));

    /* TODO: Auto EOI. */

    if (found) {
        return;
    }

    /*
     * If no matching NVP is dispatched on a HW thread :
     * - specific VP: update the NVP structure if backlog is activated
     * - logical server : forward request to IVPE (not supported)
     */
    if (xive2_end_is_backlog(&end)) {
        uint8_t ipb;

        /* F=1 (EBB) notifications have no priority to record */
        if (format == 1) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x invalid config: F1 & backlog\n",
                          end_blk, end_idx);
            return;
        }

        /*
         * Record the IPB in the associated NVP structure for later
         * use. The presenter will resend the interrupt when the vCPU
         * is dispatched again on a HW thread.
         */
        ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
            xive_priority_to_ipb(priority);
        nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
        xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);

        /*
         * On HW, follows a "Broadcast Backlog" to IVPEs
         */
    }

do_escalation:
    /*
     * If activated, escalate notification using the ESe PQ bits and
     * the EAS in w4-5
     */
    if (!xive2_end_is_escalate(&end)) {
        return;
    }

    /*
     * Check the END ESe (Event State Buffer for escalation) for even
     * further coalescing in the Router
     */
    if (!xive2_end_is_uncond_escalation(&end)) {
        /* ESe[Q]=1 : end of escalation notification */
        if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
                                        &end, END2_W1_ESe)) {
            return;
        }
    }

    /*
     * The END trigger becomes an Escalation trigger.
     * NOTE(review): recursive call; depth is bounded only by the
     * guest-configured END escalation chain.
     */
    xive2_router_end_notify(xrtr,
                             xive_get_field32(END2_W4_END_BLOCK, end.w4),
                             xive_get_field32(END2_W4_ESC_END_INDEX, end.w4),
                             xive_get_field32(END2_W5_ESC_END_DATA, end.w5));
}
/*
 * Entry point for source event notifications (XiveNotifier handler).
 * Looks up the EAS for 'lisn' and, unless masked or coalesced, turns
 * the event trigger into an END trigger.
 */
void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xn);
    uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
    uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
    Xive2Eas eas;

    /* EAS cache lookup */
    if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
        return;
    }

    /*
     * When the source has not already run the PQ state machine
     * (pq_checked == false), do the ESB trigger transition here.
     */
    if (!pq_checked) {
        bool notify;
        uint8_t pq;

        /* PQ cache lookup */
        if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        notify = xive_esb_trigger(&pq);

        if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
            /* Set FIR */
            g_assert_not_reached();
        }

        /* PQ state coalesced the event : nothing to forward */
        if (!notify) {
            return;
        }
    }

    if (!xive2_eas_is_valid(&eas)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn);
        return;
    }

    if (xive2_eas_is_masked(&eas)) {
        /* Notification completed */
        return;
    }

    /*
     * The event trigger becomes an END trigger
     */
    xive2_router_end_notify(xrtr,
                             xive_get_field64(EAS2_END_BLOCK, eas.w),
                             xive_get_field64(EAS2_END_INDEX, eas.w),
                             xive_get_field64(EAS2_END_DATA,  eas.w));
}
/* QOM properties : the presenter fabric link is mandatory (see realize) */
static const Property xive2_router_properties[] = {
    DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb,
                     TYPE_XIVE_FABRIC, XiveFabric *),
};
  881. static void xive2_router_class_init(ObjectClass *klass, void *data)
  882. {
  883. DeviceClass *dc = DEVICE_CLASS(klass);
  884. XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
  885. dc->desc = "XIVE2 Router Engine";
  886. device_class_set_props(dc, xive2_router_properties);
  887. /* Parent is SysBusDeviceClass. No need to call its realize hook */
  888. dc->realize = xive2_router_realize;
  889. xnc->notify = xive2_router_notify;
  890. }
/* Abstract base type : machines provide the concrete router device */
static const TypeInfo xive2_router_info = {
    .name          = TYPE_XIVE2_ROUTER,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .abstract      = true,
    .instance_size = sizeof(Xive2Router),
    .class_size    = sizeof(Xive2RouterClass),
    .class_init    = xive2_router_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_XIVE_NOTIFIER },
        { TYPE_XIVE_PRESENTER },
        { }
    }
};
  904. static inline bool addr_is_even(hwaddr addr, uint32_t shift)
  905. {
  906. return !((addr >> shift) & 1);
  907. }
/*
 * END ESB MMIO loads. The even page of an END pair acts on the ESn PQ
 * bits, the odd page on the ESe PQ bits. The low 12 bits of the
 * address select the ESB command (EOI, GET, SET_PQ_xx).
 */
static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    uint64_t ret;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    /* +1 : two ESB pages (ESn/ESe) per END */
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return -1;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return -1;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
        ret = xive_esb_eoi(&pq);

        /* Forward the source event notification for routing ?? */
        break;

    case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
        ret = pq;
        break;

    case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
    case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
    case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
    case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
        /* Bits 9:8 of the offset encode the new PQ value */
        ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
                      offset);
        return -1;
    }

    /* Write the END back only when the PQ state changed */
    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    return ret;
}
/*
 * END ESB MMIO stores. Same even(ESn)/odd(ESe) page layout as the
 * load handler; the low 12 bits of the address select the command
 * (trigger, StoreEOI, inject).
 */
static void xive2_end_source_write(void *opaque, hwaddr addr,
                                   uint64_t value, unsigned size)
{
    Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
    uint32_t offset = addr & 0xFFF;
    uint8_t end_blk;
    uint32_t end_idx;
    Xive2End end;
    uint32_t end_esmask;
    uint8_t pq;
    bool notify = false;

    /*
     * The block id should be deduced from the load address on the END
     * ESB MMIO but our model only supports a single block per XIVE chip.
     */
    end_blk = xive2_router_get_block_id(xsrc->xrtr);
    /* +1 : two ESB pages (ESn/ESe) per END */
    end_idx = addr >> (xsrc->esb_shift + 1);

    if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
                      end_idx);
        return;
    }

    if (!xive2_end_is_valid(&end)) {
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
                      end_blk, end_idx);
        return;
    }

    end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
        END2_W1_ESe;
    pq = xive_get_field32(end_esmask, end.w1);

    switch (offset) {
    case 0 ... 0x3FF:
        /* Store at offset 0 : ESB trigger */
        notify = xive_esb_trigger(&pq);
        break;

    case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
        /* TODO: can we check StoreEOI availability from the router ? */
        notify = xive_esb_eoi(&pq);
        break;

    case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
        /* EQ injection is only valid on the notification (ESn) page */
        if (end_esmask == END2_W1_ESe) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "XIVE: END %x/%x can not EQ inject on ESe\n",
                           end_blk, end_idx);
            return;
        }
        notify = true;
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n",
                      offset);
        return;
    }

    /* Write the END back only when the PQ state changed */
    if (pq != xive_get_field32(end_esmask, end.w1)) {
        end.w1 = xive_set_field32(end_esmask, end.w1, pq);
        xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
    }

    /* TODO: Forward the source event notification for routing */
    if (notify) {
        ;
    }
}
/* MMIO ops for the END ESB pages ; accesses are big-endian, 1-8 bytes */
static const MemoryRegionOps xive2_end_source_ops = {
    .read = xive2_end_source_read,
    .write = xive2_end_source_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
  1036. static void xive2_end_source_realize(DeviceState *dev, Error **errp)
  1037. {
  1038. Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev);
  1039. assert(xsrc->xrtr);
  1040. if (!xsrc->nr_ends) {
  1041. error_setg(errp, "Number of interrupt needs to be greater than 0");
  1042. return;
  1043. }
  1044. if (xsrc->esb_shift != XIVE_ESB_4K &&
  1045. xsrc->esb_shift != XIVE_ESB_64K) {
  1046. error_setg(errp, "Invalid ESB shift setting");
  1047. return;
  1048. }
  1049. /*
  1050. * Each END is assigned an even/odd pair of MMIO pages, the even page
  1051. * manages the ESn field while the odd page manages the ESe field.
  1052. */
  1053. memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
  1054. &xive2_end_source_ops, xsrc, "xive.end",
  1055. (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
  1056. }
/* QOM properties : END count, ESB page shift, and the router link */
static const Property xive2_end_source_properties[] = {
    DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
    DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
    DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
                     Xive2Router *),
};
  1063. static void xive2_end_source_class_init(ObjectClass *klass, void *data)
  1064. {
  1065. DeviceClass *dc = DEVICE_CLASS(klass);
  1066. dc->desc = "XIVE END Source";
  1067. device_class_set_props(dc, xive2_end_source_properties);
  1068. dc->realize = xive2_end_source_realize;
  1069. dc->user_creatable = false;
  1070. }
static const TypeInfo xive2_end_source_info = {
    .name          = TYPE_XIVE2_END_SOURCE,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(Xive2EndSource),
    .class_init    = xive2_end_source_class_init,
};
/* Register the XIVE2 QOM types with the type system at startup */
static void xive2_register_types(void)
{
    type_register_static(&xive2_router_info);
    type_register_static(&xive2_end_source_info);
}

type_init(xive2_register_types)