xics.c

/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "hw.h"
#include "hw/spapr.h"
#include "hw/xics.h"
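
/*
 * Overview: the XICS model splits interrupt handling into a single
 * Interrupt Source Controller (ICS), which tracks per-source state and
 * routing, and one Interrupt Presentation Controller (ICP) "server" per
 * virtual CPU, which tracks what is currently pending or being serviced
 * on that CPU.  The guest drives the ICP through the H_CPPR, H_IPI,
 * H_XIRR and H_EOI hypercalls, and the ICS through the ibm,{get,set}-xive
 * and ibm,int-{on,off} RTAS calls registered at the bottom of this file.
 */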
  30. /*
  31. * ICP: Presentation layer
  32. */
  33. struct icp_server_state {
  34. uint32_t xirr;
  35. uint8_t pending_priority;
  36. uint8_t mfrr;
  37. qemu_irq output;
  38. };
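
/*
 * The XIRR is a 32-bit register: the top byte is the CPPR (current
 * processor priority, 0 = most favoured, 0xff = least favoured) and the
 * low 24 bits are the XISR (the source number of the interrupt currently
 * presented, 0 if none).  The macros below extract the two fields.
 */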
#define XISR_MASK  0x00ffffff
#define CPPR_MASK  0xff000000

#define XISR(ss)   (((ss)->xirr) & XISR_MASK)
#define CPPR(ss)   (((ss)->xirr) >> 24)

struct ics_state;

struct icp_state {
    long nr_servers;
    struct icp_server_state *ss;
    struct ics_state *ics;
};

static void ics_reject(struct ics_state *ics, int nr);
static void ics_resend(struct ics_state *ics);
static void ics_eoi(struct ics_state *ics, int nr);
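
/*
 * icp_check_ipi() decides whether the IPI signalled through the MFRR
 * should be presented: if an interrupt with an equal or more favoured
 * (numerically lower or equal) priority is already pending, nothing
 * happens; otherwise any pending interrupt is bounced back to the ICS
 * and the IPI (source number XICS_IPI) is presented at MFRR priority.
 */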
static void icp_check_ipi(struct icp_state *icp, int server)
{
    struct icp_server_state *ss = icp->ss + server;

    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        return;
    }

    if (XISR(ss)) {
        ics_reject(icp->ics, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}

static void icp_resend(struct icp_state *icp, int server)
{
    struct icp_server_state *ss = icp->ss + server;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
    ics_resend(icp->ics);
}
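
/*
 * icp_set_cppr() implements the H_CPPR hypercall.  Writing a numerically
 * lower CPPR (the CPU now only takes more favoured interrupts) may force
 * an already-presented interrupt whose priority no longer qualifies to be
 * rejected back to the ICS; writing a numerically higher CPPR relaxes the
 * threshold, so if nothing is currently presented the sources are asked
 * to resend.
 */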
static void icp_set_cppr(struct icp_state *icp, int server, uint8_t cppr)
{
    struct icp_server_state *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        if (!XISR(ss)) {
            icp_resend(icp, server);
        }
    }
}

static void icp_set_mfrr(struct icp_state *icp, int nr, uint8_t mfrr)
{
    struct icp_server_state *ss = icp->ss + nr;

    ss->mfrr = mfrr;
    if (mfrr < CPPR(ss)) {
        icp_check_ipi(icp, nr);
    }
}
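
/*
 * icp_accept() implements the read side of H_XIRR: the current XIRR is
 * returned to the caller, the pending priority becomes the new CPPR
 * (the interrupt is now being serviced), the XISR field is cleared and
 * the CPU interrupt line is dropped.
 */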
static uint32_t icp_accept(struct icp_server_state *ss)
{
    uint32_t xirr;

    qemu_irq_lower(ss->output);
    xirr = ss->xirr;
    ss->xirr = ss->pending_priority << 24;
    return xirr;
}

static void icp_eoi(struct icp_state *icp, int server, uint32_t xirr)
{
    struct icp_server_state *ss = icp->ss + server;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    ics_eoi(icp->ics, xirr & XISR_MASK);
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}
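
/*
 * icp_irq() is the delivery path from the ICS into a presentation
 * controller.  An interrupt is rejected straight back to the source if
 * its priority is not more favoured than the current CPPR, or if an
 * equally or more favoured interrupt is already presented; otherwise it
 * displaces whatever was pending and the CPU interrupt line is raised.
 */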
static void icp_irq(struct icp_state *icp, int server, int nr, uint8_t priority)
{
    struct icp_server_state *ss = icp->ss + server;

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(icp->ics, nr);
    } else {
        if (XISR(ss)) {
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        qemu_irq_raise(ss->output);
    }
}

/*
 * ICS: Source layer
 */
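
/*
 * Per-source state: "asserted" and "sent" track level-triggered (LSI)
 * sources, while "rejected" and "masked_pending" remember edge/message
 * (MSI) interrupts that could not be delivered and must be replayed later
 * by resend_msi() or write_xive_msi().
 */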
struct ics_irq_state {
    int server;
    uint8_t priority;
    uint8_t saved_priority;
    enum xics_irq_type type;
    int asserted:1;
    int sent:1;
    int rejected:1;
    int masked_pending:1;
};

struct ics_state {
    int nr_irqs;
    int offset;
    qemu_irq *qirqs;
    struct ics_irq_state *irqs;
    struct icp_state *icp;
};

static int ics_valid_irq(struct ics_state *ics, uint32_t nr)
{
    return (nr >= ics->offset)
        && (nr < (ics->offset + ics->nr_irqs));
}
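
/*
 * Resend logic: a rejected MSI is replayed once, provided it is still
 * enabled (priority != 0xff).  An LSI is re-presented for as long as its
 * input line is asserted and it is not already in flight ("sent").
 */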
static void resend_msi(struct ics_state *ics, int srcno)
{
    struct ics_irq_state *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->rejected) {
        irq->rejected = 0;
        if (irq->priority != 0xff) {
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
                    irq->priority);
        }
    }
}

static void resend_lsi(struct ics_state *ics, int srcno)
{
    struct ics_irq_state *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff) && irq->asserted && !irq->sent) {
        irq->sent = 1;
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
    }
}

static void set_irq_msi(struct ics_state *ics, int srcno, int val)
{
    struct ics_irq_state *irq = ics->irqs + srcno;

    if (val) {
        if (irq->priority == 0xff) {
            irq->masked_pending = 1;
            /* masked pending */ ;
        } else {
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void set_irq_lsi(struct ics_state *ics, int srcno, int val)
{
    struct ics_irq_state *irq = ics->irqs + srcno;

    irq->asserted = val;
    resend_lsi(ics, srcno);
}

static void ics_set_irq(void *opaque, int srcno, int val)
{
    struct ics_state *ics = (struct ics_state *)opaque;
    struct ics_irq_state *irq = ics->irqs + srcno;

    if (irq->type == XICS_LSI) {
        set_irq_lsi(ics, srcno, val);
    } else {
        set_irq_msi(ics, srcno, val);
    }
}
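
/*
 * write_xive_*() run when the guest re-routes a source with the
 * ibm,set-xive RTAS call: an MSI that was held back while masked is
 * delivered if the new priority enables it, while an LSI is simply
 * re-evaluated against its current line state.
 */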
static void write_xive_msi(struct ics_state *ics, int srcno)
{
    struct ics_irq_state *irq = ics->irqs + srcno;

    if (!irq->masked_pending || (irq->priority == 0xff)) {
        return;
    }

    irq->masked_pending = 0;
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
}

static void write_xive_lsi(struct ics_state *ics, int srcno)
{
    resend_lsi(ics, srcno);
}

static void ics_write_xive(struct ics_state *ics, int nr, int server,
                           uint8_t priority)
{
    int srcno = nr - ics->offset;
    struct ics_irq_state *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;

    if (irq->type == XICS_LSI) {
        write_xive_lsi(ics, srcno);
    } else {
        write_xive_msi(ics, srcno);
    }
}
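
/*
 * The three calls below are the ICP-to-ICS half of the protocol:
 * ics_reject() records that a presented interrupt bounced off a busy
 * presentation controller, ics_resend() replays every source that still
 * has something to deliver, and ics_eoi() lets an LSI fire again once the
 * guest has finished servicing it.
 */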
static void ics_reject(struct ics_state *ics, int nr)
{
    struct ics_irq_state *irq = ics->irqs + nr - ics->offset;

    irq->rejected = 1; /* Irrelevant but harmless for LSI */
    irq->sent = 0; /* Irrelevant but harmless for MSI */
}

static void ics_resend(struct ics_state *ics)
{
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        struct ics_irq_state *irq = ics->irqs + i;

        /* FIXME: filter by server#? */
        if (irq->type == XICS_LSI) {
            resend_lsi(ics, i);
        } else {
            resend_msi(ics, i);
        }
    }
}

static void ics_eoi(struct ics_state *ics, int nr)
{
    int srcno = nr - ics->offset;
    struct ics_irq_state *irq = ics->irqs + srcno;

    if (irq->type == XICS_LSI) {
        irq->sent = 0;
    }
}

/*
 * Exported functions
 */

qemu_irq xics_get_qirq(struct icp_state *icp, int irq)
{
    if ((irq < icp->ics->offset)
        || (irq >= (icp->ics->offset + icp->ics->nr_irqs))) {
        return NULL;
    }

    return icp->ics->qirqs[irq - icp->ics->offset];
}

void xics_set_irq_type(struct icp_state *icp, int irq,
                       enum xics_irq_type type)
{
    assert((irq >= icp->ics->offset)
           && (irq < (icp->ics->offset + icp->ics->nr_irqs)));
    assert((type == XICS_MSI) || (type == XICS_LSI));

    icp->ics->irqs[irq - icp->ics->offset].type = type;
}
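
/*
 * Hypercall handlers.  In this model the PAPR "server" number is simply
 * the vCPU's cpu_index, so each hcall operates on the calling CPU's own
 * presentation controller (except H_IPI, which names its target server
 * explicitly).
 */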
static target_ulong h_cppr(CPUPPCState *env, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong cppr = args[0];

    icp_set_cppr(spapr->icp, env->cpu_index, cppr);
    return H_SUCCESS;
}

static target_ulong h_ipi(CPUPPCState *env, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong server = args[0];
    target_ulong mfrr = args[1];

    if (server >= spapr->icp->nr_servers) {
        return H_PARAMETER;
    }

    icp_set_mfrr(spapr->icp, server, mfrr);
    return H_SUCCESS;
}

static target_ulong h_xirr(CPUPPCState *env, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    uint32_t xirr = icp_accept(spapr->icp->ss + env->cpu_index);

    args[0] = xirr;
    return H_SUCCESS;
}

static target_ulong h_eoi(CPUPPCState *env, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong xirr = args[0];

    icp_eoi(spapr->icp, env->cpu_index, xirr);
    return H_SUCCESS;
}
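
/*
 * RTAS call handlers.  ibm,set-xive and ibm,get-xive route a source to a
 * server at a given priority; ibm,int-off and ibm,int-on are accepted but
 * currently do nothing beyond validating their arguments (see the #if 0
 * blocks below).  A return status of -3 is the RTAS "parameter error"
 * code.
 */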
static void rtas_set_xive(sPAPREnvironment *spapr, uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr, server, priority;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);
    server = rtas_ld(args, 1);
    priority = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
        || (priority > 0xff)) {
        rtas_st(rets, 0, -3);
        return;
    }

    ics_write_xive(ics, nr, server, priority);

    rtas_st(rets, 0, 0); /* Success */
}

static void rtas_get_xive(sPAPREnvironment *spapr, uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 3)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    rtas_st(rets, 0, 0); /* Success */
    rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
    rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
}

static void rtas_int_off(sPAPREnvironment *spapr, uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    /* This is a NOP for now, since the described PAPR semantics don't
     * seem to gel with what Linux does */
#if 0
    struct ics_irq_state *irq = xics->irqs + (nr - xics->offset);

    irq->saved_priority = irq->priority;
    ics_write_xive_msi(xics, nr, irq->server, 0xff);
#endif

    rtas_st(rets, 0, 0); /* Success */
}

static void rtas_int_on(sPAPREnvironment *spapr, uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    struct ics_state *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, -3);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, -3);
        return;
    }

    /* This is a NOP for now, since the described PAPR semantics don't
     * seem to gel with what Linux does */
#if 0
    struct ics_irq_state *irq = xics->irqs + (nr - xics->offset);

    ics_write_xive_msi(xics, nr, irq->server, irq->saved_priority);
#endif

    rtas_st(rets, 0, 0); /* Success */
}
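
/*
 * xics_system_init() builds the whole interrupt fabric at machine init:
 * one ICP server per CPU (indexed by cpu_index and wired to the CPU's
 * external interrupt input), plus a single ICS holding nr_irqs sources
 * numbered from offset 16 upwards, with everything initially masked
 * (priority 0xff).  It also registers the hypercall and RTAS handlers.
 */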
struct icp_state *xics_system_init(int nr_irqs)
{
    CPUPPCState *env;
    int max_server_num;
    int i;
    struct icp_state *icp;
    struct ics_state *ics;

    max_server_num = -1;
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->cpu_index > max_server_num) {
            max_server_num = env->cpu_index;
        }
    }

    icp = g_malloc0(sizeof(*icp));
    icp->nr_servers = max_server_num + 1;
    icp->ss = g_malloc0(icp->nr_servers * sizeof(struct icp_server_state));

    for (i = 0; i < icp->nr_servers; i++) {
        icp->ss[i].mfrr = 0xff;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        struct icp_server_state *ss = &icp->ss[env->cpu_index];

        switch (PPC_INPUT(env)) {
        case PPC_FLAGS_INPUT_POWER7:
            ss->output = env->irq_inputs[POWER7_INPUT_INT];
            break;

        case PPC_FLAGS_INPUT_970:
            ss->output = env->irq_inputs[PPC970_INPUT_INT];
            break;

        default:
            hw_error("XICS interrupt model does not support this CPU bus "
                     "model\n");
            exit(1);
        }
    }

    ics = g_malloc0(sizeof(*ics));
    ics->nr_irqs = nr_irqs;
    ics->offset = 16;
    ics->irqs = g_malloc0(nr_irqs * sizeof(struct ics_irq_state));

    icp->ics = ics;
    ics->icp = icp;

    for (i = 0; i < nr_irqs; i++) {
        ics->irqs[i].priority = 0xff;
        ics->irqs[i].saved_priority = 0xff;
    }

    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, nr_irqs);

    spapr_register_hypercall(H_CPPR, h_cppr);
    spapr_register_hypercall(H_IPI, h_ipi);
    spapr_register_hypercall(H_XIRR, h_xirr);
    spapr_register_hypercall(H_EOI, h_eoi);

    spapr_rtas_register("ibm,set-xive", rtas_set_xive);
    spapr_rtas_register("ibm,get-xive", rtas_get_xive);
    spapr_rtas_register("ibm,int-off", rtas_int_off);
    spapr_rtas_register("ibm,int-on", rtas_int_on);

    return icp;
}
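
/*
 * Typical usage from board and device code (an illustrative sketch, not
 * part of this file): the sPAPR machine init creates the controller once
 * and devices raise interrupts through the qemu_irq handles it hands out,
 * roughly as follows:
 *
 *     spapr->icp = xics_system_init(nr_irqs);
 *     ...
 *     qemu_irq irq = xics_get_qirq(spapr->icp, irq_num);
 *     qemu_irq_pulse(irq);    // edge/MSI-style delivery
 */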