xics.c

/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "hw/hw.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/xics.h"
#include "qemu/error-report.h"
#include "qapi/visitor.h"
static int get_cpu_index_by_dt_id(int cpu_dt_id)
{
    PowerPCCPU *cpu = ppc_get_vcpu_by_dt_id(cpu_dt_id);

    if (cpu) {
        return cpu->parent_obj.cpu_index;
    }

    return -1;
}
void xics_cpu_setup(XICSState *icp, PowerPCCPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ICPState *ss = &icp->ss[cs->cpu_index];
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);

    assert(cs->cpu_index < icp->nr_servers);

    if (info->cpu_setup) {
        info->cpu_setup(icp, cpu);
    }

    switch (PPC_INPUT(env)) {
    case PPC_FLAGS_INPUT_POWER7:
        ss->output = env->irq_inputs[POWER7_INPUT_INT];
        break;

    case PPC_FLAGS_INPUT_970:
        ss->output = env->irq_inputs[PPC970_INPUT_INT];
        break;

    default:
        error_report("XICS interrupt controller does not support this CPU "
                     "bus model");
        abort();
    }
}
/*
 * XICS Common class - parent for emulated XICS and KVM-XICS
 */

static void xics_common_reset(DeviceState *d)
{
    XICSState *icp = XICS_COMMON(d);
    int i;

    for (i = 0; i < icp->nr_servers; i++) {
        device_reset(DEVICE(&icp->ss[i]));
    }

    device_reset(DEVICE(icp->ics));
}

static void xics_prop_get_nr_irqs(Object *obj, Visitor *v,
                                  void *opaque, const char *name, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_irqs;

    visit_type_int(v, &value, name, errp);
}

static void xics_prop_set_nr_irqs(Object *obj, Visitor *v,
                                  void *opaque, const char *name, Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_irqs) {
        error_setg(errp, "Number of interrupts is already set to %u",
                   icp->nr_irqs);
        return;
    }

    assert(info->set_nr_irqs);
    assert(icp->ics);
    info->set_nr_irqs(icp, value, errp);
}

static void xics_prop_get_nr_servers(Object *obj, Visitor *v,
                                     void *opaque, const char *name,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    int64_t value = icp->nr_servers;

    visit_type_int(v, &value, name, errp);
}

static void xics_prop_set_nr_servers(Object *obj, Visitor *v,
                                     void *opaque, const char *name,
                                     Error **errp)
{
    XICSState *icp = XICS_COMMON(obj);
    XICSStateClass *info = XICS_COMMON_GET_CLASS(icp);
    Error *error = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (icp->nr_servers) {
        error_setg(errp, "Number of servers is already set to %u",
                   icp->nr_servers);
        return;
    }

    assert(info->set_nr_servers);
    info->set_nr_servers(icp, value, errp);
}

static void xics_common_initfn(Object *obj)
{
    object_property_add(obj, "nr_irqs", "int",
                        xics_prop_get_nr_irqs, xics_prop_set_nr_irqs,
                        NULL, NULL, NULL);
    object_property_add(obj, "nr_servers", "int",
                        xics_prop_get_nr_servers, xics_prop_set_nr_servers,
                        NULL, NULL, NULL);
}

static void xics_common_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    dc->reset = xics_common_reset;
}

static const TypeInfo xics_common_info = {
    .name = TYPE_XICS_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XICSState),
    .class_size = sizeof(XICSStateClass),
    .instance_init = xics_common_initfn,
    .class_init = xics_common_class_init,
};

/*
 * ICP: Presentation layer
 */

#define XISR_MASK 0x00ffffff
#define CPPR_MASK 0xff000000

#define XISR(ss) (((ss)->xirr) & XISR_MASK)
#define CPPR(ss) (((ss)->xirr) >> 24)
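
/*
 * The XIRR register packs both pieces of per-server presentation
 * state: the top byte is the CPPR (current processor priority) and the
 * low 24 bits are the XISR (source number of the interrupt being
 * presented).  Numerically lower priorities are more favored; 0xff
 * means "least favored"/no interrupt, which is why the reset and
 * accept paths below fall back to 0xff.
 */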

static void ics_reject(ICSState *ics, int nr);
static void ics_resend(ICSState *ics);
static void ics_eoi(ICSState *ics, int nr);

static void icp_check_ipi(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (XISR(ss) && (ss->pending_priority <= ss->mfrr)) {
        return;
    }

    trace_xics_icp_check_ipi(server, ss->mfrr);

    if (XISR(ss)) {
        ics_reject(icp->ics, XISR(ss));
    }

    ss->xirr = (ss->xirr & ~XISR_MASK) | XICS_IPI;
    ss->pending_priority = ss->mfrr;
    qemu_irq_raise(ss->output);
}

static void icp_resend(XICSState *icp, int server)
{
    ICPState *ss = icp->ss + server;

    if (ss->mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
    ics_resend(icp->ics);
}

static void icp_set_cppr(XICSState *icp, int server, uint8_t cppr)
{
    ICPState *ss = icp->ss + server;
    uint8_t old_cppr;
    uint32_t old_xisr;

    old_cppr = CPPR(ss);
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (cppr << 24);

    if (cppr < old_cppr) {
        if (XISR(ss) && (cppr <= ss->pending_priority)) {
            old_xisr = XISR(ss);
            ss->xirr &= ~XISR_MASK; /* Clear XISR */
            ss->pending_priority = 0xff;
            qemu_irq_lower(ss->output);
            ics_reject(icp->ics, old_xisr);
        }
    } else {
        if (!XISR(ss)) {
            icp_resend(icp, server);
        }
    }
}

static void icp_set_mfrr(XICSState *icp, int server, uint8_t mfrr)
{
    ICPState *ss = icp->ss + server;

    ss->mfrr = mfrr;
    if (mfrr < CPPR(ss)) {
        icp_check_ipi(icp, server);
    }
}
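
/*
 * Inter-processor interrupts are driven through the MFRR: H_IPI (below)
 * stores a priority into the target server's MFRR, and whenever that
 * value is more favored than the server's CPPR, icp_check_ipi() above
 * presents the dedicated XICS_IPI source, rejecting back to the ICS any
 * less favored external interrupt that was already pending.
 */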

static uint32_t icp_accept(ICPState *ss)
{
    uint32_t xirr = ss->xirr;

    qemu_irq_lower(ss->output);
    ss->xirr = ss->pending_priority << 24;
    ss->pending_priority = 0xff;

    trace_xics_icp_accept(xirr, ss->xirr);

    return xirr;
}

static void icp_eoi(XICSState *icp, int server, uint32_t xirr)
{
    ICPState *ss = icp->ss + server;

    /* Send EOI -> ICS */
    ss->xirr = (ss->xirr & ~CPPR_MASK) | (xirr & CPPR_MASK);
    trace_xics_icp_eoi(server, xirr, ss->xirr);
    ics_eoi(icp->ics, xirr & XISR_MASK);
    if (!XISR(ss)) {
        icp_resend(icp, server);
    }
}

static void icp_irq(XICSState *icp, int server, int nr, uint8_t priority)
{
    ICPState *ss = icp->ss + server;

    trace_xics_icp_irq(server, nr, priority);

    if ((priority >= CPPR(ss))
        || (XISR(ss) && (ss->pending_priority <= priority))) {
        ics_reject(icp->ics, nr);
    } else {
        if (XISR(ss)) {
            ics_reject(icp->ics, XISR(ss));
        }
        ss->xirr = (ss->xirr & ~XISR_MASK) | (nr & XISR_MASK);
        ss->pending_priority = priority;
        trace_xics_icp_raise(ss->xirr, ss->pending_priority);
        qemu_irq_raise(ss->output);
    }
}

static void icp_dispatch_pre_save(void *opaque)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->pre_save) {
        info->pre_save(ss);
    }
}

static int icp_dispatch_post_load(void *opaque, int version_id)
{
    ICPState *ss = opaque;
    ICPStateClass *info = ICP_GET_CLASS(ss);

    if (info->post_load) {
        return info->post_load(ss, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_icp_server = {
    .name = "icp/server",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = icp_dispatch_pre_save,
    .post_load = icp_dispatch_post_load,
    .fields = (VMStateField []) {
        /* Sanity check */
        VMSTATE_UINT32(xirr, ICPState),
        VMSTATE_UINT8(pending_priority, ICPState),
        VMSTATE_UINT8(mfrr, ICPState),
        VMSTATE_END_OF_LIST()
    },
};

static void icp_reset(DeviceState *dev)
{
    ICPState *icp = ICP(dev);

    icp->xirr = 0;
    icp->pending_priority = 0xff;
    icp->mfrr = 0xff;

    /* Make sure all outputs are deasserted */
    qemu_set_irq(icp->output, 0);
}

static void icp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = icp_reset;
    dc->vmsd = &vmstate_icp_server;
}

static const TypeInfo icp_info = {
    .name = TYPE_ICP,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICPState),
    .class_init = icp_class_init,
    .class_size = sizeof(ICPStateClass),
};

/*
 * ICS: Source layer
 */

static int ics_valid_irq(ICSState *ics, uint32_t nr)
{
    return (nr >= ics->offset)
        && (nr < (ics->offset + ics->nr_irqs));
}
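
/*
 * Sources handled by this ICS are identified by global XICS interrupt
 * numbers in the range [offset, offset + nr_irqs); ics_initfn() below
 * sets offset to XICS_IRQ_BASE, and the local source index (srcno)
 * used throughout this layer is always nr - ics->offset.
 */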

static void resend_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    /* FIXME: filter by server#? */
    if (irq->status & XICS_STATUS_REJECTED) {
        irq->status &= ~XICS_STATUS_REJECTED;
        if (irq->priority != 0xff) {
            icp_irq(ics->icp, irq->server, srcno + ics->offset,
                    irq->priority);
        }
    }
}

static void resend_lsi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if ((irq->priority != 0xff)
        && (irq->status & XICS_STATUS_ASSERTED)
        && !(irq->status & XICS_STATUS_SENT)) {
        irq->status |= XICS_STATUS_SENT;
        icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
    }
}

static void set_irq_msi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_msi(srcno, srcno + ics->offset);

    if (val) {
        if (irq->priority == 0xff) {
            irq->status |= XICS_STATUS_MASKED_PENDING;
            trace_xics_masked_pending();
        } else {
            icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
        }
    }
}

static void set_irq_lsi(ICSState *ics, int srcno, int val)
{
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_set_irq_lsi(srcno, srcno + ics->offset);
    if (val) {
        irq->status |= XICS_STATUS_ASSERTED;
    } else {
        irq->status &= ~XICS_STATUS_ASSERTED;
    }
    resend_lsi(ics, srcno);
}

static void ics_set_irq(void *opaque, int srcno, int val)
{
    ICSState *ics = (ICSState *)opaque;

    if (ics->islsi[srcno]) {
        set_irq_lsi(ics, srcno, val);
    } else {
        set_irq_msi(ics, srcno, val);
    }
}
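
/*
 * The two source flavours differ in how masking and rejection are
 * handled: message-signalled (MSI) sources latch a masked or rejected
 * event in XICS_STATUS_MASKED_PENDING / XICS_STATUS_REJECTED and
 * replay it later, while level-sensitive (LSI) sources track the
 * current line state in XICS_STATUS_ASSERTED and use XICS_STATUS_SENT
 * to avoid re-presenting an interrupt before it has been EOI'd.
 */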

static void write_xive_msi(ICSState *ics, int srcno)
{
    ICSIRQState *irq = ics->irqs + srcno;

    if (!(irq->status & XICS_STATUS_MASKED_PENDING)
        || (irq->priority == 0xff)) {
        return;
    }

    irq->status &= ~XICS_STATUS_MASKED_PENDING;
    icp_irq(ics->icp, irq->server, srcno + ics->offset, irq->priority);
}

static void write_xive_lsi(ICSState *ics, int srcno)
{
    resend_lsi(ics, srcno);
}

static void ics_write_xive(ICSState *ics, int nr, int server,
                           uint8_t priority, uint8_t saved_priority)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    irq->server = server;
    irq->priority = priority;
    irq->saved_priority = saved_priority;

    trace_xics_ics_write_xive(nr, srcno, server, priority);

    if (ics->islsi[srcno]) {
        write_xive_lsi(ics, srcno);
    } else {
        write_xive_msi(ics, srcno);
    }
}

static void ics_reject(ICSState *ics, int nr)
{
    ICSIRQState *irq = ics->irqs + nr - ics->offset;

    trace_xics_ics_reject(nr, nr - ics->offset);
    irq->status |= XICS_STATUS_REJECTED; /* Irrelevant but harmless for LSI */
    irq->status &= ~XICS_STATUS_SENT; /* Irrelevant but harmless for MSI */
}

static void ics_resend(ICSState *ics)
{
    int i;

    for (i = 0; i < ics->nr_irqs; i++) {
        /* FIXME: filter by server#? */
        if (ics->islsi[i]) {
            resend_lsi(ics, i);
        } else {
            resend_msi(ics, i);
        }
    }
}

static void ics_eoi(ICSState *ics, int nr)
{
    int srcno = nr - ics->offset;
    ICSIRQState *irq = ics->irqs + srcno;

    trace_xics_ics_eoi(nr);

    if (ics->islsi[srcno]) {
        irq->status &= ~XICS_STATUS_SENT;
    }
}

static void ics_reset(DeviceState *dev)
{
    ICSState *ics = ICS(dev);
    int i;

    memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs);
    for (i = 0; i < ics->nr_irqs; i++) {
        ics->irqs[i].priority = 0xff;
        ics->irqs[i].saved_priority = 0xff;
    }
}

static int ics_post_load(ICSState *ics, int version_id)
{
    int i;

    for (i = 0; i < ics->icp->nr_servers; i++) {
        icp_resend(ics->icp, i);
    }

    return 0;
}

static void ics_dispatch_pre_save(void *opaque)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->pre_save) {
        info->pre_save(ics);
    }
}

static int ics_dispatch_post_load(void *opaque, int version_id)
{
    ICSState *ics = opaque;
    ICSStateClass *info = ICS_GET_CLASS(ics);

    if (info->post_load) {
        return info->post_load(ics, version_id);
    }

    return 0;
}

static const VMStateDescription vmstate_ics_irq = {
    .name = "ics/irq",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(server, ICSIRQState),
        VMSTATE_UINT8(priority, ICSIRQState),
        VMSTATE_UINT8(saved_priority, ICSIRQState),
        VMSTATE_UINT8(status, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_ics = {
    .name = "ics",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = ics_dispatch_pre_save,
    .post_load = ics_dispatch_post_load,
    .fields = (VMStateField []) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(nr_irqs, ICSState),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(irqs, ICSState, nr_irqs,
                                             vmstate_ics_irq, ICSIRQState),
        VMSTATE_END_OF_LIST()
    },
};

static void ics_initfn(Object *obj)
{
    ICSState *ics = ICS(obj);

    ics->offset = XICS_IRQ_BASE;
}

static void ics_realize(DeviceState *dev, Error **errp)
{
    ICSState *ics = ICS(dev);

    if (!ics->nr_irqs) {
        error_setg(errp, "Number of interrupts needs to be greater than 0");
        return;
    }

    ics->irqs = g_malloc0(ics->nr_irqs * sizeof(ICSIRQState));
    ics->islsi = g_malloc0(ics->nr_irqs * sizeof(bool));
    ics->qirqs = qemu_allocate_irqs(ics_set_irq, ics, ics->nr_irqs);
}

static void ics_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ICSStateClass *isc = ICS_CLASS(klass);

    dc->realize = ics_realize;
    dc->vmsd = &vmstate_ics;
    dc->reset = ics_reset;
    isc->post_load = ics_post_load;
}

static const TypeInfo ics_info = {
    .name = TYPE_ICS,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(ICSState),
    .class_init = ics_class_init,
    .class_size = sizeof(ICSStateClass),
    .instance_init = ics_initfn,
};

/*
 * Exported functions
 */

qemu_irq xics_get_qirq(XICSState *icp, int irq)
{
    if (!ics_valid_irq(icp->ics, irq)) {
        return NULL;
    }

    return icp->ics->qirqs[irq - icp->ics->offset];
}

void xics_set_irq_type(XICSState *icp, int irq, bool lsi)
{
    assert(ics_valid_irq(icp->ics, irq));

    icp->ics->islsi[irq - icp->ics->offset] = lsi;
}
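
/*
 * Board and device code drives sources through these two helpers: it
 * declares whether a global interrupt number is level- or
 * message-triggered with xics_set_irq_type() and then toggles the line
 * via the qemu_irq returned by xics_get_qirq().  An illustrative
 * caller (the irq number here is arbitrary, not from this file) might
 * do:
 *
 *     xics_set_irq_type(spapr->icp, irq, true);          // mark as LSI
 *     qemu_set_irq(xics_get_qirq(spapr->icp, irq), 1);   // assert
 *     qemu_set_irq(xics_get_qirq(spapr->icp, irq), 0);   // deassert
 */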

/*
 * Guest interfaces
 */

static target_ulong h_cppr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong cppr = args[0];

    icp_set_cppr(spapr->icp, cs->cpu_index, cppr);
    return H_SUCCESS;
}

static target_ulong h_ipi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong server = get_cpu_index_by_dt_id(args[0]);
    target_ulong mfrr = args[1];

    if (server >= spapr->icp->nr_servers) {
        return H_PARAMETER;
    }

    icp_set_mfrr(spapr->icp, server, mfrr);
    return H_SUCCESS;
}

static target_ulong h_xirr(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    uint32_t xirr = icp_accept(spapr->icp->ss + cs->cpu_index);

    args[0] = xirr;
    return H_SUCCESS;
}

static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                             target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &spapr->icp->ss[cs->cpu_index];
    uint32_t xirr = icp_accept(ss);

    args[0] = xirr;
    args[1] = cpu_get_real_ticks();
    return H_SUCCESS;
}

static target_ulong h_eoi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong xirr = args[0];

    icp_eoi(spapr->icp, cs->cpu_index, xirr);
    return H_SUCCESS;
}

static target_ulong h_ipoll(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    ICPState *ss = &spapr->icp->ss[cs->cpu_index];

    args[0] = ss->xirr;
    args[1] = ss->mfrr;

    return H_SUCCESS;
}

static void rtas_set_xive(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr, server, priority;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);
    server = get_cpu_index_by_dt_id(rtas_ld(args, 1));
    priority = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr) || (server >= ics->icp->nr_servers)
        || (priority > 0xff)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, server, priority, priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

static void rtas_get_xive(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 3)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
    rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
}

static void rtas_int_off(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                         uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server, 0xff,
                   ics->irqs[nr - ics->offset].priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

static void rtas_int_on(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                        uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->icp->ics;
    uint32_t nr;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    ics_write_xive(ics, nr, ics->irqs[nr - ics->offset].server,
                   ics->irqs[nr - ics->offset].saved_priority,
                   ics->irqs[nr - ics->offset].saved_priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
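
/*
 * The guest-visible interface above splits along the ICP/ICS line: the
 * hypercalls (H_CPPR, H_IPI, H_XIRR, H_XIRR_X, H_EOI, H_IPOLL) operate
 * on per-CPU presentation state, while the RTAS calls (ibm,set-xive,
 * ibm,get-xive, ibm,int-off, ibm,int-on) configure individual sources.
 * ibm,int-off masks a source by writing priority 0xff while preserving
 * the old priority in saved_priority; ibm,int-on restores it.
 */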

/*
 * XICS
 */

static void xics_set_nr_irqs(XICSState *icp, uint32_t nr_irqs, Error **errp)
{
    icp->nr_irqs = icp->ics->nr_irqs = nr_irqs;
}

static void xics_set_nr_servers(XICSState *icp, uint32_t nr_servers,
                                Error **errp)
{
    int i;

    icp->nr_servers = nr_servers;

    icp->ss = g_malloc0(icp->nr_servers * sizeof(ICPState));
    for (i = 0; i < icp->nr_servers; i++) {
        char buffer[32];

        object_initialize(&icp->ss[i], sizeof(icp->ss[i]), TYPE_ICP);
        snprintf(buffer, sizeof(buffer), "icp[%d]", i);
        object_property_add_child(OBJECT(icp), buffer, OBJECT(&icp->ss[i]),
                                  errp);
    }
}

static void xics_realize(DeviceState *dev, Error **errp)
{
    XICSState *icp = XICS(dev);
    Error *error = NULL;
    int i;

    if (!icp->nr_servers) {
        error_setg(errp, "Number of servers needs to be greater than 0");
        return;
    }

    /* Registration of global state belongs in realize */
    spapr_rtas_register("ibm,set-xive", rtas_set_xive);
    spapr_rtas_register("ibm,get-xive", rtas_get_xive);
    spapr_rtas_register("ibm,int-off", rtas_int_off);
    spapr_rtas_register("ibm,int-on", rtas_int_on);

    spapr_register_hypercall(H_CPPR, h_cppr);
    spapr_register_hypercall(H_IPI, h_ipi);
    spapr_register_hypercall(H_XIRR, h_xirr);
    spapr_register_hypercall(H_XIRR_X, h_xirr_x);
    spapr_register_hypercall(H_EOI, h_eoi);
    spapr_register_hypercall(H_IPOLL, h_ipoll);

    object_property_set_bool(OBJECT(icp->ics), true, "realized", &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }

    for (i = 0; i < icp->nr_servers; i++) {
        object_property_set_bool(OBJECT(&icp->ss[i]), true, "realized", &error);
        if (error) {
            error_propagate(errp, error);
            return;
        }
    }
}

static void xics_initfn(Object *obj)
{
    XICSState *xics = XICS(obj);

    xics->ics = ICS(object_new(TYPE_ICS));
    object_property_add_child(obj, "ics", OBJECT(xics->ics), NULL);
    xics->ics->icp = xics;
}

static void xics_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    XICSStateClass *xsc = XICS_CLASS(oc);

    dc->realize = xics_realize;
    xsc->set_nr_irqs = xics_set_nr_irqs;
    xsc->set_nr_servers = xics_set_nr_servers;
}

static const TypeInfo xics_info = {
    .name = TYPE_XICS,
    .parent = TYPE_XICS_COMMON,
    .instance_size = sizeof(XICSState),
    .class_size = sizeof(XICSStateClass),
    .class_init = xics_class_init,
    .instance_init = xics_initfn,
};

static void xics_register_types(void)
{
    type_register_static(&xics_common_info);
    type_register_static(&xics_info);
    type_register_static(&ics_info);
    type_register_static(&icp_info);
}

type_init(xics_register_types)