/*
 * Copyright (c) 2007, Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Jiang Yunhong <yunhong.jiang@intel.com>
 *
 * This file implements direct PCI assignment to a HVM guest
 */

#include "qemu/osdep.h"

#include "hw/xen/xen-legacy-backend.h"
#include "xen_pt.h"
#include "hw/i386/apic-msidef.h"

#define XEN_PT_AUTO_ASSIGN -1

/* shift count for gflags */
#define XEN_PT_GFLAGS_SHIFT_DEST_ID     0
#define XEN_PT_GFLAGS_SHIFT_RH          8
#define XEN_PT_GFLAGS_SHIFT_DM          9
#define XEN_PT_GFLAGSSHIFT_DELIV_MODE   12
#define XEN_PT_GFLAGSSHIFT_TRG_MODE     15
#define XEN_PT_GFLAGSSHIFT_UNMASKED     16

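/*
 * Shorthand for the latched copy of one dword of an MSI-X entry:
 * entry->latch(DATA) expands to the element of entry->latch[] that
 * shadows the PCI_MSIX_ENTRY_DATA dword, and likewise for the other
 * PCI_MSIX_ENTRY_* field offsets (XenPTMSIXEntry comes from xen_pt.h).
 */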
#define latch(fld) latch[PCI_MSIX_ENTRY_##fld / sizeof(uint32_t)]
/*
 * Helpers
 */

static inline uint8_t msi_vector(uint32_t data)
{
    return (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
}

static inline uint8_t msi_dest_id(uint32_t addr)
{
    return (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
}

static inline uint32_t msi_ext_dest_id(uint32_t addr_hi)
{
    return addr_hi & 0xffffff00;
}

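/*
 * Repack the guest-programmed MSI address/data fields (destination id,
 * redirection hint, destination mode, delivery mode, trigger mode) into
 * the gflags layout used by the Xen hypercall interface, at the shift
 * positions defined above.
 */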
static uint32_t msi_gflags(uint32_t data, uint64_t addr)
{
    uint32_t result = 0;
    int rh, dm, dest_id, deliv_mode, trig_mode;

    rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
    dm = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
    dest_id = msi_dest_id(addr);
    deliv_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
    trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

    result = dest_id | (rh << XEN_PT_GFLAGS_SHIFT_RH)
        | (dm << XEN_PT_GFLAGS_SHIFT_DM)
        | (deliv_mode << XEN_PT_GFLAGSSHIFT_DELIV_MODE)
        | (trig_mode << XEN_PT_GFLAGSSHIFT_TRG_MODE);

    return result;
}

static inline uint64_t msi_addr64(XenPTMSI *msi)
{
    return (uint64_t)msi->addr_hi << 32 | msi->addr_lo;
}

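/*
 * Read-modify-write of the physical MSI or MSI-X control word: set or
 * clear 'flag' (e.g. PCI_MSI_FLAGS_ENABLE) at config-space offset
 * 'address' on the real device.
 */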
static int msi_msix_enable(XenPCIPassthroughState *s,
                           uint32_t address,
                           uint16_t flag,
                           bool enable)
{
    uint16_t val = 0;
    int rc;

    if (!address) {
        return -1;
    }

    rc = xen_host_pci_get_word(&s->real_device, address, &val);
    if (rc) {
        XEN_PT_ERR(&s->dev, "Failed to read MSI/MSI-X register (0x%x), rc:%d\n",
                   address, rc);
        return rc;
    }
    if (enable) {
        val |= flag;
    } else {
        val &= ~flag;
    }
    rc = xen_host_pci_set_word(&s->real_device, address, val);
    if (rc) {
        XEN_PT_ERR(&s->dev, "Failed to write MSI/MSI-X register (0x%x), rc:%d\n",
                   address, rc);
    }
    return rc;
}

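/*
 * Obtain a pirq for the interrupt: either reuse the pirq the guest
 * encoded into the MSI address/data (the xen_is_pirq_msi() case) or,
 * when the entry is not yet mapped, ask Xen to allocate one with
 * xc_physdev_map_pirq_msi().
 */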
static int msi_msix_setup(XenPCIPassthroughState *s,
                          uint64_t addr,
                          uint32_t data,
                          int *ppirq,
                          bool is_msix,
                          int msix_entry,
                          bool is_not_mapped)
{
    uint8_t gvec = msi_vector(data);
    int rc = 0;

    assert((!is_msix && msix_entry == 0) || is_msix);

    if (xen_is_pirq_msi(data)) {
        *ppirq = msi_ext_dest_id(addr >> 32) | msi_dest_id(addr);
        if (!*ppirq) {
            /*
             * This probably identifies a misconfiguration of the guest;
             * try the emulated path.
             */
            *ppirq = XEN_PT_UNASSIGNED_PIRQ;
        } else {
            XEN_PT_LOG(&s->dev, "requested pirq %d for MSI%s"
                       " (vec: %#x, entry: %#x)\n",
                       *ppirq, is_msix ? "-X" : "", gvec, msix_entry);
        }
    }

    if (is_not_mapped) {
        uint64_t table_base = 0;

        if (is_msix) {
            table_base = s->msix->table_base;
        }

        rc = xc_physdev_map_pirq_msi(xen_xc, xen_domid, XEN_PT_AUTO_ASSIGN,
                                     ppirq, PCI_DEVFN(s->real_device.dev,
                                                      s->real_device.func),
                                     s->real_device.bus,
                                     msix_entry, table_base);
        if (rc) {
            XEN_PT_ERR(&s->dev,
                       "Mapping of MSI%s failed (err: %i, vec: %#x, entry %#x)\n",
                       is_msix ? "-X" : "", errno, gvec, msix_entry);
            return rc;
        }
    }

    return 0;
}

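/*
 * (Re)bind an already-mapped pirq to the guest vector and flags via
 * xc_domain_update_msi_irq(); on failure, unmap the now-stale pirq.
 */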
static int msi_msix_update(XenPCIPassthroughState *s,
                           uint64_t addr,
                           uint32_t data,
                           int pirq,
                           bool is_msix,
                           int msix_entry,
                           int *old_pirq,
                           bool masked)
{
    PCIDevice *d = &s->dev;
    uint8_t gvec = msi_vector(data);
    uint32_t gflags = msi_gflags(data, addr);
    int rc = 0;
    uint64_t table_addr = 0;

    XEN_PT_LOG(d, "Updating MSI%s with pirq %d gvec %#x gflags %#x"
               " (entry: %#x)\n",
               is_msix ? "-X" : "", pirq, gvec, gflags, msix_entry);

    if (is_msix) {
        table_addr = s->msix->mmio_base_addr;
    }

    gflags |= masked ? 0 : (1u << XEN_PT_GFLAGSSHIFT_UNMASKED);

    rc = xc_domain_update_msi_irq(xen_xc, xen_domid, gvec,
                                  pirq, gflags, table_addr);
    if (rc) {
        XEN_PT_ERR(d, "Updating of MSI%s failed. (err: %d)\n",
                   is_msix ? "-X" : "", errno);

        if (xc_physdev_unmap_pirq(xen_xc, xen_domid, *old_pirq)) {
            XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %d)\n",
                       is_msix ? "-X" : "", *old_pirq, errno);
        }
        *old_pirq = XEN_PT_UNASSIGNED_PIRQ;
    }
    return rc;
}

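/*
 * Tear-down path: unbind the pirq from the guest vector (if it was
 * bound) and then unmap it from the domain.
 */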
static int msi_msix_disable(XenPCIPassthroughState *s,
                            uint64_t addr,
                            uint32_t data,
                            int pirq,
                            bool is_msix,
                            bool is_binded)
{
    PCIDevice *d = &s->dev;
    uint8_t gvec = msi_vector(data);
    uint32_t gflags = msi_gflags(data, addr);
    int rc = 0;

    if (pirq == XEN_PT_UNASSIGNED_PIRQ) {
        return 0;
    }

    if (is_binded) {
        XEN_PT_LOG(d, "Unbind MSI%s with pirq %d, gvec %#x\n",
                   is_msix ? "-X" : "", pirq, gvec);
        rc = xc_domain_unbind_msi_irq(xen_xc, xen_domid, gvec, pirq, gflags);
        if (rc) {
            XEN_PT_ERR(d, "Unbinding of MSI%s failed. (err: %d, pirq: %d, gvec: %#x)\n",
                       is_msix ? "-X" : "", errno, pirq, gvec);
            return rc;
        }
    }

    XEN_PT_LOG(d, "Unmap MSI%s pirq %d\n", is_msix ? "-X" : "", pirq);
    rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, pirq);
    if (rc) {
        XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (err: %i)\n",
                   is_msix ? "-X" : "", pirq, errno);
        return rc;
    }

    return 0;
}

/*
 * MSI virtualization functions
 */

static int xen_pt_msi_set_enable(XenPCIPassthroughState *s, bool enable)
{
    XEN_PT_LOG(&s->dev, "%s MSI.\n", enable ? "enabling" : "disabling");

    if (!s->msi) {
        return -1;
    }

    return msi_msix_enable(s, s->msi->ctrl_offset, PCI_MSI_FLAGS_ENABLE,
                           enable);
}

/* setup physical msi, but don't enable it */
int xen_pt_msi_setup(XenPCIPassthroughState *s)
{
    int pirq = XEN_PT_UNASSIGNED_PIRQ;
    int rc = 0;
    XenPTMSI *msi = s->msi;

    if (msi->initialized) {
        XEN_PT_ERR(&s->dev,
                   "Attempted to set up physical MSI when it is already initialized.\n");
        return -1;
    }

    rc = msi_msix_setup(s, msi_addr64(msi), msi->data, &pirq, false, 0, true);
    if (rc) {
        return rc;
    }

    if (pirq < 0) {
        XEN_PT_ERR(&s->dev, "Invalid pirq number: %d.\n", pirq);
        return -1;
    }

    msi->pirq = pirq;
    XEN_PT_LOG(&s->dev, "MSI mapped with pirq %d.\n", pirq);

    return 0;
}

int xen_pt_msi_update(XenPCIPassthroughState *s)
{
    XenPTMSI *msi = s->msi;

    /* Current MSI emulation in QEMU only supports 1 vector */
    return msi_msix_update(s, msi_addr64(msi), msi->data, msi->pirq,
                           false, 0, &msi->pirq, msi->mask & 1);
}

void xen_pt_msi_disable(XenPCIPassthroughState *s)
{
    XenPTMSI *msi = s->msi;

    if (!msi) {
        return;
    }

    (void)xen_pt_msi_set_enable(s, false);

    msi_msix_disable(s, msi_addr64(msi), msi->data, msi->pirq, false,
                     msi->initialized);

    /* clear msi info */
    msi->flags &= ~PCI_MSI_FLAGS_ENABLE;
    msi->initialized = false;
    msi->mapped = false;
    msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
}

/*
 * MSI-X virtualization functions
 */

static int msix_set_enable(XenPCIPassthroughState *s, bool enabled)
{
    XEN_PT_LOG(&s->dev, "%s MSI-X.\n", enabled ? "enabling" : "disabling");

    if (!s->msix) {
        return -1;
    }

    return msi_msix_enable(s, s->msix->ctrl_offset, PCI_MSIX_FLAGS_ENABLE,
                           enabled);
}

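/*
 * Push the latched state of a single MSI-X entry down to Xen: map a
 * pirq on first use, then rebind it with the current address/data and
 * mask state.
 */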
static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr,
                                  uint32_t vec_ctrl)
{
    XenPTMSIXEntry *entry = NULL;
    int pirq;
    int rc;

    if (entry_nr < 0 || entry_nr >= s->msix->total_entries) {
        return -EINVAL;
    }

    entry = &s->msix->msix_entry[entry_nr];

    if (!entry->updated) {
        return 0;
    }

    pirq = entry->pirq;

    /*
     * Update the entry addr and data to the latest values only when the
     * entry is masked or they are all masked, as required by the spec.
     * Addr and data changes while the MSI-X entry is unmasked get deferred
     * until the next masked -> unmasked transition.
     */
    if (pirq == XEN_PT_UNASSIGNED_PIRQ || s->msix->maskall ||
        (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
        entry->addr = entry->latch(LOWER_ADDR) |
                      ((uint64_t)entry->latch(UPPER_ADDR) << 32);
        entry->data = entry->latch(DATA);
    }

    rc = msi_msix_setup(s, entry->addr, entry->data, &pirq, true, entry_nr,
                        entry->pirq == XEN_PT_UNASSIGNED_PIRQ);
    if (rc) {
        return rc;
    }
    if (entry->pirq == XEN_PT_UNASSIGNED_PIRQ) {
        entry->pirq = pirq;
    }

    rc = msi_msix_update(s, entry->addr, entry->data, pirq, true,
                         entry_nr, &entry->pirq,
                         vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT);
    if (!rc) {
        entry->updated = false;
    }

    return rc;
}

int xen_pt_msix_update(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;
    int i;

    for (i = 0; i < msix->total_entries; i++) {
        xen_pt_msix_update_one(s, i, msix->msix_entry[i].latch(VECTOR_CTRL));
    }

    return 0;
}

void xen_pt_msix_disable(XenPCIPassthroughState *s)
{
    int i = 0;

    msix_set_enable(s, false);

    for (i = 0; i < s->msix->total_entries; i++) {
        XenPTMSIXEntry *entry = &s->msix->msix_entry[i];

        msi_msix_disable(s, entry->addr, entry->data, entry->pirq, true, true);

        /* clear MSI-X info */
        entry->pirq = XEN_PT_UNASSIGNED_PIRQ;
        entry->updated = false;
    }
}

int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index)
{
    XenPTMSIXEntry *entry;
    int i, ret;

    if (!(s->msix && s->msix->bar_index == bar_index)) {
        return 0;
    }

    for (i = 0; i < s->msix->total_entries; i++) {
        entry = &s->msix->msix_entry[i];
        if (entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
            ret = xc_domain_unbind_pt_irq(xen_xc, xen_domid, entry->pirq,
                                          PT_IRQ_TYPE_MSI, 0, 0, 0, 0);
            if (ret) {
                XEN_PT_ERR(&s->dev, "unbind MSI-X pirq %d failed (err: %d)\n",
                           entry->pirq, errno);
            }
            entry->updated = true;
        }
    }
    return xen_pt_msix_update(s);
}

static uint32_t get_entry_value(XenPTMSIXEntry *e, int offset)
{
    assert(!(offset % sizeof(*e->latch)));
    return e->latch[offset / sizeof(*e->latch)];
}

static void set_entry_value(XenPTMSIXEntry *e, int offset, uint32_t val)
{
    assert(!(offset % sizeof(*e->latch)));
    e->latch[offset / sizeof(*e->latch)] = val;
}

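/*
 * MMIO write handler for the emulated MSI-X table. Address/data writes
 * are only latched and marked dirty; they take effect when the vector
 * control dword is written with the mask bit clear, i.e. on the
 * masked -> unmasked transition the update path keys off.
 */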
static void pci_msix_write(void *opaque, hwaddr addr,
                           uint64_t val, unsigned size)
{
    XenPCIPassthroughState *s = opaque;
    XenPTMSIX *msix = s->msix;
    XenPTMSIXEntry *entry;
    unsigned int entry_nr, offset;

    entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
    if (entry_nr >= msix->total_entries) {
        return;
    }
    entry = &msix->msix_entry[entry_nr];
    offset = addr % PCI_MSIX_ENTRY_SIZE;

    if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) {
        if (get_entry_value(entry, offset) == val
            && entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
            return;
        }

        entry->updated = true;
    } else if (msix->enabled && entry->updated &&
               !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
        const volatile uint32_t *vec_ctrl;

        /*
         * If Xen intercepts the mask bit access, entry->vec_ctrl may not be
         * up-to-date. Read from hardware directly.
         */
        vec_ctrl = s->msix->phys_iomem_base + entry_nr * PCI_MSIX_ENTRY_SIZE
            + PCI_MSIX_ENTRY_VECTOR_CTRL;
        xen_pt_msix_update_one(s, entry_nr, *vec_ctrl);
    }

    set_entry_value(entry, offset, val);
}

static uint64_t pci_msix_read(void *opaque, hwaddr addr,
                              unsigned size)
{
    XenPCIPassthroughState *s = opaque;
    XenPTMSIX *msix = s->msix;
    int entry_nr, offset;

    entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
    if (entry_nr < 0) {
        XEN_PT_ERR(&s->dev, "requested MSI-X entry %i is invalid\n", entry_nr);
        return 0;
    }

    offset = addr % PCI_MSIX_ENTRY_SIZE;

    if (addr < msix->total_entries * PCI_MSIX_ENTRY_SIZE) {
        return get_entry_value(&msix->msix_entry[entry_nr], offset);
    } else {
        /* Pending Bit Array (PBA) */
        return *(uint32_t *)(msix->phys_iomem_base + addr);
    }
}

static bool pci_msix_accepts(void *opaque, hwaddr addr,
                             unsigned size, bool is_write,
                             MemTxAttrs attrs)
{
    return !(addr & (size - 1));
}

static const MemoryRegionOps pci_msix_ops = {
    .read = pci_msix_read,
    .write = pci_msix_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false,
        .accepts = pci_msix_accepts
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
        .unaligned = false
    }
};

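/*
 * Discover the MSI-X capability, size the emulated table, and mmap()
 * the physical MSI-X table read-only through /dev/mem so that the
 * vector-control mask bits can be read back from hardware.
 */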
int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base)
{
    uint8_t id = 0;
    uint16_t control = 0;
    uint32_t table_off = 0;
    int i, total_entries, bar_index;
    XenHostPCIDevice *hd = &s->real_device;
    PCIDevice *d = &s->dev;
    int fd = -1;
    XenPTMSIX *msix = NULL;
    int rc = 0;

    rc = xen_host_pci_get_byte(hd, base + PCI_CAP_LIST_ID, &id);
    if (rc) {
        return rc;
    }

    if (id != PCI_CAP_ID_MSIX) {
        XEN_PT_ERR(d, "Invalid id %#x base %#x\n", id, base);
        return -1;
    }

    rc = xen_host_pci_get_word(hd, base + PCI_MSIX_FLAGS, &control);
    if (rc) {
        XEN_PT_ERR(d, "Failed to read PCI_MSIX_FLAGS field\n");
        return rc;
    }
    total_entries = control & PCI_MSIX_FLAGS_QSIZE;
    total_entries += 1;

    s->msix = g_malloc0(sizeof(XenPTMSIX)
                        + total_entries * sizeof(XenPTMSIXEntry));
    msix = s->msix;

    msix->total_entries = total_entries;
    for (i = 0; i < total_entries; i++) {
        msix->msix_entry[i].pirq = XEN_PT_UNASSIGNED_PIRQ;
    }

    memory_region_init_io(&msix->mmio, OBJECT(s), &pci_msix_ops,
                          s, "xen-pci-pt-msix",
                          (total_entries * PCI_MSIX_ENTRY_SIZE
                           + XC_PAGE_SIZE - 1)
                          & XC_PAGE_MASK);

    rc = xen_host_pci_get_long(hd, base + PCI_MSIX_TABLE, &table_off);
    if (rc) {
        XEN_PT_ERR(d, "Failed to read PCI_MSIX_TABLE field\n");
        goto error_out;
    }
    bar_index = msix->bar_index = table_off & PCI_MSIX_FLAGS_BIRMASK;
    table_off = table_off & ~PCI_MSIX_FLAGS_BIRMASK;
    msix->table_base = s->real_device.io_regions[bar_index].base_addr;
    XEN_PT_LOG(d, "get MSI-X table BAR base 0x%"PRIx64"\n", msix->table_base);

    fd = open("/dev/mem", O_RDWR);
    if (fd == -1) {
        rc = -errno;
        XEN_PT_ERR(d, "Can't open /dev/mem: %s\n", strerror(errno));
        goto error_out;
    }
    XEN_PT_LOG(d, "table_off = %#x, total_entries = %d\n",
               table_off, total_entries);
    msix->table_offset_adjust = table_off & 0x0fff;
    msix->phys_iomem_base =
        mmap(NULL,
             total_entries * PCI_MSIX_ENTRY_SIZE + msix->table_offset_adjust,
             PROT_READ,
             MAP_SHARED | MAP_LOCKED,
             fd,
             msix->table_base + table_off - msix->table_offset_adjust);
    close(fd);
    if (msix->phys_iomem_base == MAP_FAILED) {
        rc = -errno;
        XEN_PT_ERR(d, "Can't map physical MSI-X table: %s\n", strerror(errno));
        goto error_out;
    }
    msix->phys_iomem_base = (char *)msix->phys_iomem_base
        + msix->table_offset_adjust;
    XEN_PT_LOG(d, "mapping physical MSI-X table to %p\n",
               msix->phys_iomem_base);

    memory_region_add_subregion_overlap(&s->bar[bar_index], table_off,
                                        &msix->mmio,
                                        2); /* Priority: pci default + 1 */

    return 0;

error_out:
    g_free(s->msix);
    s->msix = NULL;
    return rc;
}

void xen_pt_msix_unmap(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;

    if (!msix) {
        return;
    }

    /* unmap the MSI-X memory mapped register area */
    if (msix->phys_iomem_base) {
        XEN_PT_LOG(&s->dev, "unmapping physical MSI-X table from %p\n",
                   msix->phys_iomem_base);
        /*
         * Undo the table_offset_adjust applied after mmap() so that the
         * address passed to munmap() is the page-aligned mapping base.
         */
        munmap((char *)msix->phys_iomem_base - msix->table_offset_adjust,
               msix->total_entries * PCI_MSIX_ENTRY_SIZE
               + msix->table_offset_adjust);
    }

    memory_region_del_subregion(&s->bar[msix->bar_index], &msix->mmio);
}

void xen_pt_msix_delete(XenPCIPassthroughState *s)
{
    XenPTMSIX *msix = s->msix;

    if (!msix) {
        return;
    }

    object_unparent(OBJECT(&msix->mmio));

    g_free(s->msix);
    s->msix = NULL;
}