2
0

msix.c 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668
  1. /*
  2. * MSI-X device support
  3. *
  4. * This module includes support for MSI-X in pci devices.
  5. *
  6. * Author: Michael S. Tsirkin <mst@redhat.com>
  7. *
  8. * Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)
  9. *
  10. * This work is licensed under the terms of the GNU GPL, version 2. See
  11. * the COPYING file in the top-level directory.
  12. *
  13. * Contributions after 2012-01-13 are licensed under the terms of the
  14. * GNU GPL, version 2 or (at your option) any later version.
  15. */
  16. #include "qemu/osdep.h"
  17. #include "hw/pci/msi.h"
  18. #include "hw/pci/msix.h"
  19. #include "hw/pci/pci.h"
  20. #include "hw/xen/xen.h"
  21. #include "sysemu/xen.h"
  22. #include "migration/qemu-file-types.h"
  23. #include "migration/vmstate.h"
  24. #include "qemu/range.h"
  25. #include "qapi/error.h"
  26. #include "trace.h"
/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
/* Masks below are pre-shifted to apply to that single config byte. */
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)
  31. MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
  32. {
  33. uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
  34. MSIMessage msg;
  35. msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
  36. msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
  37. return msg;
  38. }
  39. /*
  40. * Special API for POWER to configure the vectors through
  41. * a side channel. Should never be used by devices.
  42. */
  43. void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg)
  44. {
  45. uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
  46. pci_set_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR, msg.address);
  47. pci_set_long(table_entry + PCI_MSIX_ENTRY_DATA, msg.data);
  48. table_entry[PCI_MSIX_ENTRY_VECTOR_CTRL] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
  49. }
  50. static uint8_t msix_pending_mask(int vector)
  51. {
  52. return 1 << (vector % 8);
  53. }
  54. static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
  55. {
  56. return dev->msix_pba + vector / 8;
  57. }
  58. static int msix_is_pending(PCIDevice *dev, int vector)
  59. {
  60. return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
  61. }
  62. void msix_set_pending(PCIDevice *dev, unsigned int vector)
  63. {
  64. *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
  65. }
  66. void msix_clr_pending(PCIDevice *dev, int vector)
  67. {
  68. *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
  69. }
/*
 * Effective mask state of @vector: true if the function-level mask
 * @fmask is set, or the entry's own Vector Control mask bit is set.
 */
static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
    unsigned offset = vector * PCI_MSIX_ENTRY_SIZE;
    uint8_t *data = &dev->msix_table[offset + PCI_MSIX_ENTRY_DATA];

    /* MSIs on Xen can be remapped into pirqs. In those cases, masking
     * and unmasking go through the PV evtchn path. */
    if (xen_enabled() && xen_is_pirq_msi(pci_get_long(data))) {
        return false;
    }
    /* Note: '&' binds tighter than '||', so this tests the MASKBIT. */
    return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] &
        PCI_MSIX_ENTRY_CTRL_MASKBIT;
}
  82. bool msix_is_masked(PCIDevice *dev, unsigned int vector)
  83. {
  84. return msix_vector_masked(dev, vector, dev->msix_function_masked);
  85. }
  86. static void msix_fire_vector_notifier(PCIDevice *dev,
  87. unsigned int vector, bool is_masked)
  88. {
  89. MSIMessage msg;
  90. int ret;
  91. if (!dev->msix_vector_use_notifier) {
  92. return;
  93. }
  94. if (is_masked) {
  95. dev->msix_vector_release_notifier(dev, vector);
  96. } else {
  97. msg = msix_get_message(dev, vector);
  98. ret = dev->msix_vector_use_notifier(dev, vector, msg);
  99. assert(ret >= 0);
  100. }
  101. }
/*
 * React to a change of @vector's effective mask state: fire the
 * use/release notifier and, on unmask, deliver any latched interrupt.
 */
static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    /* Newly unmasked with a pending bit set: clear and deliver it now. */
    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}
  114. static bool msix_masked(PCIDevice *dev)
  115. {
  116. return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK;
  117. }
  118. static void msix_update_function_masked(PCIDevice *dev)
  119. {
  120. dev->msix_function_masked = !msix_enabled(dev) || msix_masked(dev);
  121. }
/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    /* Only byte 1 of the flags word (enable / mask-all bits) matters. */
    if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    trace_msix_write_config(dev->name, msix_enabled(dev), msix_masked(dev));

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    /* While MSI-X is enabled, INTx must not be asserted. */
    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    /*
     * Function mask flipped: re-evaluate each vector, passing its
     * effective mask state under the OLD function mask as "was_masked".
     */
    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}
  147. static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr,
  148. unsigned size)
  149. {
  150. PCIDevice *dev = opaque;
  151. return pci_get_long(dev->msix_table + addr);
  152. }
  153. static void msix_table_mmio_write(void *opaque, hwaddr addr,
  154. uint64_t val, unsigned size)
  155. {
  156. PCIDevice *dev = opaque;
  157. int vector = addr / PCI_MSIX_ENTRY_SIZE;
  158. bool was_masked;
  159. was_masked = msix_is_masked(dev, vector);
  160. pci_set_long(dev->msix_table + addr, val);
  161. msix_handle_mask_update(dev, vector, was_masked);
  162. }
static const MemoryRegionOps msix_table_mmio_ops = {
    .read = msix_table_mmio_read,
    .write = msix_table_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    /* Guests may issue 4- or 8-byte accesses... */
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    /* ...but the handlers above operate on at most one dword at a time. */
    .impl = {
        .max_access_size = 4,
    },
};
  175. static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
  176. unsigned size)
  177. {
  178. PCIDevice *dev = opaque;
  179. if (dev->msix_vector_poll_notifier) {
  180. unsigned vector_start = addr * 8;
  181. unsigned vector_end = MIN(addr + size * 8, dev->msix_entries_nr);
  182. dev->msix_vector_poll_notifier(dev, vector_start, vector_end);
  183. }
  184. return pci_get_long(dev->msix_pba + addr);
  185. }
/* The PBA is read-only for software; guest writes are silently ignored. */
static void msix_pba_mmio_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
}
static const MemoryRegionOps msix_pba_mmio_ops = {
    .read = msix_pba_mmio_read,
    .write = msix_pba_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    /* Guests may issue 4- or 8-byte accesses... */
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    /* ...but the handlers above operate on at most one dword at a time. */
    .impl = {
        .max_access_size = 4,
    },
};
  202. static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
  203. {
  204. int vector;
  205. for (vector = 0; vector < nentries; ++vector) {
  206. unsigned offset =
  207. vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
  208. bool was_masked = msix_is_masked(dev, vector);
  209. dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
  210. msix_handle_mask_update(dev, vector, was_masked);
  211. }
  212. }
/*
 * Make PCI device @dev MSI-X capable
 * @nentries is the max number of MSI-X vectors that the device support.
 * @table_bar is the MemoryRegion that MSI-X table structure resides.
 * @table_bar_nr is number of base address register corresponding to @table_bar.
 * @table_offset indicates the offset that the MSI-X table structure starts with
 * in @table_bar.
 * @pba_bar is the MemoryRegion that the Pending Bit Array structure resides.
 * @pba_bar_nr is number of base address register corresponding to @pba_bar.
 * @pba_offset indicates the offset that the Pending Bit Array structure
 * starts with in @pba_bar.
 * Non-zero @cap_pos puts capability MSI-X at that offset in PCI config space.
 * @errp is for returning errors.
 *
 * Return 0 on success; set @errp and return -errno on error:
 * -ENOTSUP means lacking msi support for a msi-capable platform.
 * -EINVAL means capability overlap, happens when @cap_pos is non-zero,
 * also means a programming error, except device assignment, which can check
 * if a real HW is broken.
 */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos,
              Error **errp)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_nonbroken) {
        error_setg(errp, "MSI-X is not supported by interrupt controller");
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "The number of MSI-X vectors is invalid");
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    /* PBA is one bit per vector, rounded up to a multiple of 64 bits. */
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        error_setg(errp, "table & pba overlap, or they don't fit in BARs,"
                   " or don't align");
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX,
                             cap_pos, MSIX_CAP_LENGTH, errp);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    /* The Table Size field in the message control word holds N - 1. */
    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    /* Function starts out masked: the enable bit is clear after reset. */
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make flags bit writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
        MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    /* Per-vector mask bits are set after reset. */
    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev), &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev), &msix_pba_mmio_ops, dev,
                          "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    return 0;
}
/*
 * Set up MSI-X with the vector table and PBA packed into a single,
 * dedicated BAR @bar_nr.  Returns 0 on success, -errno (via msix_init)
 * on failure.
 */
int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
                            uint8_t bar_nr, Error **errp)
{
    int ret;
    char *name;
    uint32_t bar_size = 4096;
    uint32_t bar_pba_offset = bar_size / 2;
    uint32_t bar_pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /*
     * Migration compatibility dictates that this remains a 4k
     * BAR with the vector table in the lower half and PBA in
     * the upper half for nentries which is lower or equal to 128.
     * No need to care about using more than 65 entries for legacy
     * machine types who has at most 64 queues.
     */
    if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) {
        bar_pba_offset = nentries * PCI_MSIX_ENTRY_SIZE;
    }

    if (bar_pba_offset + bar_pba_size > 4096) {
        bar_size = bar_pba_offset + bar_pba_size;
    }

    /* PCI BAR sizes must be powers of two. */
    bar_size = pow2ceil(bar_size);

    name = g_strdup_printf("%s-msix", dev->name);
    memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, bar_size);
    g_free(name);

    ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
                    0, &dev->msix_exclusive_bar,
                    bar_nr, bar_pba_offset,
                    0, errp);
    if (ret) {
        return ret;
    }

    pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->msix_exclusive_bar);

    return 0;
}
  327. static void msix_free_irq_entries(PCIDevice *dev)
  328. {
  329. int vector;
  330. for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
  331. dev->msix_entry_used[vector] = 0;
  332. msix_clr_pending(dev, vector);
  333. }
  334. }
  335. static void msix_clear_all_vectors(PCIDevice *dev)
  336. {
  337. int vector;
  338. for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
  339. msix_clr_pending(dev, vector);
  340. }
  341. }
/* Clean up resources for the device. */
void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar)
{
    if (!msix_present(dev)) {
        return;
    }
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    /* Remove each MMIO region before freeing its backing store. */
    memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio);
    g_free(dev->msix_pba);
    dev->msix_pba = NULL;
    memory_region_del_subregion(table_bar, &dev->msix_table_mmio);
    g_free(dev->msix_table);
    dev->msix_table = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
}
  362. void msix_uninit_exclusive_bar(PCIDevice *dev)
  363. {
  364. if (msix_present(dev)) {
  365. msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar);
  366. }
  367. }
/* Serialize the MSI-X table and PBA into the migration stream. */
void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!msix_present(dev)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    /* Only the bytes actually holding pending bits are migrated,
     * which may be fewer than the 64-bit-aligned allocation. */
    qemu_put_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
}
/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;
    unsigned int vector;

    if (!msix_present(dev)) {
        return;
    }

    msix_clear_all_vectors(dev);
    qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
    msix_update_function_masked(dev);

    /* Treat every vector as previously masked so notifiers fire for
     * any vector that is unmasked in the restored state. */
    for (vector = 0; vector < n; vector++) {
        msix_handle_mask_update(dev, vector, true);
    }
}
/* Does device support MSI-X? Returns the raw capability bit (non-zero
 * if present), not a normalized boolean. */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}
/* Is MSI-X enabled? True iff the capability is present AND the guest
 * has set the enable bit in the message control word. */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}
  405. /* Send an MSI-X message */
  406. void msix_notify(PCIDevice *dev, unsigned vector)
  407. {
  408. MSIMessage msg;
  409. if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
  410. return;
  411. }
  412. if (msix_is_masked(dev, vector)) {
  413. msix_set_pending(dev, vector);
  414. return;
  415. }
  416. msg = msix_get_message(dev, vector);
  417. msi_send_message(dev, msg);
  418. }
/* Reset MSI-X state: clear pending bits, writable config bits, the
 * table and the PBA, then re-mask every vector as after power-on. */
void msix_reset(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_clear_all_vectors(dev);
    /* Clear exactly the guest-writable bits (enable and mask-all). */
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
        ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8);
    msix_mask_all(dev, dev->msix_entries_nr);
}
  431. /* PCI spec suggests that devices make it possible for software to configure
  432. * less vectors than supported by the device, but does not specify a standard
  433. * mechanism for devices to do so.
  434. *
  435. * We support this by asking devices to declare vectors software is going to
  436. * actually use, and checking this on the notification path. Devices that
  437. * don't want to follow the spec suggestion can declare all vectors as used. */
  438. /* Mark vector as used. */
  439. int msix_vector_use(PCIDevice *dev, unsigned vector)
  440. {
  441. if (vector >= dev->msix_entries_nr) {
  442. return -EINVAL;
  443. }
  444. dev->msix_entry_used[vector]++;
  445. return 0;
  446. }
  447. /* Mark vector as unused. */
  448. void msix_vector_unuse(PCIDevice *dev, unsigned vector)
  449. {
  450. if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
  451. return;
  452. }
  453. if (--dev->msix_entry_used[vector]) {
  454. return;
  455. }
  456. msix_clr_pending(dev, vector);
  457. }
  458. void msix_unuse_all_vectors(PCIDevice *dev)
  459. {
  460. if (!msix_present(dev)) {
  461. return;
  462. }
  463. msix_free_irq_entries(dev);
  464. }
/* Number of MSI-X vectors configured at msix_init() time. */
unsigned int msix_nr_vectors_allocated(const PCIDevice *dev)
{
    return dev->msix_entries_nr;
}
  469. static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector)
  470. {
  471. MSIMessage msg;
  472. if (msix_is_masked(dev, vector)) {
  473. return 0;
  474. }
  475. msg = msix_get_message(dev, vector);
  476. return dev->msix_vector_use_notifier(dev, vector, msg);
  477. }
  478. static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector)
  479. {
  480. if (msix_is_masked(dev, vector)) {
  481. return;
  482. }
  483. dev->msix_vector_release_notifier(dev, vector);
  484. }
/*
 * Register use/release (and optional poll) notifiers and hook up all
 * currently-unmasked vectors.  On failure, already-hooked vectors are
 * rolled back and the notifiers are cleared again.
 */
int msix_set_vector_notifiers(PCIDevice *dev,
                              MSIVectorUseNotifier use_notifier,
                              MSIVectorReleaseNotifier release_notifier,
                              MSIVectorPollNotifier poll_notifier)
{
    int vector, ret;

    assert(use_notifier && release_notifier);

    dev->msix_vector_use_notifier = use_notifier;
    dev->msix_vector_release_notifier = release_notifier;
    dev->msix_vector_poll_notifier = poll_notifier;

    /* Hook vectors only when MSI-X is enabled and not function-masked. */
    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            ret = msix_set_notifier_for_vector(dev, vector);
            if (ret < 0) {
                goto undo;
            }
        }
    }
    if (dev->msix_vector_poll_notifier) {
        dev->msix_vector_poll_notifier(dev, 0, dev->msix_entries_nr);
    }
    return 0;

undo:
    /* Roll back vectors that were hooked before the failure. */
    while (--vector >= 0) {
        msix_unset_notifier_for_vector(dev, vector);
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    return ret;
}
/* Unregister the vector notifiers, releasing any hooked-up vectors. */
void msix_unset_vector_notifiers(PCIDevice *dev)
{
    int vector;

    assert(dev->msix_vector_use_notifier &&
           dev->msix_vector_release_notifier);

    /* Mirror the set path: vectors were hooked only if MSI-X was
     * enabled and not function-masked. */
    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            msix_unset_notifier_for_vector(dev, vector);
        }
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    dev->msix_vector_poll_notifier = NULL;
}
/* VMState glue: @pv is the PCIDevice; delegate to msix_save(). */
static int put_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, QJSON *vmdesc)
{
    msix_save(pv, f);
    return 0;
}
/* VMState glue: @pv is the PCIDevice; delegate to msix_load(). */
static int get_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    msix_load(pv, f);
    return 0;
}
/* Custom VMState type wrapping msix_save()/msix_load(). */
static VMStateInfo vmstate_info_msix = {
    .name = "msix state",
    .get = get_msix_state,
    .put = put_msix_state,
};
/* Subsection embedding MSI-X state into a device's VMStateDescription;
 * the actual size is determined at run time by msix_save()/msix_load(). */
const VMStateDescription vmstate_msix = {
    .name = "msix",
    .fields = (VMStateField[]) {
        {
            .name = "msix",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0, /* ouch */
            .info = &vmstate_info_msix,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};